xref: /qemu/linux-user/syscall.c (revision 3072961b6edc99abfbd87caac3de29bb58a52ccf)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/mmap-lock.h"
30 #include "exec/tb-flush.h"
31 #include "exec/translation-block.h"
32 #include <elf.h>
33 #include <endian.h>
34 #include <grp.h>
35 #include <sys/ipc.h>
36 #include <sys/msg.h>
37 #include <sys/wait.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/swap.h>
45 #include <linux/capability.h>
46 #include <sched.h>
47 #include <sys/timex.h>
48 #include <sys/socket.h>
49 #include <linux/sockios.h>
50 #include <sys/un.h>
51 #include <sys/uio.h>
52 #include <poll.h>
53 #include <sys/times.h>
54 #include <sys/shm.h>
55 #include <sys/sem.h>
56 #include <sys/statfs.h>
57 #include <utime.h>
58 #include <sys/sysinfo.h>
59 #include <sys/signalfd.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63 #include <netinet/udp.h>
64 #include <linux/wireless.h>
65 #include <linux/icmp.h>
66 #include <linux/icmpv6.h>
67 #include <linux/if_tun.h>
68 #include <linux/in6.h>
69 #include <linux/errqueue.h>
70 #include <linux/random.h>
71 #ifdef CONFIG_TIMERFD
72 #include <sys/timerfd.h>
73 #endif
74 #ifdef CONFIG_EVENTFD
75 #include <sys/eventfd.h>
76 #endif
77 #ifdef CONFIG_EPOLL
78 #include <sys/epoll.h>
79 #endif
80 #ifdef CONFIG_ATTR
81 #include "qemu/xattr.h"
82 #endif
83 #ifdef CONFIG_SENDFILE
84 #include <sys/sendfile.h>
85 #endif
86 #ifdef HAVE_SYS_KCOV_H
87 #include <sys/kcov.h>
88 #endif
89 
90 #define termios host_termios
91 #define winsize host_winsize
92 #define termio host_termio
93 #define sgttyb host_sgttyb /* same as target */
94 #define tchars host_tchars /* same as target */
95 #define ltchars host_ltchars /* same as target */
96 
97 #include <linux/termios.h>
98 #include <linux/unistd.h>
99 #include <linux/cdrom.h>
100 #include <linux/hdreg.h>
101 #include <linux/soundcard.h>
102 #include <linux/kd.h>
103 #include <linux/mtio.h>
104 #include <linux/fs.h>
105 #include <linux/fd.h>
106 #if defined(CONFIG_FIEMAP)
107 #include <linux/fiemap.h>
108 #endif
109 #include <linux/fb.h>
110 #if defined(CONFIG_USBFS)
111 #include <linux/usbdevice_fs.h>
112 #include <linux/usb/ch9.h>
113 #endif
114 #include <linux/vt.h>
115 #include <linux/dm-ioctl.h>
116 #include <linux/reboot.h>
117 #include <linux/route.h>
118 #include <linux/filter.h>
119 #include <linux/blkpg.h>
120 #include <netpacket/packet.h>
121 #include <linux/netlink.h>
122 #include <linux/if_alg.h>
123 #include <linux/rtc.h>
124 #include <sound/asound.h>
125 #ifdef HAVE_BTRFS_H
126 #include <linux/btrfs.h>
127 #endif
128 #ifdef HAVE_DRM_H
129 #include <libdrm/drm.h>
130 #include <libdrm/i915_drm.h>
131 #endif
132 #include "linux_loop.h"
133 #include "uname.h"
134 
135 #include "qemu.h"
136 #include "user-internals.h"
137 #include "strace.h"
138 #include "signal-common.h"
139 #include "loader.h"
140 #include "user-mmap.h"
141 #include "user/page-protection.h"
142 #include "user/safe-syscall.h"
143 #include "user/signal.h"
144 #include "qemu/guest-random.h"
145 #include "qemu/selfmap.h"
146 #include "user/syscall-trace.h"
147 #include "special-errno.h"
148 #include "qapi/error.h"
149 #include "fd-trans.h"
150 #include "user/cpu_loop.h"
151 
152 #ifndef CLONE_IO
153 #define CLONE_IO                0x80000000      /* Clone io context */
154 #endif
155 
156 /* We can't directly call the host clone syscall, because this will
157  * badly confuse libc (breaking mutexes, for example). So we must
158  * divide clone flags into:
159  *  * flag combinations that look like pthread_create()
160  *  * flag combinations that look like fork()
161  *  * flags we can implement within QEMU itself
162  *  * flags we can't support and will return an error for
163  */
164 /* For thread creation, all these flags must be present; for
165  * fork, none must be present.
166  */
167 #define CLONE_THREAD_FLAGS                              \
168     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
169      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
170 
171 /* These flags are ignored:
172  * CLONE_DETACHED is now ignored by the kernel;
173  * CLONE_IO is just an optimisation hint to the I/O scheduler
174  */
175 #define CLONE_IGNORED_FLAGS                     \
176     (CLONE_DETACHED | CLONE_IO)
177 
178 #ifndef CLONE_PIDFD
179 # define CLONE_PIDFD 0x00001000
180 #endif
181 
182 /* Flags for fork which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_FORK_FLAGS               \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
186 
187 /* Flags for thread creation which we can implement within QEMU itself */
188 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
189     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
190      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
191 
192 #define CLONE_INVALID_FORK_FLAGS                                        \
193     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
194 
195 #define CLONE_INVALID_THREAD_FLAGS                                      \
196     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
197        CLONE_IGNORED_FLAGS))
198 
199 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
200  * have almost all been allocated. We cannot support any of
201  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
202  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
203  * The checks against the invalid thread masks above will catch these.
204  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
205  */
206 
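/*
 * Editor's illustration (a hedged sketch, not this file's do_fork()):
 * how the masks above are meant to combine.  A clone request carrying
 * all of CLONE_THREAD_FLAGS is treated like pthread_create(), one
 * carrying none of them like fork(), and any leftover bits in the
 * matching "invalid" mask cannot be emulated and are rejected.
 */
static inline int example_classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* thread-like: only bits outside the supported set are fatal */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -1 : 1;
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* fork-like: likewise reject unsupported leftovers */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -1 : 0;
    }
    return -1; /* partial thread-flag combinations are not supported */
}
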
207 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
208  * once. This exercises the codepaths for restart.
209  */
210 //#define DEBUG_ERESTARTSYS
211 
212 //#include <linux/msdos_fs.h>
213 #define VFAT_IOCTL_READDIR_BOTH \
214     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
215 #define VFAT_IOCTL_READDIR_SHORT \
216     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
217 
218 #undef _syscall0
219 #undef _syscall1
220 #undef _syscall2
221 #undef _syscall3
222 #undef _syscall4
223 #undef _syscall5
224 #undef _syscall6
225 
226 #define _syscall0(type,name)		\
227 static type name (void)			\
228 {					\
229 	return syscall(__NR_##name);	\
230 }
231 
232 #define _syscall1(type,name,type1,arg1)		\
233 static type name (type1 arg1)			\
234 {						\
235 	return syscall(__NR_##name, arg1);	\
236 }
237 
238 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
239 static type name (type1 arg1,type2 arg2)		\
240 {							\
241 	return syscall(__NR_##name, arg1, arg2);	\
242 }
243 
244 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
245 static type name (type1 arg1,type2 arg2,type3 arg3)		\
246 {								\
247 	return syscall(__NR_##name, arg1, arg2, arg3);		\
248 }
249 
250 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
252 {										\
253 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
254 }
255 
256 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
257 		  type5,arg5)							\
258 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
259 {										\
260 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
261 }
262 
263 
264 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
265 		  type5,arg5,type6,arg6)					\
266 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
267                   type6 arg6)							\
268 {										\
269 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
270 }
271 
272 
273 #define __NR_sys_uname __NR_uname
274 #define __NR_sys_getcwd1 __NR_getcwd
275 #define __NR_sys_getdents __NR_getdents
276 #define __NR_sys_getdents64 __NR_getdents64
277 #define __NR_sys_getpriority __NR_getpriority
278 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
279 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
280 #define __NR_sys_syslog __NR_syslog
281 #if defined(__NR_futex)
282 # define __NR_sys_futex __NR_futex
283 #endif
284 #if defined(__NR_futex_time64)
285 # define __NR_sys_futex_time64 __NR_futex_time64
286 #endif
287 #define __NR_sys_statx __NR_statx
288 
289 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
290 #define __NR__llseek __NR_lseek
291 #endif
292 
293 /* Newer kernel ports have llseek() instead of _llseek() */
294 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
295 #define TARGET_NR__llseek TARGET_NR_llseek
296 #endif
297 
298 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
299 #ifndef TARGET_O_NONBLOCK_MASK
300 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
301 #endif
302 
303 #define __NR_sys_gettid __NR_gettid
304 _syscall0(int, sys_gettid)
305 
306 /* For the 64-bit guest on 32-bit host case we must emulate
307  * getdents using getdents64, because otherwise the host
308  * might hand us back more dirent records than we can fit
309  * into the guest buffer after structure format conversion.
310  * In all other cases we emulate getdents using the host getdents, if available.
311  */
312 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
313 #define EMULATE_GETDENTS_WITH_GETDENTS
314 #endif
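/*
 * Editor's note (illustration of the reasoning above): a getdents record
 * is roughly { d_ino, d_off, d_reclen, d_name[] }.  On a 32-bit host the
 * d_ino/d_off fields of struct linux_dirent are only 32 bits wide, while
 * a 64-bit guest expects 64-bit fields, so each record may grow during
 * conversion and a buffer the host filled completely can overflow the
 * guest's byte count.  Host getdents64 records always carry 64-bit
 * d_ino/d_off, so converting from them never enlarges a record.
 */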
315 
316 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
317 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
318 #endif
319 #if (defined(TARGET_NR_getdents) && \
320       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
321     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
322 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
323 #endif
324 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
325 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
326           loff_t *, res, unsigned int, wh);
327 #endif
328 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
329 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
330           siginfo_t *, uinfo)
331 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
332 #ifdef __NR_exit_group
333 _syscall1(int,exit_group,int,error_code)
334 #endif
335 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
336 #define __NR_sys_close_range __NR_close_range
337 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
338 #ifndef CLOSE_RANGE_CLOEXEC
339 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
340 #endif
341 #endif
342 #if defined(__NR_futex)
343 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_futex_time64)
347 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
348           const struct timespec *,timeout,int *,uaddr2,int,val3)
349 #endif
350 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
351 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
354 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
355                              unsigned int, flags);
356 #endif
357 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
358 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
359 #endif
360 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
361 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
362           unsigned long *, user_mask_ptr);
363 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
364 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
365           unsigned long *, user_mask_ptr);
366 /* sched_attr is not defined in glibc < 2.41 */
367 #ifndef SCHED_ATTR_SIZE_VER0
368 struct sched_attr {
369     uint32_t size;
370     uint32_t sched_policy;
371     uint64_t sched_flags;
372     int32_t sched_nice;
373     uint32_t sched_priority;
374     uint64_t sched_runtime;
375     uint64_t sched_deadline;
376     uint64_t sched_period;
377     uint32_t sched_util_min;
378     uint32_t sched_util_max;
379 };
380 #endif
381 #define __NR_sys_sched_getattr __NR_sched_getattr
382 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
383           unsigned int, size, unsigned int, flags);
384 #define __NR_sys_sched_setattr __NR_sched_setattr
385 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
386           unsigned int, flags);
387 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
388 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
389 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
390 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
391           const struct sched_param *, param);
392 #define __NR_sys_sched_getparam __NR_sched_getparam
393 _syscall2(int, sys_sched_getparam, pid_t, pid,
394           struct sched_param *, param);
395 #define __NR_sys_sched_setparam __NR_sched_setparam
396 _syscall2(int, sys_sched_setparam, pid_t, pid,
397           const struct sched_param *, param);
398 #define __NR_sys_getcpu __NR_getcpu
399 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
400 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
401           void *, arg);
402 _syscall2(int, capget, struct __user_cap_header_struct *, header,
403           struct __user_cap_data_struct *, data);
404 _syscall2(int, capset, struct __user_cap_header_struct *, header,
405           struct __user_cap_data_struct *, data);
406 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
407 _syscall2(int, ioprio_get, int, which, int, who)
408 #endif
409 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
410 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
411 #endif
412 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
413 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
414 #endif
415 
416 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
417 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
418           unsigned long, idx1, unsigned long, idx2)
419 #endif
420 
421 /*
422  * It is assumed that struct statx is architecture independent.
423  */
424 #if defined(TARGET_NR_statx) && defined(__NR_statx)
425 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
426           unsigned int, mask, struct target_statx *, statxbuf)
427 #endif
428 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
429 _syscall2(int, membarrier, int, cmd, int, flags)
430 #endif
431 
432 static const bitmask_transtbl fcntl_flags_tbl[] = {
433   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
434   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
435   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
436   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
437   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
438   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
439   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
440   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
441   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
442   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
443   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
444   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
445   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
446 #if defined(O_DIRECT)
447   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
448 #endif
449 #if defined(O_NOATIME)
450   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
451 #endif
452 #if defined(O_CLOEXEC)
453   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
454 #endif
455 #if defined(O_PATH)
456   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
457 #endif
458 #if defined(O_TMPFILE)
459   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
460 #endif
461   /* Don't terminate the list prematurely on 64-bit host+guest.  */
462 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
463   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
464 #endif
465 };
466 
467 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
468 
469 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
470 #if defined(__NR_utimensat)
471 #define __NR_sys_utimensat __NR_utimensat
472 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
473           const struct timespec *,tsp,int,flags)
474 #else
475 static int sys_utimensat(int dirfd, const char *pathname,
476                          const struct timespec times[2], int flags)
477 {
478     errno = ENOSYS;
479     return -1;
480 }
481 #endif
482 #endif /* TARGET_NR_utimensat */
483 
484 #ifdef TARGET_NR_renameat2
485 #if defined(__NR_renameat2)
486 #define __NR_sys_renameat2 __NR_renameat2
487 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
488           const char *, new, unsigned int, flags)
489 #else
490 static int sys_renameat2(int oldfd, const char *old,
491                          int newfd, const char *new, int flags)
492 {
493     if (flags == 0) {
494         return renameat(oldfd, old, newfd, new);
495     }
496     errno = ENOSYS;
497     return -1;
498 }
499 #endif
500 #endif /* TARGET_NR_renameat2 */
501 
502 #ifdef CONFIG_INOTIFY
503 #include <sys/inotify.h>
504 #else
505 /* Userspace can usually survive runtime without inotify */
506 #undef TARGET_NR_inotify_init
507 #undef TARGET_NR_inotify_init1
508 #undef TARGET_NR_inotify_add_watch
509 #undef TARGET_NR_inotify_rm_watch
510 #endif /* CONFIG_INOTIFY  */
511 
512 #if defined(TARGET_NR_prlimit64)
513 #ifndef __NR_prlimit64
514 # define __NR_prlimit64 -1
515 #endif
516 #define __NR_sys_prlimit64 __NR_prlimit64
517 /* The glibc rlimit structure may not be the one used by the underlying syscall */
518 struct host_rlimit64 {
519     uint64_t rlim_cur;
520     uint64_t rlim_max;
521 };
522 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
523           const struct host_rlimit64 *, new_limit,
524           struct host_rlimit64 *, old_limit)
525 #endif
526 
527 
528 #if defined(TARGET_NR_timer_create)
529 /* Maximum of 32 active POSIX timers allowed at any one time. */
530 #define GUEST_TIMER_MAX 32
531 static timer_t g_posix_timers[GUEST_TIMER_MAX];
532 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
533 
534 static inline int next_free_host_timer(void)
535 {
536     int k;
537     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
538         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
539             return k;
540         }
541     }
542     return -1;
543 }
544 
545 static inline void free_host_timer_slot(int id)
546 {
547     qatomic_store_release(g_posix_timer_allocated + id, 0);
548 }
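/*
 * Editor's illustration (sketch only, not the TARGET_NR_timer_create
 * handler further down): the intended usage of the two helpers above.
 * A slot is claimed via the atomic exchange and handed back with
 * free_host_timer_slot() if creating the host timer fails, so a failed
 * timer_create() never leaks one of the GUEST_TIMER_MAX slots.  Sigevent
 * setup and errno conversion are omitted here.
 */
static inline int example_claim_timer_slot(clockid_t clock)
{
    int slot = next_free_host_timer();

    if (slot < 0) {
        return -1;                    /* all guest timer slots are in use */
    }
    if (timer_create(clock, NULL, &g_posix_timers[slot]) != 0) {
        free_host_timer_slot(slot);   /* release the slot on failure */
        return -1;
    }
    return slot;                      /* basis of the guest-visible timer id */
}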
549 #endif
550 
551 static inline int host_to_target_errno(int host_errno)
552 {
553     switch (host_errno) {
554 #define E(X)  case X: return TARGET_##X;
555 #include "errnos.c.inc"
556 #undef E
557     default:
558         return host_errno;
559     }
560 }
561 
562 static inline int target_to_host_errno(int target_errno)
563 {
564     switch (target_errno) {
565 #define E(X)  case TARGET_##X: return X;
566 #include "errnos.c.inc"
567 #undef E
568     default:
569         return target_errno;
570     }
571 }
572 
573 abi_long get_errno(abi_long ret)
574 {
575     if (ret == -1)
576         return -host_to_target_errno(errno);
577     else
578         return ret;
579 }
580 
581 const char *target_strerror(int err)
582 {
583     if (err == QEMU_ERESTARTSYS) {
584         return "To be restarted";
585     }
586     if (err == QEMU_ESIGRETURN) {
587         return "Successful exit from sigreturn";
588     }
589 
590     return strerror(target_to_host_errno(err));
591 }
592 
593 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
594 {
595     int i;
596     uint8_t b;
597     if (usize <= ksize) {
598         return 1;
599     }
600     for (i = ksize; i < usize; i++) {
601         if (get_user_u8(b, addr + i)) {
602             return -TARGET_EFAULT;
603         }
604         if (b != 0) {
605             return 0;
606         }
607     }
608     return 1;
609 }
610 
611 /*
612  * Copies a target struct to a host struct, in a way that guarantees
613  * backwards-compatibility for struct syscall arguments.
614  *
615  * Similar to kernels uaccess.h:copy_struct_from_user()
616  */
617 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
618 {
619     size_t size = MIN(ksize, usize);
620     size_t rest = MAX(ksize, usize) - size;
621 
622     /* Deal with trailing bytes. */
623     if (usize < ksize) {
624         memset(dst + size, 0, rest);
625     } else if (usize > ksize) {
626         int ret = check_zeroed_user(src, ksize, usize);
627         if (ret <= 0) {
628             return ret ?: -TARGET_E2BIG;
629         }
630     }
631     /* Copy the interoperable parts of the struct. */
632     if (copy_from_user(dst, src, size)) {
633         return -TARGET_EFAULT;
634     }
635     return 0;
636 }
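/*
 * Editor's illustration (a sketch with a hypothetical struct, not code
 * from this file): how copy_struct_from_user() is typically called for a
 * size-versioned syscall argument.  The guest passes both an address and
 * the size its struct was built against; smaller (older) layouts are
 * zero-extended, larger (newer) ones are accepted only if the trailing
 * bytes we do not understand are zero, otherwise -TARGET_E2BIG comes back.
 */
struct example_versioned_arg {        /* hypothetical layout */
    uint32_t size;
    uint32_t flags;
    uint64_t value;                   /* imagine this field was added later */
};

static inline abi_long example_copy_versioned_arg(abi_ptr guest_addr,
                                                  size_t guest_size)
{
    struct example_versioned_arg arg;
    int ret = copy_struct_from_user(&arg, sizeof(arg), guest_addr, guest_size);

    if (ret) {
        return ret;                   /* -TARGET_EFAULT or -TARGET_E2BIG */
    }
    /* arg.value reads as zero if the guest's struct predates that field */
    return 0;
}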
637 
638 #define safe_syscall0(type, name) \
639 static type safe_##name(void) \
640 { \
641     return safe_syscall(__NR_##name); \
642 }
643 
644 #define safe_syscall1(type, name, type1, arg1) \
645 static type safe_##name(type1 arg1) \
646 { \
647     return safe_syscall(__NR_##name, arg1); \
648 }
649 
650 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
651 static type safe_##name(type1 arg1, type2 arg2) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2); \
654 }
655 
656 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
657 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
658 { \
659     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
660 }
661 
662 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
663     type4, arg4) \
664 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
665 { \
666     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
667 }
668 
669 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
670     type4, arg4, type5, arg5) \
671 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
672     type5 arg5) \
673 { \
674     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
675 }
676 
677 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
678     type4, arg4, type5, arg5, type6, arg6) \
679 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680     type5 arg5, type6 arg6) \
681 { \
682     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
683 }
684 
685 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
686 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
687 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
688               int, flags, mode_t, mode)
689 
690 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
691               const struct open_how_ver0 *, how, size_t, size)
692 
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
701               char **, argv, char **, envp, int, flags)
702 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
703     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
704 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
705               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
706 #endif
707 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
708 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
709               struct timespec *, tsp, const sigset_t *, sigmask,
710               size_t, sigsetsize)
711 #endif
712 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
713               int, maxevents, int, timeout, const sigset_t *, sigmask,
714               size_t, sigsetsize)
715 #if defined(__NR_futex)
716 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
717               const struct timespec *,timeout,int *,uaddr2,int,val3)
718 #endif
719 #if defined(__NR_futex_time64)
720 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
721               const struct timespec *,timeout,int *,uaddr2,int,val3)
722 #endif
723 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
724 safe_syscall2(int, kill, pid_t, pid, int, sig)
725 safe_syscall2(int, tkill, int, tid, int, sig)
726 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
727 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
729 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
732               unsigned long, pos_l, unsigned long, pos_h)
733 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
734               socklen_t, addrlen)
735 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
736               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
737 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
738               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
739 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
740 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
741 safe_syscall2(int, flock, int, fd, int, operation)
742 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
743 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
744               const struct timespec *, uts, size_t, sigsetsize)
745 #endif
746 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
747               int, flags)
748 #if defined(TARGET_NR_nanosleep)
749 safe_syscall2(int, nanosleep, const struct timespec *, req,
750               struct timespec *, rem)
751 #endif
752 #if defined(TARGET_NR_clock_nanosleep) || \
753     defined(TARGET_NR_clock_nanosleep_time64)
754 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
755               const struct timespec *, req, struct timespec *, rem)
756 #endif
757 #ifdef __NR_ipc
758 #ifdef __s390x__
759 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
760               void *, ptr)
761 #else
762 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
763               void *, ptr, long, fifth)
764 #endif
765 #endif
766 #ifdef __NR_msgsnd
767 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
768               int, flags)
769 #endif
770 #ifdef __NR_msgrcv
771 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
772               long, msgtype, int, flags)
773 #endif
774 #ifdef __NR_semtimedop
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776               unsigned, nsops, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedsend) || \
779     defined(TARGET_NR_mq_timedsend_time64)
780 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
781               size_t, len, unsigned, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_mq_timedreceive) || \
784     defined(TARGET_NR_mq_timedreceive_time64)
785 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
786               size_t, len, unsigned *, prio, const struct timespec *, timeout)
787 #endif
788 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
789 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
790               int, outfd, loff_t *, poutoff, size_t, length,
791               unsigned int, flags)
792 #endif
793 
794 /* We do ioctl like this rather than via safe_syscall3 to preserve the
795  * "third argument might be integer or pointer or not present" behaviour of
796  * the libc function.
797  */
798 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
799 /* Similarly for fcntl. Since we always build with LFS enabled,
800  * we should be using the 64-bit structures automatically.
801  */
802 #ifdef __NR_fcntl64
803 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
804 #else
805 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
806 #endif
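
/*
 * Editor's illustration (sketch only): because safe_ioctl() and
 * safe_fcntl() forward their argument lists straight to safe_syscall(),
 * callers keep the libc-style "third argument may be absent" convention.
 * An F_GETFL query, for instance, needs no third argument at all:
 */
static inline abi_long example_get_fd_flags(int fd)
{
    return get_errno(safe_fcntl(fd, F_GETFL));
}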
807 
808 static inline int host_to_target_sock_type(int host_type)
809 {
810     int target_type;
811 
812     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
813     case SOCK_DGRAM:
814         target_type = TARGET_SOCK_DGRAM;
815         break;
816     case SOCK_STREAM:
817         target_type = TARGET_SOCK_STREAM;
818         break;
819     default:
820         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
821         break;
822     }
823 
824 #if defined(SOCK_CLOEXEC)
825     if (host_type & SOCK_CLOEXEC) {
826         target_type |= TARGET_SOCK_CLOEXEC;
827     }
828 #endif
829 
830 #if defined(SOCK_NONBLOCK)
831     if (host_type & SOCK_NONBLOCK) {
832         target_type |= TARGET_SOCK_NONBLOCK;
833     }
834 #endif
835 
836     return target_type;
837 }
838 
839 static abi_ulong target_brk, initial_target_brk;
840 
841 void target_set_brk(abi_ulong new_brk)
842 {
843     target_brk = TARGET_PAGE_ALIGN(new_brk);
844     initial_target_brk = target_brk;
845 }
846 
847 /* do_brk() must return target values and target errnos. */
848 abi_long do_brk(abi_ulong brk_val)
849 {
850     abi_long mapped_addr;
851     abi_ulong new_brk;
852     abi_ulong old_brk;
853 
854     /* brk pointers are always untagged */
855 
856     /* do not allow shrinking below the initial brk value */
857     if (brk_val < initial_target_brk) {
858         return target_brk;
859     }
860 
861     new_brk = TARGET_PAGE_ALIGN(brk_val);
862     old_brk = TARGET_PAGE_ALIGN(target_brk);
863 
864     /* new and old target_brk might be on the same page */
865     if (new_brk == old_brk) {
866         target_brk = brk_val;
867         return target_brk;
868     }
869 
870     /* Release heap if necessary */
871     if (new_brk < old_brk) {
872         target_munmap(new_brk, old_brk - new_brk);
873 
874         target_brk = brk_val;
875         return target_brk;
876     }
877 
878     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
879                               PROT_READ | PROT_WRITE,
880                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
881                               -1, 0);
882 
883     if (mapped_addr == old_brk) {
884         target_brk = brk_val;
885         return target_brk;
886     }
887 
888 #if defined(TARGET_ALPHA)
889     /* We (partially) emulate OSF/1 on Alpha, which requires we
890        return a proper errno, not an unchanged brk value.  */
891     return -TARGET_ENOMEM;
892 #endif
893     /* For everything else, return the previous break. */
894     return target_brk;
895 }
896 
897 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
898     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
899 static inline abi_long copy_from_user_fdset(fd_set *fds,
900                                             abi_ulong target_fds_addr,
901                                             int n)
902 {
903     int i, nw, j, k;
904     abi_ulong b, *target_fds;
905 
906     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
907     if (!(target_fds = lock_user(VERIFY_READ,
908                                  target_fds_addr,
909                                  sizeof(abi_ulong) * nw,
910                                  1)))
911         return -TARGET_EFAULT;
912 
913     FD_ZERO(fds);
914     k = 0;
915     for (i = 0; i < nw; i++) {
916         /* grab the abi_ulong */
917         __get_user(b, &target_fds[i]);
918         for (j = 0; j < TARGET_ABI_BITS; j++) {
919             /* check the bit inside the abi_ulong */
920             if ((b >> j) & 1)
921                 FD_SET(k, fds);
922             k++;
923         }
924     }
925 
926     unlock_user(target_fds, target_fds_addr, 0);
927 
928     return 0;
929 }
930 
931 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
932                                                  abi_ulong target_fds_addr,
933                                                  int n)
934 {
935     if (target_fds_addr) {
936         if (copy_from_user_fdset(fds, target_fds_addr, n))
937             return -TARGET_EFAULT;
938         *fds_ptr = fds;
939     } else {
940         *fds_ptr = NULL;
941     }
942     return 0;
943 }
944 
945 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
946                                           const fd_set *fds,
947                                           int n)
948 {
949     int i, nw, j, k;
950     abi_long v;
951     abi_ulong *target_fds;
952 
953     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
954     if (!(target_fds = lock_user(VERIFY_WRITE,
955                                  target_fds_addr,
956                                  sizeof(abi_ulong) * nw,
957                                  0)))
958         return -TARGET_EFAULT;
959 
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         v = 0;
963         for (j = 0; j < TARGET_ABI_BITS; j++) {
964             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
965             k++;
966         }
967         __put_user(v, &target_fds[i]);
968     }
969 
970     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
971 
972     return 0;
973 }
974 #endif
975 
976 #if defined(__alpha__)
977 #define HOST_HZ 1024
978 #else
979 #define HOST_HZ 100
980 #endif
981 
982 static inline abi_long host_to_target_clock_t(long ticks)
983 {
984 #if HOST_HZ == TARGET_HZ
985     return ticks;
986 #else
987     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
988 #endif
989 }
990 
991 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
992                                              const struct rusage *rusage)
993 {
994     struct target_rusage *target_rusage;
995 
996     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
997         return -TARGET_EFAULT;
998     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
999     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1000     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1001     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1002     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1003     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1004     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1005     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1006     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1007     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1008     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1009     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1010     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1011     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1012     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1013     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1014     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1015     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1016     unlock_user_struct(target_rusage, target_addr, 1);
1017 
1018     return 0;
1019 }
1020 
1021 #ifdef TARGET_NR_setrlimit
1022 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1023 {
1024     abi_ulong target_rlim_swap;
1025     rlim_t result;
1026 
1027     target_rlim_swap = tswapal(target_rlim);
1028     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1029         return RLIM_INFINITY;
1030 
1031     result = target_rlim_swap;
1032     if (target_rlim_swap != (rlim_t)result)
1033         return RLIM_INFINITY;
1034 
1035     return result;
1036 }
1037 #endif
1038 
1039 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1040 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1041 {
1042     abi_ulong target_rlim_swap;
1043     abi_ulong result;
1044 
1045     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1046         target_rlim_swap = TARGET_RLIM_INFINITY;
1047     else
1048         target_rlim_swap = rlim;
1049     result = tswapal(target_rlim_swap);
1050 
1051     return result;
1052 }
1053 #endif
1054 
1055 static inline int target_to_host_resource(int code)
1056 {
1057     switch (code) {
1058     case TARGET_RLIMIT_AS:
1059         return RLIMIT_AS;
1060     case TARGET_RLIMIT_CORE:
1061         return RLIMIT_CORE;
1062     case TARGET_RLIMIT_CPU:
1063         return RLIMIT_CPU;
1064     case TARGET_RLIMIT_DATA:
1065         return RLIMIT_DATA;
1066     case TARGET_RLIMIT_FSIZE:
1067         return RLIMIT_FSIZE;
1068     case TARGET_RLIMIT_LOCKS:
1069         return RLIMIT_LOCKS;
1070     case TARGET_RLIMIT_MEMLOCK:
1071         return RLIMIT_MEMLOCK;
1072     case TARGET_RLIMIT_MSGQUEUE:
1073         return RLIMIT_MSGQUEUE;
1074     case TARGET_RLIMIT_NICE:
1075         return RLIMIT_NICE;
1076     case TARGET_RLIMIT_NOFILE:
1077         return RLIMIT_NOFILE;
1078     case TARGET_RLIMIT_NPROC:
1079         return RLIMIT_NPROC;
1080     case TARGET_RLIMIT_RSS:
1081         return RLIMIT_RSS;
1082     case TARGET_RLIMIT_RTPRIO:
1083         return RLIMIT_RTPRIO;
1084 #ifdef RLIMIT_RTTIME
1085     case TARGET_RLIMIT_RTTIME:
1086         return RLIMIT_RTTIME;
1087 #endif
1088     case TARGET_RLIMIT_SIGPENDING:
1089         return RLIMIT_SIGPENDING;
1090     case TARGET_RLIMIT_STACK:
1091         return RLIMIT_STACK;
1092     default:
1093         return code;
1094     }
1095 }
1096 
1097 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1098                                               abi_ulong target_tv_addr)
1099 {
1100     struct target_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __get_user(tv->tv_sec, &target_tv->tv_sec);
1107     __get_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 0);
1110 
1111     return 0;
1112 }
1113 
1114 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1115                                             const struct timeval *tv)
1116 {
1117     struct target_timeval *target_tv;
1118 
1119     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1120         return -TARGET_EFAULT;
1121     }
1122 
1123     __put_user(tv->tv_sec, &target_tv->tv_sec);
1124     __put_user(tv->tv_usec, &target_tv->tv_usec);
1125 
1126     unlock_user_struct(target_tv, target_tv_addr, 1);
1127 
1128     return 0;
1129 }
1130 
1131 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1132 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1133                                                 abi_ulong target_tv_addr)
1134 {
1135     struct target__kernel_sock_timeval *target_tv;
1136 
1137     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1138         return -TARGET_EFAULT;
1139     }
1140 
1141     __get_user(tv->tv_sec, &target_tv->tv_sec);
1142     __get_user(tv->tv_usec, &target_tv->tv_usec);
1143 
1144     unlock_user_struct(target_tv, target_tv_addr, 0);
1145 
1146     return 0;
1147 }
1148 #endif
1149 
1150 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1151                                               const struct timeval *tv)
1152 {
1153     struct target__kernel_sock_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1156         return -TARGET_EFAULT;
1157     }
1158 
1159     __put_user(tv->tv_sec, &target_tv->tv_sec);
1160     __put_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 1);
1163 
1164     return 0;
1165 }
1166 
1167 #if defined(TARGET_NR_futex) || \
1168     defined(TARGET_NR_rt_sigtimedwait) || \
1169     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1170     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1171     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1172     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1173     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1174     defined(TARGET_NR_timer_settime) || \
1175     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1176 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1177                                                abi_ulong target_addr)
1178 {
1179     struct target_timespec *target_ts;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1185     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1186     unlock_user_struct(target_ts, target_addr, 0);
1187     return 0;
1188 }
1189 #endif
1190 
1191 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1192     defined(TARGET_NR_timer_settime64) || \
1193     defined(TARGET_NR_mq_timedsend_time64) || \
1194     defined(TARGET_NR_mq_timedreceive_time64) || \
1195     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1196     defined(TARGET_NR_clock_nanosleep_time64) || \
1197     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1198     defined(TARGET_NR_utimensat) || \
1199     defined(TARGET_NR_utimensat_time64) || \
1200     defined(TARGET_NR_semtimedop_time64) || \
1201     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1202 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1203                                                  abi_ulong target_addr)
1204 {
1205     struct target__kernel_timespec *target_ts;
1206 
1207     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1208         return -TARGET_EFAULT;
1209     }
1210     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1211     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1212     /* in 32bit mode, this drops the padding */
1213     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1214     unlock_user_struct(target_ts, target_addr, 0);
1215     return 0;
1216 }
1217 #endif
1218 
1219 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1220                                                struct timespec *host_ts)
1221 {
1222     struct target_timespec *target_ts;
1223 
1224     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1225         return -TARGET_EFAULT;
1226     }
1227     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1228     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1229     unlock_user_struct(target_ts, target_addr, 1);
1230     return 0;
1231 }
1232 
1233 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1234                                                  struct timespec *host_ts)
1235 {
1236     struct target__kernel_timespec *target_ts;
1237 
1238     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1239         return -TARGET_EFAULT;
1240     }
1241     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1242     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1243     unlock_user_struct(target_ts, target_addr, 1);
1244     return 0;
1245 }
1246 
1247 #if defined(TARGET_NR_gettimeofday)
1248 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1249                                              struct timezone *tz)
1250 {
1251     struct target_timezone *target_tz;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1254         return -TARGET_EFAULT;
1255     }
1256 
1257     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1258     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1259 
1260     unlock_user_struct(target_tz, target_tz_addr, 1);
1261 
1262     return 0;
1263 }
1264 #endif
1265 
1266 #if defined(TARGET_NR_settimeofday)
1267 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1268                                                abi_ulong target_tz_addr)
1269 {
1270     struct target_timezone *target_tz;
1271 
1272     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1273         return -TARGET_EFAULT;
1274     }
1275 
1276     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1277     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1278 
1279     unlock_user_struct(target_tz, target_tz_addr, 0);
1280 
1281     return 0;
1282 }
1283 #endif
1284 
1285 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1286 #include <mqueue.h>
1287 
1288 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1289                                               abi_ulong target_mq_attr_addr)
1290 {
1291     struct target_mq_attr *target_mq_attr;
1292 
1293     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1294                           target_mq_attr_addr, 1))
1295         return -TARGET_EFAULT;
1296 
1297     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1298     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1299     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1300     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1301 
1302     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1303 
1304     return 0;
1305 }
1306 
1307 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1308                                             const struct mq_attr *attr)
1309 {
1310     struct target_mq_attr *target_mq_attr;
1311 
1312     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1313                           target_mq_attr_addr, 0))
1314         return -TARGET_EFAULT;
1315 
1316     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1317     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1318     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1319     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1320 
1321     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1322 
1323     return 0;
1324 }
1325 #endif
1326 
1327 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1328 /* do_select() must return target values and target errnos. */
1329 static abi_long do_select(int n,
1330                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1331                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1332 {
1333     fd_set rfds, wfds, efds;
1334     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1335     struct timeval tv;
1336     struct timespec ts, *ts_ptr;
1337     abi_long ret;
1338 
1339     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1340     if (ret) {
1341         return ret;
1342     }
1343     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1344     if (ret) {
1345         return ret;
1346     }
1347     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1348     if (ret) {
1349         return ret;
1350     }
1351 
1352     if (target_tv_addr) {
1353         if (copy_from_user_timeval(&tv, target_tv_addr))
1354             return -TARGET_EFAULT;
1355         ts.tv_sec = tv.tv_sec;
1356         ts.tv_nsec = tv.tv_usec * 1000;
1357         ts_ptr = &ts;
1358     } else {
1359         ts_ptr = NULL;
1360     }
1361 
1362     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1363                                   ts_ptr, NULL));
1364 
1365     if (!is_error(ret)) {
1366         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1367             return -TARGET_EFAULT;
1368         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1369             return -TARGET_EFAULT;
1370         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1371             return -TARGET_EFAULT;
1372 
1373         if (target_tv_addr) {
1374             tv.tv_sec = ts.tv_sec;
1375             tv.tv_usec = ts.tv_nsec / 1000;
1376             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1377                 return -TARGET_EFAULT;
1378             }
1379         }
1380     }
1381 
1382     return ret;
1383 }
1384 
1385 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1386 static abi_long do_old_select(abi_ulong arg1)
1387 {
1388     struct target_sel_arg_struct *sel;
1389     abi_ulong inp, outp, exp, tvp;
1390     long nsel;
1391 
1392     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1393         return -TARGET_EFAULT;
1394     }
1395 
1396     nsel = tswapal(sel->n);
1397     inp = tswapal(sel->inp);
1398     outp = tswapal(sel->outp);
1399     exp = tswapal(sel->exp);
1400     tvp = tswapal(sel->tvp);
1401 
1402     unlock_user_struct(sel, arg1, 0);
1403 
1404     return do_select(nsel, inp, outp, exp, tvp);
1405 }
1406 #endif
1407 #endif
1408 
1409 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1410 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1411                             abi_long arg4, abi_long arg5, abi_long arg6,
1412                             bool time64)
1413 {
1414     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1415     fd_set rfds, wfds, efds;
1416     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1417     struct timespec ts, *ts_ptr;
1418     abi_long ret;
1419 
1420     /*
1421      * The 6th arg is actually two args smashed together,
1422      * so we cannot use the C library.
1423      */
1424     struct {
1425         sigset_t *set;
1426         size_t size;
1427     } sig, *sig_ptr;
1428 
1429     abi_ulong arg_sigset, arg_sigsize, *arg7;
1430 
1431     n = arg1;
1432     rfd_addr = arg2;
1433     wfd_addr = arg3;
1434     efd_addr = arg4;
1435     ts_addr = arg5;
1436 
1437     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1438     if (ret) {
1439         return ret;
1440     }
1441     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1446     if (ret) {
1447         return ret;
1448     }
1449 
1450     /*
1451      * This takes a timespec, and not a timeval, so we cannot
1452      * use the do_select() helper ...
1453      */
1454     if (ts_addr) {
1455         if (time64) {
1456             if (target_to_host_timespec64(&ts, ts_addr)) {
1457                 return -TARGET_EFAULT;
1458             }
1459         } else {
1460             if (target_to_host_timespec(&ts, ts_addr)) {
1461                 return -TARGET_EFAULT;
1462             }
1463         }
1464         ts_ptr = &ts;
1465     } else {
1466         ts_ptr = NULL;
1467     }
1468 
1469     /* Extract the two packed args for the sigset */
1470     sig_ptr = NULL;
1471     if (arg6) {
1472         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1473         if (!arg7) {
1474             return -TARGET_EFAULT;
1475         }
1476         arg_sigset = tswapal(arg7[0]);
1477         arg_sigsize = tswapal(arg7[1]);
1478         unlock_user(arg7, arg6, 0);
1479 
1480         if (arg_sigset) {
1481             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1482             if (ret != 0) {
1483                 return ret;
1484             }
1485             sig_ptr = &sig;
1486             sig.size = SIGSET_T_SIZE;
1487         }
1488     }
1489 
1490     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1491                                   ts_ptr, sig_ptr));
1492 
1493     if (sig_ptr) {
1494         finish_sigsuspend_mask(ret);
1495     }
1496 
1497     if (!is_error(ret)) {
1498         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1499             return -TARGET_EFAULT;
1500         }
1501         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1502             return -TARGET_EFAULT;
1503         }
1504         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1505             return -TARGET_EFAULT;
1506         }
1507         if (time64) {
1508             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1509                 return -TARGET_EFAULT;
1510             }
1511         } else {
1512             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1513                 return -TARGET_EFAULT;
1514             }
1515         }
1516     }
1517     return ret;
1518 }
1519 #endif
1520 
1521 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1522     defined(TARGET_NR_ppoll_time64)
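/*
 * Emulate poll()/ppoll()/ppoll_time64(): 'ppoll' selects the ppoll flavour
 * (timespec plus signal mask), 'time64' selects the 64-bit timespec layout.
 * The guest pollfd array is converted on the way in and the revents fields
 * are converted back on the way out.
 */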
1523 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1524                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1525 {
1526     struct target_pollfd *target_pfd;
1527     unsigned int nfds = arg2;
1528     struct pollfd *pfd;
1529     unsigned int i;
1530     abi_long ret;
1531 
1532     pfd = NULL;
1533     target_pfd = NULL;
1534     if (nfds) {
1535         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1536             return -TARGET_EINVAL;
1537         }
1538         target_pfd = lock_user(VERIFY_WRITE, arg1,
1539                                sizeof(struct target_pollfd) * nfds, 1);
1540         if (!target_pfd) {
1541             return -TARGET_EFAULT;
1542         }
1543 
1544         pfd = alloca(sizeof(struct pollfd) * nfds);
1545         for (i = 0; i < nfds; i++) {
1546             pfd[i].fd = tswap32(target_pfd[i].fd);
1547             pfd[i].events = tswap16(target_pfd[i].events);
1548         }
1549     }
1550     if (ppoll) {
1551         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1552         sigset_t *set = NULL;
1553 
1554         if (arg3) {
1555             if (time64) {
1556                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1557                     unlock_user(target_pfd, arg1, 0);
1558                     return -TARGET_EFAULT;
1559                 }
1560             } else {
1561                 if (target_to_host_timespec(timeout_ts, arg3)) {
1562                     unlock_user(target_pfd, arg1, 0);
1563                     return -TARGET_EFAULT;
1564                 }
1565             }
1566         } else {
1567             timeout_ts = NULL;
1568         }
1569 
1570         if (arg4) {
1571             ret = process_sigsuspend_mask(&set, arg4, arg5);
1572             if (ret != 0) {
1573                 unlock_user(target_pfd, arg1, 0);
1574                 return ret;
1575             }
1576         }
1577 
1578         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1579                                    set, SIGSET_T_SIZE));
1580 
1581         if (set) {
1582             finish_sigsuspend_mask(ret);
1583         }
1584         if (!is_error(ret) && arg3) {
1585             if (time64) {
1586                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1587                     return -TARGET_EFAULT;
1588                 }
1589             } else {
1590                 if (host_to_target_timespec(arg3, timeout_ts)) {
1591                     return -TARGET_EFAULT;
1592                 }
1593             }
1594         }
1595     } else {
1596           struct timespec ts, *pts;
1597 
1598           if (arg3 >= 0) {
1599               /* Convert ms to secs, ns */
1600               ts.tv_sec = arg3 / 1000;
1601               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1602               pts = &ts;
1603           } else {
1604               /* -ve poll() timeout means "infinite" */
1605               pts = NULL;
1606           }
1607           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1608     }
1609 
1610     if (!is_error(ret)) {
1611         for (i = 0; i < nfds; i++) {
1612             target_pfd[i].revents = tswap16(pfd[i].revents);
1613         }
1614     }
1615     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1616     return ret;
1617 }
1618 #endif
1619 
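/*
 * Common helper for pipe() and pipe2(): create the host pipe, then hand the
 * two descriptors back either through the target's register-based convention
 * for the original pipe syscall or by storing them at 'pipedes'.
 */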
1620 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1621                         int flags, int is_pipe2)
1622 {
1623     int host_pipe[2];
1624     abi_long ret;
1625     ret = pipe2(host_pipe, flags);
1626 
1627     if (is_error(ret))
1628         return get_errno(ret);
1629 
1630     /* Several targets have special calling conventions for the original
1631        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1632     if (!is_pipe2) {
1633 #if defined(TARGET_ALPHA)
1634         cpu_env->ir[IR_A4] = host_pipe[1];
1635         return host_pipe[0];
1636 #elif defined(TARGET_MIPS)
1637         cpu_env->active_tc.gpr[3] = host_pipe[1];
1638         return host_pipe[0];
1639 #elif defined(TARGET_SH4)
1640         cpu_env->gregs[1] = host_pipe[1];
1641         return host_pipe[0];
1642 #elif defined(TARGET_SPARC)
1643         cpu_env->regwptr[1] = host_pipe[1];
1644         return host_pipe[0];
1645 #endif
1646     }
1647 
1648     if (put_user_s32(host_pipe[0], pipedes)
1649         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1650         return -TARGET_EFAULT;
1651     return get_errno(ret);
1652 }
1653 
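/*
 * Convert a guest sockaddr at 'target_addr' into the host 'addr' buffer:
 * byte-swap the family and the family-specific fields that need it
 * (AF_NETLINK pid/groups, AF_PACKET ifindex/hatype, AF_INET6 scope id)
 * and repair AF_UNIX lengths that omit the trailing NUL of sun_path.
 */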
1654 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1655                                                abi_ulong target_addr,
1656                                                socklen_t len)
1657 {
1658     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1659     sa_family_t sa_family;
1660     struct target_sockaddr *target_saddr;
1661 
1662     if (fd_trans_target_to_host_addr(fd)) {
1663         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1664     }
1665 
1666     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1667     if (!target_saddr)
1668         return -TARGET_EFAULT;
1669 
1670     sa_family = tswap16(target_saddr->sa_family);
1671 
1672     /* Oops. The caller might send an incomplete sun_path; sun_path
1673      * must be terminated by \0 (see the manual page), but
1674      * unfortunately it is quite common to specify sockaddr_un
1675      * length as "strlen(x->sun_path)" while it should be
1676      * "strlen(...) + 1". We'll fix that here if needed.
1677      * The Linux kernel has a similar feature.
1678      */
1679 
1680     if (sa_family == AF_UNIX) {
1681         if (len < unix_maxlen && len > 0) {
1682             char *cp = (char*)target_saddr;
1683 
1684             if ( cp[len-1] && !cp[len] )
1685                 len++;
1686         }
1687         if (len > unix_maxlen)
1688             len = unix_maxlen;
1689     }
1690 
1691     memcpy(addr, target_saddr, len);
1692     addr->sa_family = sa_family;
1693     if (sa_family == AF_NETLINK) {
1694         struct sockaddr_nl *nladdr;
1695 
1696         nladdr = (struct sockaddr_nl *)addr;
1697         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1698         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1699     } else if (sa_family == AF_PACKET) {
1700 	struct target_sockaddr_ll *lladdr;
1701 
1702 	lladdr = (struct target_sockaddr_ll *)addr;
1703 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1704 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1705     } else if (sa_family == AF_INET6) {
1706         struct sockaddr_in6 *in6addr;
1707 
1708         in6addr = (struct sockaddr_in6 *)addr;
1709         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1710     }
1711     unlock_user(target_saddr, target_addr, 0);
1712 
1713     return 0;
1714 }
1715 
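/*
 * Copy a host sockaddr back to guest memory at 'target_addr', byte-swapping
 * the family and, when the buffer is large enough, the family-specific
 * fields of AF_NETLINK, AF_PACKET and AF_INET6 addresses.
 */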
1716 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1717                                                struct sockaddr *addr,
1718                                                socklen_t len)
1719 {
1720     struct target_sockaddr *target_saddr;
1721 
1722     if (len == 0) {
1723         return 0;
1724     }
1725     assert(addr);
1726 
1727     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1728     if (!target_saddr)
1729         return -TARGET_EFAULT;
1730     memcpy(target_saddr, addr, len);
1731     if (len >= offsetof(struct target_sockaddr, sa_family) +
1732         sizeof(target_saddr->sa_family)) {
1733         target_saddr->sa_family = tswap16(addr->sa_family);
1734     }
1735     if (addr->sa_family == AF_NETLINK &&
1736         len >= sizeof(struct target_sockaddr_nl)) {
1737         struct target_sockaddr_nl *target_nl =
1738                (struct target_sockaddr_nl *)target_saddr;
1739         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1740         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1741     } else if (addr->sa_family == AF_PACKET) {
1742         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1743         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1744         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1745     } else if (addr->sa_family == AF_INET6 &&
1746                len >= sizeof(struct target_sockaddr_in6)) {
1747         struct target_sockaddr_in6 *target_in6 =
1748                (struct target_sockaddr_in6 *)target_saddr;
1749         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1750     }
1751     unlock_user(target_saddr, target_addr, len);
1752 
1753     return 0;
1754 }
1755 
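/*
 * Convert the control messages (ancillary data) of a guest msghdr into the
 * host msghdr, walking both cmsg chains in parallel. SCM_RIGHTS and
 * SCM_CREDENTIALS payloads are converted element by element and SOL_ALG
 * data has its leading 32-bit word byte-swapped; anything else is copied
 * verbatim with a LOG_UNIMP warning.
 */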
1756 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1757                                            struct target_msghdr *target_msgh)
1758 {
1759     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1760     abi_long msg_controllen;
1761     abi_ulong target_cmsg_addr;
1762     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1763     socklen_t space = 0;
1764 
1765     msg_controllen = tswapal(target_msgh->msg_controllen);
1766     if (msg_controllen < sizeof (struct target_cmsghdr))
1767         goto the_end;
1768     target_cmsg_addr = tswapal(target_msgh->msg_control);
1769     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1770     target_cmsg_start = target_cmsg;
1771     if (!target_cmsg)
1772         return -TARGET_EFAULT;
1773 
1774     while (cmsg && target_cmsg) {
1775         void *data = CMSG_DATA(cmsg);
1776         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1777 
1778         int len = tswapal(target_cmsg->cmsg_len)
1779             - sizeof(struct target_cmsghdr);
1780 
1781         space += CMSG_SPACE(len);
1782         if (space > msgh->msg_controllen) {
1783             space -= CMSG_SPACE(len);
1784             /* This is a QEMU bug, since we allocated the payload
1785              * area ourselves (unlike overflow in host-to-target
1786              * conversion, which is just the guest giving us a buffer
1787              * that's too small). It can't happen for the payload types
1788              * we currently support; if it becomes an issue in future
1789              * we would need to improve our allocation strategy to
1790              * something more intelligent than "twice the size of the
1791              * target buffer we're reading from".
1792              */
1793             qemu_log_mask(LOG_UNIMP,
1794                           ("Unsupported ancillary data %d/%d: "
1795                            "unhandled msg size\n"),
1796                           tswap32(target_cmsg->cmsg_level),
1797                           tswap32(target_cmsg->cmsg_type));
1798             break;
1799         }
1800 
1801         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1802             cmsg->cmsg_level = SOL_SOCKET;
1803         } else {
1804             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1805         }
1806         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1807         cmsg->cmsg_len = CMSG_LEN(len);
1808 
1809         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1810             int *fd = (int *)data;
1811             int *target_fd = (int *)target_data;
1812             int i, numfds = len / sizeof(int);
1813 
1814             for (i = 0; i < numfds; i++) {
1815                 __get_user(fd[i], target_fd + i);
1816             }
1817         } else if (cmsg->cmsg_level == SOL_SOCKET
1818                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1819             struct ucred *cred = (struct ucred *)data;
1820             struct target_ucred *target_cred =
1821                 (struct target_ucred *)target_data;
1822 
1823             __get_user(cred->pid, &target_cred->pid);
1824             __get_user(cred->uid, &target_cred->uid);
1825             __get_user(cred->gid, &target_cred->gid);
1826         } else if (cmsg->cmsg_level == SOL_ALG) {
1827             uint32_t *dst = (uint32_t *)data;
1828 
1829             memcpy(dst, target_data, len);
1830             /* fix endianness of first 32-bit word */
1831             if (len >= sizeof(uint32_t)) {
1832                 *dst = tswap32(*dst);
1833             }
1834         } else {
1835             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1836                           cmsg->cmsg_level, cmsg->cmsg_type);
1837             memcpy(data, target_data, len);
1838         }
1839 
1840         cmsg = CMSG_NXTHDR(msgh, cmsg);
1841         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1842                                          target_cmsg_start);
1843     }
1844     unlock_user(target_cmsg, target_cmsg_addr, 0);
1845  the_end:
1846     msgh->msg_controllen = space;
1847     return 0;
1848 }
1849 
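/*
 * Convert host control messages back into the guest msghdr, setting
 * MSG_CTRUNC and truncating when the guest control buffer is too small.
 * Payloads whose target layout differs from the host one (e.g.
 * SO_TIMESTAMP's struct timeval) are converted explicitly; unknown types
 * are copied as-is after a LOG_UNIMP warning.
 */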
1850 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1851                                            struct msghdr *msgh)
1852 {
1853     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1854     abi_long msg_controllen;
1855     abi_ulong target_cmsg_addr;
1856     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1857     socklen_t space = 0;
1858 
1859     msg_controllen = tswapal(target_msgh->msg_controllen);
1860     if (msg_controllen < sizeof (struct target_cmsghdr))
1861         goto the_end;
1862     target_cmsg_addr = tswapal(target_msgh->msg_control);
1863     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1864     target_cmsg_start = target_cmsg;
1865     if (!target_cmsg)
1866         return -TARGET_EFAULT;
1867 
1868     while (cmsg && target_cmsg) {
1869         void *data = CMSG_DATA(cmsg);
1870         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1871 
1872         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1873         int tgt_len, tgt_space;
1874 
1875         /* We never copy a half-header but may copy half-data;
1876          * this is Linux's behaviour in put_cmsg(). Note that
1877          * truncation here is a guest problem (which we report
1878          * to the guest via the CTRUNC bit), unlike truncation
1879          * in target_to_host_cmsg, which is a QEMU bug.
1880          */
1881         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1882             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1883             break;
1884         }
1885 
1886         if (cmsg->cmsg_level == SOL_SOCKET) {
1887             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1888         } else {
1889             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1890         }
1891         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1892 
1893         /* Payload types which need a different size of payload on
1894          * the target must adjust tgt_len here.
1895          */
1896         tgt_len = len;
1897         switch (cmsg->cmsg_level) {
1898         case SOL_SOCKET:
1899             switch (cmsg->cmsg_type) {
1900             case SO_TIMESTAMP:
1901                 tgt_len = sizeof(struct target_timeval);
1902                 break;
1903             default:
1904                 break;
1905             }
1906             break;
1907         default:
1908             break;
1909         }
1910 
1911         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1912             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1913             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1914         }
1915 
1916         /* We must now copy-and-convert len bytes of payload
1917          * into tgt_len bytes of destination space. Bear in mind
1918          * that in both source and destination we may be dealing
1919          * with a truncated value!
1920          */
1921         switch (cmsg->cmsg_level) {
1922         case SOL_SOCKET:
1923             switch (cmsg->cmsg_type) {
1924             case SCM_RIGHTS:
1925             {
1926                 int *fd = (int *)data;
1927                 int *target_fd = (int *)target_data;
1928                 int i, numfds = tgt_len / sizeof(int);
1929 
1930                 for (i = 0; i < numfds; i++) {
1931                     __put_user(fd[i], target_fd + i);
1932                 }
1933                 break;
1934             }
1935             case SO_TIMESTAMP:
1936             {
1937                 struct timeval *tv = (struct timeval *)data;
1938                 struct target_timeval *target_tv =
1939                     (struct target_timeval *)target_data;
1940 
1941                 if (len != sizeof(struct timeval) ||
1942                     tgt_len != sizeof(struct target_timeval)) {
1943                     goto unimplemented;
1944                 }
1945 
1946                 /* copy struct timeval to target */
1947                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1948                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1949                 break;
1950             }
1951             case SCM_CREDENTIALS:
1952             {
1953                 struct ucred *cred = (struct ucred *)data;
1954                 struct target_ucred *target_cred =
1955                     (struct target_ucred *)target_data;
1956 
1957                 __put_user(cred->pid, &target_cred->pid);
1958                 __put_user(cred->uid, &target_cred->uid);
1959                 __put_user(cred->gid, &target_cred->gid);
1960                 break;
1961             }
1962             default:
1963                 goto unimplemented;
1964             }
1965             break;
1966 
1967         case SOL_IP:
1968             switch (cmsg->cmsg_type) {
1969             case IP_TTL:
1970             {
1971                 uint32_t *v = (uint32_t *)data;
1972                 uint32_t *t_int = (uint32_t *)target_data;
1973 
1974                 if (len != sizeof(uint32_t) ||
1975                     tgt_len != sizeof(uint32_t)) {
1976                     goto unimplemented;
1977                 }
1978                 __put_user(*v, t_int);
1979                 break;
1980             }
1981             case IP_RECVERR:
1982             {
1983                 struct errhdr_t {
1984                    struct sock_extended_err ee;
1985                    struct sockaddr_in offender;
1986                 };
1987                 struct errhdr_t *errh = (struct errhdr_t *)data;
1988                 struct errhdr_t *target_errh =
1989                     (struct errhdr_t *)target_data;
1990 
1991                 if (len != sizeof(struct errhdr_t) ||
1992                     tgt_len != sizeof(struct errhdr_t)) {
1993                     goto unimplemented;
1994                 }
1995                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1996                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1997                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1998                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1999                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2000                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2001                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2002                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2003                     (void *) &errh->offender, sizeof(errh->offender));
2004                 break;
2005             }
2006             case IP_PKTINFO:
2007             {
2008                 struct in_pktinfo *pkti = data;
2009                 struct target_in_pktinfo *target_pi = target_data;
2010 
2011                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2012                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2013                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2014                 break;
2015             }
2016             default:
2017                 goto unimplemented;
2018             }
2019             break;
2020 
2021         case SOL_IPV6:
2022             switch (cmsg->cmsg_type) {
2023             case IPV6_HOPLIMIT:
2024             {
2025                 uint32_t *v = (uint32_t *)data;
2026                 uint32_t *t_int = (uint32_t *)target_data;
2027 
2028                 if (len != sizeof(uint32_t) ||
2029                     tgt_len != sizeof(uint32_t)) {
2030                     goto unimplemented;
2031                 }
2032                 __put_user(*v, t_int);
2033                 break;
2034             }
2035             case IPV6_RECVERR:
2036             {
2037                 struct errhdr6_t {
2038                    struct sock_extended_err ee;
2039                    struct sockaddr_in6 offender;
2040                 };
2041                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2042                 struct errhdr6_t *target_errh =
2043                     (struct errhdr6_t *)target_data;
2044 
2045                 if (len != sizeof(struct errhdr6_t) ||
2046                     tgt_len != sizeof(struct errhdr6_t)) {
2047                     goto unimplemented;
2048                 }
2049                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2050                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2051                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2052                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2053                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2054                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2055                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2056                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2057                     (void *) &errh->offender, sizeof(errh->offender));
2058                 break;
2059             }
2060             default:
2061                 goto unimplemented;
2062             }
2063             break;
2064 
2065         default:
2066         unimplemented:
2067             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2068                           cmsg->cmsg_level, cmsg->cmsg_type);
2069             memcpy(target_data, data, MIN(len, tgt_len));
2070             if (tgt_len > len) {
2071                 memset(target_data + len, 0, tgt_len - len);
2072             }
2073         }
2074 
2075         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2076         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2077         if (msg_controllen < tgt_space) {
2078             tgt_space = msg_controllen;
2079         }
2080         msg_controllen -= tgt_space;
2081         space += tgt_space;
2082         cmsg = CMSG_NXTHDR(msgh, cmsg);
2083         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2084                                          target_cmsg_start);
2085     }
2086     unlock_user(target_cmsg, target_cmsg_addr, space);
2087  the_end:
2088     target_msgh->msg_controllen = tswapal(space);
2089     return 0;
2090 }
2091 
2092 /* do_setsockopt() must return target values and target errnos. */
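/* Plain 'int' options are fetched from the guest with get_user_u32();
 * structured options (ip_mreqn, ipv6_mreq, icmp6_filter, sock_fprog,
 * linger, timeval, ...) are converted to the host layout before the
 * host setsockopt() call.
 */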
2093 static abi_long do_setsockopt(int sockfd, int level, int optname,
2094                               abi_ulong optval_addr, socklen_t optlen)
2095 {
2096     abi_long ret;
2097     int val;
2098 
2099     switch(level) {
2100     case SOL_TCP:
2101     case SOL_UDP:
2102         /* TCP and UDP options all take an 'int' value.  */
2103         if (optlen < sizeof(uint32_t))
2104             return -TARGET_EINVAL;
2105 
2106         if (get_user_u32(val, optval_addr))
2107             return -TARGET_EFAULT;
2108         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2109         break;
2110     case SOL_IP:
2111         switch(optname) {
2112         case IP_TOS:
2113         case IP_TTL:
2114         case IP_HDRINCL:
2115         case IP_ROUTER_ALERT:
2116         case IP_RECVOPTS:
2117         case IP_RETOPTS:
2118         case IP_PKTINFO:
2119         case IP_MTU_DISCOVER:
2120         case IP_RECVERR:
2121         case IP_RECVTTL:
2122         case IP_RECVTOS:
2123 #ifdef IP_FREEBIND
2124         case IP_FREEBIND:
2125 #endif
2126         case IP_MULTICAST_TTL:
2127         case IP_MULTICAST_LOOP:
2128             val = 0;
2129             if (optlen >= sizeof(uint32_t)) {
2130                 if (get_user_u32(val, optval_addr))
2131                     return -TARGET_EFAULT;
2132             } else if (optlen >= 1) {
2133                 if (get_user_u8(val, optval_addr))
2134                     return -TARGET_EFAULT;
2135             }
2136             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2137             break;
2138         case IP_MULTICAST_IF:
2139         case IP_ADD_MEMBERSHIP:
2140         case IP_DROP_MEMBERSHIP:
2141         {
2142             struct ip_mreqn ip_mreq;
2143             struct target_ip_mreqn *target_smreqn;
2144             int min_size;
2145 
2146             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2147                               sizeof(struct target_ip_mreq));
2148 
2149             if (optname == IP_MULTICAST_IF) {
2150                 min_size = sizeof(struct in_addr);
2151             } else {
2152                 min_size = sizeof(struct target_ip_mreq);
2153             }
2154             if (optlen < min_size ||
2155                 optlen > sizeof (struct target_ip_mreqn)) {
2156                 return -TARGET_EINVAL;
2157             }
2158 
2159             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2160             if (!target_smreqn) {
2161                 return -TARGET_EFAULT;
2162             }
2163             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2164             if (optlen >= sizeof(struct target_ip_mreq)) {
2165                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2166                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2167                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2168                     optlen = sizeof(struct ip_mreqn);
2169                 }
2170             }
2171             unlock_user(target_smreqn, optval_addr, 0);
2172             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2173             break;
2174         }
2175         case IP_BLOCK_SOURCE:
2176         case IP_UNBLOCK_SOURCE:
2177         case IP_ADD_SOURCE_MEMBERSHIP:
2178         case IP_DROP_SOURCE_MEMBERSHIP:
2179         {
2180             struct ip_mreq_source *ip_mreq_source;
2181 
2182             if (optlen != sizeof (struct target_ip_mreq_source))
2183                 return -TARGET_EINVAL;
2184 
2185             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2186             if (!ip_mreq_source) {
2187                 return -TARGET_EFAULT;
2188             }
2189             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2190             unlock_user (ip_mreq_source, optval_addr, 0);
2191             break;
2192         }
2193         default:
2194             goto unimplemented;
2195         }
2196         break;
2197     case SOL_IPV6:
2198         switch (optname) {
2199         case IPV6_MTU_DISCOVER:
2200         case IPV6_MTU:
2201         case IPV6_V6ONLY:
2202         case IPV6_RECVPKTINFO:
2203         case IPV6_UNICAST_HOPS:
2204         case IPV6_MULTICAST_HOPS:
2205         case IPV6_MULTICAST_LOOP:
2206         case IPV6_RECVERR:
2207         case IPV6_RECVHOPLIMIT:
2208         case IPV6_2292HOPLIMIT:
2209         case IPV6_CHECKSUM:
2210         case IPV6_ADDRFORM:
2211         case IPV6_2292PKTINFO:
2212         case IPV6_RECVTCLASS:
2213         case IPV6_RECVRTHDR:
2214         case IPV6_2292RTHDR:
2215         case IPV6_RECVHOPOPTS:
2216         case IPV6_2292HOPOPTS:
2217         case IPV6_RECVDSTOPTS:
2218         case IPV6_2292DSTOPTS:
2219         case IPV6_TCLASS:
2220         case IPV6_ADDR_PREFERENCES:
2221 #ifdef IPV6_RECVPATHMTU
2222         case IPV6_RECVPATHMTU:
2223 #endif
2224 #ifdef IPV6_TRANSPARENT
2225         case IPV6_TRANSPARENT:
2226 #endif
2227 #ifdef IPV6_FREEBIND
2228         case IPV6_FREEBIND:
2229 #endif
2230 #ifdef IPV6_RECVORIGDSTADDR
2231         case IPV6_RECVORIGDSTADDR:
2232 #endif
2233             val = 0;
2234             if (optlen < sizeof(uint32_t)) {
2235                 return -TARGET_EINVAL;
2236             }
2237             if (get_user_u32(val, optval_addr)) {
2238                 return -TARGET_EFAULT;
2239             }
2240             ret = get_errno(setsockopt(sockfd, level, optname,
2241                                        &val, sizeof(val)));
2242             break;
2243         case IPV6_PKTINFO:
2244         {
2245             struct in6_pktinfo pki;
2246 
2247             if (optlen < sizeof(pki)) {
2248                 return -TARGET_EINVAL;
2249             }
2250 
2251             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2252                 return -TARGET_EFAULT;
2253             }
2254 
2255             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2256 
2257             ret = get_errno(setsockopt(sockfd, level, optname,
2258                                        &pki, sizeof(pki)));
2259             break;
2260         }
2261         case IPV6_ADD_MEMBERSHIP:
2262         case IPV6_DROP_MEMBERSHIP:
2263         {
2264             struct ipv6_mreq ipv6mreq;
2265 
2266             if (optlen < sizeof(ipv6mreq)) {
2267                 return -TARGET_EINVAL;
2268             }
2269 
2270             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2271                 return -TARGET_EFAULT;
2272             }
2273 
2274             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2275 
2276             ret = get_errno(setsockopt(sockfd, level, optname,
2277                                        &ipv6mreq, sizeof(ipv6mreq)));
2278             break;
2279         }
2280         default:
2281             goto unimplemented;
2282         }
2283         break;
2284     case SOL_ICMPV6:
2285         switch (optname) {
2286         case ICMPV6_FILTER:
2287         {
2288             struct icmp6_filter icmp6f;
2289 
2290             if (optlen > sizeof(icmp6f)) {
2291                 optlen = sizeof(icmp6f);
2292             }
2293 
2294             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2295                 return -TARGET_EFAULT;
2296             }
2297 
2298             for (val = 0; val < 8; val++) {
2299                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2300             }
2301 
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &icmp6f, optlen));
2304             break;
2305         }
2306         default:
2307             goto unimplemented;
2308         }
2309         break;
2310     case SOL_RAW:
2311         switch (optname) {
2312         case ICMP_FILTER:
2313         case IPV6_CHECKSUM:
2314             /* those take a u32 value */
2315             if (optlen < sizeof(uint32_t)) {
2316                 return -TARGET_EINVAL;
2317             }
2318 
2319             if (get_user_u32(val, optval_addr)) {
2320                 return -TARGET_EFAULT;
2321             }
2322             ret = get_errno(setsockopt(sockfd, level, optname,
2323                                        &val, sizeof(val)));
2324             break;
2325 
2326         default:
2327             goto unimplemented;
2328         }
2329         break;
2330 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2331     case SOL_ALG:
2332         switch (optname) {
2333         case ALG_SET_KEY:
2334         {
2335             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2336             if (!alg_key) {
2337                 return -TARGET_EFAULT;
2338             }
2339             ret = get_errno(setsockopt(sockfd, level, optname,
2340                                        alg_key, optlen));
2341             unlock_user(alg_key, optval_addr, optlen);
2342             break;
2343         }
2344         case ALG_SET_AEAD_AUTHSIZE:
2345         {
2346             ret = get_errno(setsockopt(sockfd, level, optname,
2347                                        NULL, optlen));
2348             break;
2349         }
2350         default:
2351             goto unimplemented;
2352         }
2353         break;
2354 #endif
2355     case TARGET_SOL_SOCKET:
2356         switch (optname) {
2357         case TARGET_SO_RCVTIMEO:
2358         case TARGET_SO_SNDTIMEO:
2359         {
2360                 struct timeval tv;
2361 
2362                 if (optlen != sizeof(struct target_timeval)) {
2363                     return -TARGET_EINVAL;
2364                 }
2365 
2366                 if (copy_from_user_timeval(&tv, optval_addr)) {
2367                     return -TARGET_EFAULT;
2368                 }
2369 
2370                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2371                                 optname == TARGET_SO_RCVTIMEO ?
2372                                     SO_RCVTIMEO : SO_SNDTIMEO,
2373                                 &tv, sizeof(tv)));
2374                 return ret;
2375         }
2376         case TARGET_SO_ATTACH_FILTER:
2377         {
2378                 struct target_sock_fprog *tfprog;
2379                 struct target_sock_filter *tfilter;
2380                 struct sock_fprog fprog;
2381                 struct sock_filter *filter;
2382                 int i;
2383 
2384                 if (optlen != sizeof(*tfprog)) {
2385                     return -TARGET_EINVAL;
2386                 }
2387                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2388                     return -TARGET_EFAULT;
2389                 }
2390                 if (!lock_user_struct(VERIFY_READ, tfilter,
2391                                       tswapal(tfprog->filter), 0)) {
2392                     unlock_user_struct(tfprog, optval_addr, 1);
2393                     return -TARGET_EFAULT;
2394                 }
2395 
2396                 fprog.len = tswap16(tfprog->len);
2397                 filter = g_try_new(struct sock_filter, fprog.len);
2398                 if (filter == NULL) {
2399                     unlock_user_struct(tfilter, tfprog->filter, 1);
2400                     unlock_user_struct(tfprog, optval_addr, 1);
2401                     return -TARGET_ENOMEM;
2402                 }
2403                 for (i = 0; i < fprog.len; i++) {
2404                     filter[i].code = tswap16(tfilter[i].code);
2405                     filter[i].jt = tfilter[i].jt;
2406                     filter[i].jf = tfilter[i].jf;
2407                     filter[i].k = tswap32(tfilter[i].k);
2408                 }
2409                 fprog.filter = filter;
2410 
2411                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2412                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2413                 g_free(filter);
2414 
2415                 unlock_user_struct(tfilter, tfprog->filter, 1);
2416                 unlock_user_struct(tfprog, optval_addr, 1);
2417                 return ret;
2418         }
2419 	case TARGET_SO_BINDTODEVICE:
2420 	{
2421 		char *dev_ifname, *addr_ifname;
2422 
2423 		if (optlen > IFNAMSIZ - 1) {
2424 		    optlen = IFNAMSIZ - 1;
2425 		}
2426 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2427 		if (!dev_ifname) {
2428 		    return -TARGET_EFAULT;
2429 		}
2430 		optname = SO_BINDTODEVICE;
2431 		addr_ifname = alloca(IFNAMSIZ);
2432 		memcpy(addr_ifname, dev_ifname, optlen);
2433 		addr_ifname[optlen] = 0;
2434 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2435                                            addr_ifname, optlen));
2436 		unlock_user (dev_ifname, optval_addr, 0);
2437 		return ret;
2438 	}
2439         case TARGET_SO_LINGER:
2440         {
2441                 struct linger lg;
2442                 struct target_linger *tlg;
2443 
2444                 if (optlen != sizeof(struct target_linger)) {
2445                     return -TARGET_EINVAL;
2446                 }
2447                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2448                     return -TARGET_EFAULT;
2449                 }
2450                 __get_user(lg.l_onoff, &tlg->l_onoff);
2451                 __get_user(lg.l_linger, &tlg->l_linger);
2452                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2453                                 &lg, sizeof(lg)));
2454                 unlock_user_struct(tlg, optval_addr, 0);
2455                 return ret;
2456         }
2457             /* Options with 'int' argument.  */
2458         case TARGET_SO_DEBUG:
2459 		optname = SO_DEBUG;
2460 		break;
2461         case TARGET_SO_REUSEADDR:
2462 		optname = SO_REUSEADDR;
2463 		break;
2464 #ifdef SO_REUSEPORT
2465         case TARGET_SO_REUSEPORT:
2466                 optname = SO_REUSEPORT;
2467                 break;
2468 #endif
2469         case TARGET_SO_TYPE:
2470 		optname = SO_TYPE;
2471 		break;
2472         case TARGET_SO_ERROR:
2473 		optname = SO_ERROR;
2474 		break;
2475         case TARGET_SO_DONTROUTE:
2476 		optname = SO_DONTROUTE;
2477 		break;
2478         case TARGET_SO_BROADCAST:
2479 		optname = SO_BROADCAST;
2480 		break;
2481         case TARGET_SO_SNDBUF:
2482 		optname = SO_SNDBUF;
2483 		break;
2484         case TARGET_SO_SNDBUFFORCE:
2485                 optname = SO_SNDBUFFORCE;
2486                 break;
2487         case TARGET_SO_RCVBUF:
2488 		optname = SO_RCVBUF;
2489 		break;
2490         case TARGET_SO_RCVBUFFORCE:
2491                 optname = SO_RCVBUFFORCE;
2492                 break;
2493         case TARGET_SO_KEEPALIVE:
2494 		optname = SO_KEEPALIVE;
2495 		break;
2496         case TARGET_SO_OOBINLINE:
2497 		optname = SO_OOBINLINE;
2498 		break;
2499         case TARGET_SO_NO_CHECK:
2500 		optname = SO_NO_CHECK;
2501 		break;
2502         case TARGET_SO_PRIORITY:
2503 		optname = SO_PRIORITY;
2504 		break;
2505 #ifdef SO_BSDCOMPAT
2506         case TARGET_SO_BSDCOMPAT:
2507 		optname = SO_BSDCOMPAT;
2508 		break;
2509 #endif
2510         case TARGET_SO_PASSCRED:
2511 		optname = SO_PASSCRED;
2512 		break;
2513         case TARGET_SO_PASSSEC:
2514                 optname = SO_PASSSEC;
2515                 break;
2516         case TARGET_SO_TIMESTAMP:
2517 		optname = SO_TIMESTAMP;
2518 		break;
2519         case TARGET_SO_RCVLOWAT:
2520 		optname = SO_RCVLOWAT;
2521 		break;
2522         default:
2523             goto unimplemented;
2524         }
2525 	if (optlen < sizeof(uint32_t))
2526             return -TARGET_EINVAL;
2527 
2528 	if (get_user_u32(val, optval_addr))
2529             return -TARGET_EFAULT;
2530 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2531         break;
2532 #ifdef SOL_NETLINK
2533     case SOL_NETLINK:
2534         switch (optname) {
2535         case NETLINK_PKTINFO:
2536         case NETLINK_ADD_MEMBERSHIP:
2537         case NETLINK_DROP_MEMBERSHIP:
2538         case NETLINK_BROADCAST_ERROR:
2539         case NETLINK_NO_ENOBUFS:
2540 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2541         case NETLINK_LISTEN_ALL_NSID:
2542         case NETLINK_CAP_ACK:
2543 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2545         case NETLINK_EXT_ACK:
2546 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2548         case NETLINK_GET_STRICT_CHK:
2549 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2550             break;
2551         default:
2552             goto unimplemented;
2553         }
2554         val = 0;
2555         if (optlen < sizeof(uint32_t)) {
2556             return -TARGET_EINVAL;
2557         }
2558         if (get_user_u32(val, optval_addr)) {
2559             return -TARGET_EFAULT;
2560         }
2561         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2562                                    sizeof(val)));
2563         break;
2564 #endif /* SOL_NETLINK */
2565     default:
2566     unimplemented:
2567         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2568                       level, optname);
2569         ret = -TARGET_ENOPROTOOPT;
2570     }
2571     return ret;
2572 }
2573 
2574 /* do_getsockopt() must return target values and target errnos. */
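/* The value returned by the host getsockopt() is converted back to the
 * guest layout and the guest's optlen is updated; SO_TYPE and SO_ERROR
 * results are additionally mapped to the target's socket-type and errno
 * numbering.
 */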
2575 static abi_long do_getsockopt(int sockfd, int level, int optname,
2576                               abi_ulong optval_addr, abi_ulong optlen)
2577 {
2578     abi_long ret;
2579     int len, val;
2580     socklen_t lv;
2581 
2582     switch(level) {
2583     case TARGET_SOL_SOCKET:
2584         level = SOL_SOCKET;
2585         switch (optname) {
2586         /* These don't just return a single integer */
2587         case TARGET_SO_PEERNAME:
2588             goto unimplemented;
2589         case TARGET_SO_RCVTIMEO: {
2590             struct timeval tv;
2591             socklen_t tvlen;
2592 
2593             optname = SO_RCVTIMEO;
2594 
2595 get_timeout:
2596             if (get_user_u32(len, optlen)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             if (len < 0) {
2600                 return -TARGET_EINVAL;
2601             }
2602 
2603             tvlen = sizeof(tv);
2604             ret = get_errno(getsockopt(sockfd, level, optname,
2605                                        &tv, &tvlen));
2606             if (ret < 0) {
2607                 return ret;
2608             }
2609             if (len > sizeof(struct target_timeval)) {
2610                 len = sizeof(struct target_timeval);
2611             }
2612             if (copy_to_user_timeval(optval_addr, &tv)) {
2613                 return -TARGET_EFAULT;
2614             }
2615             if (put_user_u32(len, optlen)) {
2616                 return -TARGET_EFAULT;
2617             }
2618             break;
2619         }
2620         case TARGET_SO_SNDTIMEO:
2621             optname = SO_SNDTIMEO;
2622             goto get_timeout;
2623         case TARGET_SO_PEERCRED: {
2624             struct ucred cr;
2625             socklen_t crlen;
2626             struct target_ucred *tcr;
2627 
2628             if (get_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             if (len < 0) {
2632                 return -TARGET_EINVAL;
2633             }
2634 
2635             crlen = sizeof(cr);
2636             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2637                                        &cr, &crlen));
2638             if (ret < 0) {
2639                 return ret;
2640             }
2641             if (len > crlen) {
2642                 len = crlen;
2643             }
2644             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2645                 return -TARGET_EFAULT;
2646             }
2647             __put_user(cr.pid, &tcr->pid);
2648             __put_user(cr.uid, &tcr->uid);
2649             __put_user(cr.gid, &tcr->gid);
2650             unlock_user_struct(tcr, optval_addr, 1);
2651             if (put_user_u32(len, optlen)) {
2652                 return -TARGET_EFAULT;
2653             }
2654             break;
2655         }
2656         case TARGET_SO_PEERSEC: {
2657             char *name;
2658 
2659             if (get_user_u32(len, optlen)) {
2660                 return -TARGET_EFAULT;
2661             }
2662             if (len < 0) {
2663                 return -TARGET_EINVAL;
2664             }
2665             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2666             if (!name) {
2667                 return -TARGET_EFAULT;
2668             }
2669             lv = len;
2670             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2671                                        name, &lv));
2672             if (put_user_u32(lv, optlen)) {
2673                 ret = -TARGET_EFAULT;
2674             }
2675             unlock_user(name, optval_addr, lv);
2676             break;
2677         }
2678         case TARGET_SO_LINGER:
2679         {
2680             struct linger lg;
2681             socklen_t lglen;
2682             struct target_linger *tlg;
2683 
2684             if (get_user_u32(len, optlen)) {
2685                 return -TARGET_EFAULT;
2686             }
2687             if (len < 0) {
2688                 return -TARGET_EINVAL;
2689             }
2690 
2691             lglen = sizeof(lg);
2692             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2693                                        &lg, &lglen));
2694             if (ret < 0) {
2695                 return ret;
2696             }
2697             if (len > lglen) {
2698                 len = lglen;
2699             }
2700             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2701                 return -TARGET_EFAULT;
2702             }
2703             __put_user(lg.l_onoff, &tlg->l_onoff);
2704             __put_user(lg.l_linger, &tlg->l_linger);
2705             unlock_user_struct(tlg, optval_addr, 1);
2706             if (put_user_u32(len, optlen)) {
2707                 return -TARGET_EFAULT;
2708             }
2709             break;
2710         }
2711         /* Options with 'int' argument.  */
2712         case TARGET_SO_DEBUG:
2713             optname = SO_DEBUG;
2714             goto int_case;
2715         case TARGET_SO_REUSEADDR:
2716             optname = SO_REUSEADDR;
2717             goto int_case;
2718 #ifdef SO_REUSEPORT
2719         case TARGET_SO_REUSEPORT:
2720             optname = SO_REUSEPORT;
2721             goto int_case;
2722 #endif
2723         case TARGET_SO_TYPE:
2724             optname = SO_TYPE;
2725             goto int_case;
2726         case TARGET_SO_ERROR:
2727             optname = SO_ERROR;
2728             goto int_case;
2729         case TARGET_SO_DONTROUTE:
2730             optname = SO_DONTROUTE;
2731             goto int_case;
2732         case TARGET_SO_BROADCAST:
2733             optname = SO_BROADCAST;
2734             goto int_case;
2735         case TARGET_SO_SNDBUF:
2736             optname = SO_SNDBUF;
2737             goto int_case;
2738         case TARGET_SO_RCVBUF:
2739             optname = SO_RCVBUF;
2740             goto int_case;
2741         case TARGET_SO_KEEPALIVE:
2742             optname = SO_KEEPALIVE;
2743             goto int_case;
2744         case TARGET_SO_OOBINLINE:
2745             optname = SO_OOBINLINE;
2746             goto int_case;
2747         case TARGET_SO_NO_CHECK:
2748             optname = SO_NO_CHECK;
2749             goto int_case;
2750         case TARGET_SO_PRIORITY:
2751             optname = SO_PRIORITY;
2752             goto int_case;
2753 #ifdef SO_BSDCOMPAT
2754         case TARGET_SO_BSDCOMPAT:
2755             optname = SO_BSDCOMPAT;
2756             goto int_case;
2757 #endif
2758         case TARGET_SO_PASSCRED:
2759             optname = SO_PASSCRED;
2760             goto int_case;
2761         case TARGET_SO_TIMESTAMP:
2762             optname = SO_TIMESTAMP;
2763             goto int_case;
2764         case TARGET_SO_RCVLOWAT:
2765             optname = SO_RCVLOWAT;
2766             goto int_case;
2767         case TARGET_SO_ACCEPTCONN:
2768             optname = SO_ACCEPTCONN;
2769             goto int_case;
2770         case TARGET_SO_PROTOCOL:
2771             optname = SO_PROTOCOL;
2772             goto int_case;
2773         case TARGET_SO_DOMAIN:
2774             optname = SO_DOMAIN;
2775             goto int_case;
2776         default:
2777             goto int_case;
2778         }
2779         break;
2780     case SOL_TCP:
2781     case SOL_UDP:
2782         /* TCP and UDP options all take an 'int' value.  */
2783     int_case:
2784         if (get_user_u32(len, optlen))
2785             return -TARGET_EFAULT;
2786         if (len < 0)
2787             return -TARGET_EINVAL;
2788         lv = sizeof(lv);
2789         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2790         if (ret < 0)
2791             return ret;
2792         switch (optname) {
2793         case SO_TYPE:
2794             val = host_to_target_sock_type(val);
2795             break;
2796         case SO_ERROR:
2797             val = host_to_target_errno(val);
2798             break;
2799         }
2800         if (len > lv)
2801             len = lv;
2802         if (len == 4) {
2803             if (put_user_u32(val, optval_addr))
2804                 return -TARGET_EFAULT;
2805         } else {
2806             if (put_user_u8(val, optval_addr))
2807                 return -TARGET_EFAULT;
2808         }
2809         if (put_user_u32(len, optlen))
2810             return -TARGET_EFAULT;
2811         break;
2812     case SOL_IP:
2813         switch(optname) {
2814         case IP_TOS:
2815         case IP_TTL:
2816         case IP_HDRINCL:
2817         case IP_ROUTER_ALERT:
2818         case IP_RECVOPTS:
2819         case IP_RETOPTS:
2820         case IP_PKTINFO:
2821         case IP_MTU_DISCOVER:
2822         case IP_RECVERR:
2823         case IP_RECVTOS:
2824 #ifdef IP_FREEBIND
2825         case IP_FREEBIND:
2826 #endif
2827         case IP_MULTICAST_TTL:
2828         case IP_MULTICAST_LOOP:
2829             if (get_user_u32(len, optlen))
2830                 return -TARGET_EFAULT;
2831             if (len < 0)
2832                 return -TARGET_EINVAL;
2833             lv = sizeof(lv);
2834             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2835             if (ret < 0)
2836                 return ret;
2837             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2838                 len = 1;
2839                 if (put_user_u32(len, optlen)
2840                     || put_user_u8(val, optval_addr))
2841                     return -TARGET_EFAULT;
2842             } else {
2843                 if (len > sizeof(int))
2844                     len = sizeof(int);
2845                 if (put_user_u32(len, optlen)
2846                     || put_user_u32(val, optval_addr))
2847                     return -TARGET_EFAULT;
2848             }
2849             break;
2850         default:
2851             ret = -TARGET_ENOPROTOOPT;
2852             break;
2853         }
2854         break;
2855     case SOL_IPV6:
2856         switch (optname) {
2857         case IPV6_MTU_DISCOVER:
2858         case IPV6_MTU:
2859         case IPV6_V6ONLY:
2860         case IPV6_RECVPKTINFO:
2861         case IPV6_UNICAST_HOPS:
2862         case IPV6_MULTICAST_HOPS:
2863         case IPV6_MULTICAST_LOOP:
2864         case IPV6_RECVERR:
2865         case IPV6_RECVHOPLIMIT:
2866         case IPV6_2292HOPLIMIT:
2867         case IPV6_CHECKSUM:
2868         case IPV6_ADDRFORM:
2869         case IPV6_2292PKTINFO:
2870         case IPV6_RECVTCLASS:
2871         case IPV6_RECVRTHDR:
2872         case IPV6_2292RTHDR:
2873         case IPV6_RECVHOPOPTS:
2874         case IPV6_2292HOPOPTS:
2875         case IPV6_RECVDSTOPTS:
2876         case IPV6_2292DSTOPTS:
2877         case IPV6_TCLASS:
2878         case IPV6_ADDR_PREFERENCES:
2879 #ifdef IPV6_RECVPATHMTU
2880         case IPV6_RECVPATHMTU:
2881 #endif
2882 #ifdef IPV6_TRANSPARENT
2883         case IPV6_TRANSPARENT:
2884 #endif
2885 #ifdef IPV6_FREEBIND
2886         case IPV6_FREEBIND:
2887 #endif
2888 #ifdef IPV6_RECVORIGDSTADDR
2889         case IPV6_RECVORIGDSTADDR:
2890 #endif
2891             if (get_user_u32(len, optlen))
2892                 return -TARGET_EFAULT;
2893             if (len < 0)
2894                 return -TARGET_EINVAL;
2895             lv = sizeof(lv);
2896             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2897             if (ret < 0)
2898                 return ret;
2899             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2900                 len = 1;
2901                 if (put_user_u32(len, optlen)
2902                     || put_user_u8(val, optval_addr))
2903                     return -TARGET_EFAULT;
2904             } else {
2905                 if (len > sizeof(int))
2906                     len = sizeof(int);
2907                 if (put_user_u32(len, optlen)
2908                     || put_user_u32(val, optval_addr))
2909                     return -TARGET_EFAULT;
2910             }
2911             break;
2912         default:
2913             ret = -TARGET_ENOPROTOOPT;
2914             break;
2915         }
2916         break;
2917 #ifdef SOL_NETLINK
2918     case SOL_NETLINK:
2919         switch (optname) {
2920         case NETLINK_PKTINFO:
2921         case NETLINK_BROADCAST_ERROR:
2922         case NETLINK_NO_ENOBUFS:
2923 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2924         case NETLINK_LISTEN_ALL_NSID:
2925         case NETLINK_CAP_ACK:
2926 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2928         case NETLINK_EXT_ACK:
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2930 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2931         case NETLINK_GET_STRICT_CHK:
2932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2933             if (get_user_u32(len, optlen)) {
2934                 return -TARGET_EFAULT;
2935             }
2936             if (len != sizeof(val)) {
2937                 return -TARGET_EINVAL;
2938             }
2939             lv = len;
2940             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2941             if (ret < 0) {
2942                 return ret;
2943             }
2944             if (put_user_u32(lv, optlen)
2945                 || put_user_u32(val, optval_addr)) {
2946                 return -TARGET_EFAULT;
2947             }
2948             break;
2949 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2950         case NETLINK_LIST_MEMBERSHIPS:
2951         {
2952             uint32_t *results;
2953             int i;
2954             if (get_user_u32(len, optlen)) {
2955                 return -TARGET_EFAULT;
2956             }
2957             if (len < 0) {
2958                 return -TARGET_EINVAL;
2959             }
2960             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2961             if (!results && len > 0) {
2962                 return -TARGET_EFAULT;
2963             }
2964             lv = len;
2965             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2966             if (ret < 0) {
2967                 unlock_user(results, optval_addr, 0);
2968                 return ret;
2969             }
2970             /* swap host endianness to target endianness. */
2971             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2972                 results[i] = tswap32(results[i]);
2973             }
2974             if (put_user_u32(lv, optlen)) {
2975                 return -TARGET_EFAULT;
2976             }
2977             unlock_user(results, optval_addr, 0);
2978             break;
2979         }
2980 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2981         default:
2982             goto unimplemented;
2983         }
2984         break;
2985 #endif /* SOL_NETLINK */
2986     default:
2987     unimplemented:
2988         qemu_log_mask(LOG_UNIMP,
2989                       "getsockopt level=%d optname=%d not yet supported\n",
2990                       level, optname);
2991         ret = -TARGET_EOPNOTSUPP;
2992         break;
2993     }
2994     return ret;
2995 }
2996 
2997 /* Convert a target low/high pair representing a file offset into the host
2998  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2999  * as the kernel doesn't handle them either.
3000  */
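/*
 * Example: for a 32-bit target on a 64-bit host, tlow=0x89abcdef and
 * thigh=0x01234567 combine into the 64-bit offset 0x0123456789abcdef,
 * which fits entirely in *hlow and leaves *hhigh as 0.
 */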
3001 static void target_to_host_low_high(abi_ulong tlow,
3002                                     abi_ulong thigh,
3003                                     unsigned long *hlow,
3004                                     unsigned long *hhigh)
3005 {
3006     uint64_t off = tlow |
3007         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3008         TARGET_LONG_BITS / 2;
3009 
3010     *hlow = off;
3011     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3012 }
3013 
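/*
 * Copy a guest struct target_iovec array into a freshly allocated host
 * iovec array, locking each referenced guest buffer.  Lengths are clamped
 * so the running total stays below 2GB, and (except for the first entry)
 * an inaccessible buffer degrades to a zero-length entry so that a partial
 * transfer is still possible.  Returns NULL with errno set on failure
 * (errno is 0 for the count == 0 case).
 */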
3014 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3015                                 abi_ulong count, int copy)
3016 {
3017     struct target_iovec *target_vec;
3018     struct iovec *vec;
3019     abi_ulong total_len, max_len;
3020     int i;
3021     int err = 0;
3022     bool bad_address = false;
3023 
3024     if (count == 0) {
3025         errno = 0;
3026         return NULL;
3027     }
3028     if (count > IOV_MAX) {
3029         errno = EINVAL;
3030         return NULL;
3031     }
3032 
3033     vec = g_try_new0(struct iovec, count);
3034     if (vec == NULL) {
3035         errno = ENOMEM;
3036         return NULL;
3037     }
3038 
3039     target_vec = lock_user(VERIFY_READ, target_addr,
3040                            count * sizeof(struct target_iovec), 1);
3041     if (target_vec == NULL) {
3042         err = EFAULT;
3043         goto fail2;
3044     }
3045 
3046     /* ??? If host page size > target page size, this will result in a
3047        value larger than what we can actually support.  */
3048     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3049     total_len = 0;
3050 
3051     for (i = 0; i < count; i++) {
3052         abi_ulong base = tswapal(target_vec[i].iov_base);
3053         abi_long len = tswapal(target_vec[i].iov_len);
3054 
3055         if (len < 0) {
3056             err = EINVAL;
3057             goto fail;
3058         } else if (len == 0) {
3059             /* Zero length pointer is ignored.  */
3060             vec[i].iov_base = 0;
3061         } else {
3062             vec[i].iov_base = lock_user(type, base, len, copy);
3063             /* If the first buffer pointer is bad, this is a fault.  But
3064              * subsequent bad buffers will result in a partial write; this
3065              * is realized by filling the vector with null pointers and
3066              * zero lengths. */
3067             if (!vec[i].iov_base) {
3068                 if (i == 0) {
3069                     err = EFAULT;
3070                     goto fail;
3071                 } else {
3072                     bad_address = true;
3073                 }
3074             }
3075             if (bad_address) {
3076                 len = 0;
3077             }
3078             if (len > max_len - total_len) {
3079                 len = max_len - total_len;
3080             }
3081         }
3082         vec[i].iov_len = len;
3083         total_len += len;
3084     }
3085 
3086     unlock_user(target_vec, target_addr, 0);
3087     return vec;
3088 
3089  fail:
3090     while (--i >= 0) {
3091         if (tswapal(target_vec[i].iov_len) > 0) {
3092             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3093         }
3094     }
3095     unlock_user(target_vec, target_addr, 0);
3096  fail2:
3097     g_free(vec);
3098     errno = err;
3099     return NULL;
3100 }
3101 
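/*
 * Release the guest buffers locked by lock_iovec() and free the host
 * vector.  'copy' selects whether the host data is written back to the
 * guest (receive paths) or simply discarded (send paths).
 */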
3102 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3103                          abi_ulong count, int copy)
3104 {
3105     struct target_iovec *target_vec;
3106     int i;
3107 
3108     target_vec = lock_user(VERIFY_READ, target_addr,
3109                            count * sizeof(struct target_iovec), 1);
3110     if (target_vec) {
3111         for (i = 0; i < count; i++) {
3112             abi_ulong base = tswapal(target_vec[i].iov_base);
3113             abi_long len = tswapal(target_vec[i].iov_len);
3114             if (len < 0) {
3115                 break;
3116             }
3117             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3118         }
3119         unlock_user(target_vec, target_addr, 0);
3120     }
3121 
3122     g_free(vec);
3123 }
3124 
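/*
 * Translate target SOCK_* type and flag bits into their host equivalents.
 * Returns 0 on success, or -TARGET_EINVAL if a requested flag cannot be
 * represented on this host.
 */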
3125 static inline int target_to_host_sock_type(int *type)
3126 {
3127     int host_type = 0;
3128     int target_type = *type;
3129 
3130     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3131     case TARGET_SOCK_DGRAM:
3132         host_type = SOCK_DGRAM;
3133         break;
3134     case TARGET_SOCK_STREAM:
3135         host_type = SOCK_STREAM;
3136         break;
3137     default:
3138         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3139         break;
3140     }
3141     if (target_type & TARGET_SOCK_CLOEXEC) {
3142 #if defined(SOCK_CLOEXEC)
3143         host_type |= SOCK_CLOEXEC;
3144 #else
3145         return -TARGET_EINVAL;
3146 #endif
3147     }
3148     if (target_type & TARGET_SOCK_NONBLOCK) {
3149 #if defined(SOCK_NONBLOCK)
3150         host_type |= SOCK_NONBLOCK;
3151 #elif !defined(O_NONBLOCK)
3152         return -TARGET_EINVAL;
3153 #endif
3154     }
3155     *type = host_type;
3156     return 0;
3157 }
3158 
3159 /* Try to emulate socket type flags after socket creation.  */
3160 static int sock_flags_fixup(int fd, int target_type)
3161 {
3162 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3163     if (target_type & TARGET_SOCK_NONBLOCK) {
3164         int flags = fcntl(fd, F_GETFL);
3165         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3166             close(fd);
3167             return -TARGET_EINVAL;
3168         }
3169     }
3170 #endif
3171     return fd;
3172 }
3173 
3174 /* do_socket() Must return target values and target errnos. */
3175 static abi_long do_socket(int domain, int type, int protocol)
3176 {
3177     int target_type = type;
3178     int ret;
3179 
3180     ret = target_to_host_sock_type(&type);
3181     if (ret) {
3182         return ret;
3183     }
3184 
3185     if (domain == PF_NETLINK && !(
3186 #ifdef CONFIG_RTNETLINK
3187          protocol == NETLINK_ROUTE ||
3188 #endif
3189          protocol == NETLINK_KOBJECT_UEVENT ||
3190          protocol == NETLINK_AUDIT)) {
3191         return -TARGET_EPROTONOSUPPORT;
3192     }
3193 
3194     if (domain == AF_PACKET ||
3195         (domain == AF_INET && type == SOCK_PACKET)) {
3196         protocol = tswap16(protocol);
3197     }
3198 
3199     ret = get_errno(socket(domain, type, protocol));
3200     if (ret >= 0) {
3201         ret = sock_flags_fixup(ret, target_type);
3202         if (type == SOCK_PACKET) {
3203             /* Manage an obsolete case:
3204              * if the socket type is SOCK_PACKET, bind by name.
3205              */
3206             fd_trans_register(ret, &target_packet_trans);
3207         } else if (domain == PF_NETLINK) {
3208             switch (protocol) {
3209 #ifdef CONFIG_RTNETLINK
3210             case NETLINK_ROUTE:
3211                 fd_trans_register(ret, &target_netlink_route_trans);
3212                 break;
3213 #endif
3214             case NETLINK_KOBJECT_UEVENT:
3215                 /* nothing to do: messages are strings */
3216                 break;
3217             case NETLINK_AUDIT:
3218                 fd_trans_register(ret, &target_netlink_audit_trans);
3219                 break;
3220             default:
3221                 g_assert_not_reached();
3222             }
3223         }
3224     }
3225     return ret;
3226 }
3227 
3228 /* do_bind() Must return target values and target errnos. */
3229 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3230                         socklen_t addrlen)
3231 {
3232     void *addr;
3233     abi_long ret;
3234 
3235     if ((int)addrlen < 0) {
3236         return -TARGET_EINVAL;
3237     }
3238 
3239     addr = alloca(addrlen+1);
3240 
3241     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3242     if (ret)
3243         return ret;
3244 
3245     return get_errno(bind(sockfd, addr, addrlen));
3246 }
3247 
3248 /* do_connect() Must return target values and target errnos. */
3249 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3250                            socklen_t addrlen)
3251 {
3252     void *addr;
3253     abi_long ret;
3254 
3255     if ((int)addrlen < 0) {
3256         return -TARGET_EINVAL;
3257     }
3258 
3259     addr = alloca(addrlen+1);
3260 
3261     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3262     if (ret)
3263         return ret;
3264 
3265     return get_errno(safe_connect(sockfd, addr, addrlen));
3266 }
3267 
3268 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3269 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3270                                       int flags, int send)
3271 {
3272     abi_long ret, len;
3273     struct msghdr msg;
3274     abi_ulong count;
3275     struct iovec *vec;
3276     abi_ulong target_vec;
3277 
3278     if (msgp->msg_name) {
3279         msg.msg_namelen = tswap32(msgp->msg_namelen);
3280         msg.msg_name = alloca(msg.msg_namelen+1);
3281         ret = target_to_host_sockaddr(fd, msg.msg_name,
3282                                       tswapal(msgp->msg_name),
3283                                       msg.msg_namelen);
3284         if (ret == -TARGET_EFAULT) {
3285             /* For connected sockets msg_name and msg_namelen must
3286              * be ignored, so returning EFAULT immediately is wrong.
3287              * Instead, pass a bad msg_name to the host kernel, and
3288              * let it decide whether to return EFAULT or not.
3289              */
3290             msg.msg_name = (void *)-1;
3291         } else if (ret) {
3292             goto out2;
3293         }
3294     } else {
3295         msg.msg_name = NULL;
3296         msg.msg_namelen = 0;
3297     }
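    /*
     * Host control messages may be larger than the target's (different
     * header sizes and alignment), so reserve twice the target length as
     * headroom for the target_to_host_cmsg() conversion below.
     */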
3298     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3299     msg.msg_control = alloca(msg.msg_controllen);
3300     memset(msg.msg_control, 0, msg.msg_controllen);
3301 
3302     msg.msg_flags = tswap32(msgp->msg_flags);
3303 
3304     count = tswapal(msgp->msg_iovlen);
3305     target_vec = tswapal(msgp->msg_iov);
3306 
3307     if (count > IOV_MAX) {
3308         /* sendmsg/recvmsg return a different errno for this condition than
3309          * readv/writev, so we must catch it here before lock_iovec() does.
3310          */
3311         ret = -TARGET_EMSGSIZE;
3312         goto out2;
3313     }
3314 
3315     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3316                      target_vec, count, send);
3317     if (vec == NULL) {
3318         ret = -host_to_target_errno(errno);
3319         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3320         if (!send || ret) {
3321             goto out2;
3322         }
3323     }
3324     msg.msg_iovlen = count;
3325     msg.msg_iov = vec;
3326 
3327     if (send) {
3328         if (fd_trans_target_to_host_data(fd)) {
3329             void *host_msg;
3330 
3331             host_msg = g_malloc(msg.msg_iov->iov_len);
3332             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3333             ret = fd_trans_target_to_host_data(fd)(host_msg,
3334                                                    msg.msg_iov->iov_len);
3335             if (ret >= 0) {
3336                 msg.msg_iov->iov_base = host_msg;
3337                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3338             }
3339             g_free(host_msg);
3340         } else {
3341             ret = target_to_host_cmsg(&msg, msgp);
3342             if (ret == 0) {
3343                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3344             }
3345         }
3346     } else {
3347         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3348         if (!is_error(ret)) {
3349             len = ret;
3350             if (fd_trans_host_to_target_data(fd)) {
3351                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3352                                                MIN(msg.msg_iov->iov_len, len));
3353             }
3354             if (!is_error(ret)) {
3355                 ret = host_to_target_cmsg(msgp, &msg);
3356             }
3357             if (!is_error(ret)) {
3358                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3359                 msgp->msg_flags = tswap32(msg.msg_flags);
3360                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3361                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3362                                     msg.msg_name, msg.msg_namelen);
3363                     if (ret) {
3364                         goto out;
3365                     }
3366                 }
3367 
3368                 ret = len;
3369             }
3370         }
3371     }
3372 
3373 out:
3374     if (vec) {
3375         unlock_iovec(vec, target_vec, count, !send);
3376     }
3377 out2:
3378     return ret;
3379 }
3380 
3381 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3382                                int flags, int send)
3383 {
3384     abi_long ret;
3385     struct target_msghdr *msgp;
3386 
3387     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3388                           msgp,
3389                           target_msg,
3390                           send ? 1 : 0)) {
3391         return -TARGET_EFAULT;
3392     }
3393     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3394     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3395     return ret;
3396 }
3397 
3398 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3399  * so it might not have this *mmsg-specific flag either.
3400  */
3401 #ifndef MSG_WAITFORONE
3402 #define MSG_WAITFORONE 0x10000
3403 #endif
3404 
3405 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3406                                 unsigned int vlen, unsigned int flags,
3407                                 int send)
3408 {
3409     struct target_mmsghdr *mmsgp;
3410     abi_long ret = 0;
3411     int i;
3412 
3413     if (vlen > UIO_MAXIOV) {
3414         vlen = UIO_MAXIOV;
3415     }
3416 
3417     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3418     if (!mmsgp) {
3419         return -TARGET_EFAULT;
3420     }
3421 
3422     for (i = 0; i < vlen; i++) {
3423         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3424         if (is_error(ret)) {
3425             break;
3426         }
3427         mmsgp[i].msg_len = tswap32(ret);
3428         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3429         if (flags & MSG_WAITFORONE) {
3430             flags |= MSG_DONTWAIT;
3431         }
3432     }
3433 
3434     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3435 
3436     /* Return number of datagrams sent if we sent any at all;
3437      * otherwise return the error.
3438      */
3439     if (i) {
3440         return i;
3441     }
3442     return ret;
3443 }
3444 
3445 /* do_accept4() Must return target values and target errnos. */
3446 static abi_long do_accept4(int fd, abi_ulong target_addr,
3447                            abi_ulong target_addrlen_addr, int flags)
3448 {
3449     socklen_t addrlen, ret_addrlen;
3450     void *addr;
3451     abi_long ret;
3452     int host_flags;
3453 
3454     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3455         return -TARGET_EINVAL;
3456     }
3457 
3458     host_flags = 0;
3459     if (flags & TARGET_SOCK_NONBLOCK) {
3460         host_flags |= SOCK_NONBLOCK;
3461     }
3462     if (flags & TARGET_SOCK_CLOEXEC) {
3463         host_flags |= SOCK_CLOEXEC;
3464     }
3465 
3466     if (target_addr == 0) {
3467         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3468     }
3469 
3470     /* Linux returns EFAULT if the addrlen pointer is invalid */
3471     if (get_user_u32(addrlen, target_addrlen_addr))
3472         return -TARGET_EFAULT;
3473 
3474     if ((int)addrlen < 0) {
3475         return -TARGET_EINVAL;
3476     }
3477 
3478     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3479         return -TARGET_EFAULT;
3480     }
3481 
3482     addr = alloca(addrlen);
3483 
3484     ret_addrlen = addrlen;
3485     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3486     if (!is_error(ret)) {
3487         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3488         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3489             ret = -TARGET_EFAULT;
3490         }
3491     }
3492     return ret;
3493 }
3494 
3495 /* do_getpeername() Must return target values and target errnos. */
3496 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3497                                abi_ulong target_addrlen_addr)
3498 {
3499     socklen_t addrlen, ret_addrlen;
3500     void *addr;
3501     abi_long ret;
3502 
3503     if (get_user_u32(addrlen, target_addrlen_addr))
3504         return -TARGET_EFAULT;
3505 
3506     if ((int)addrlen < 0) {
3507         return -TARGET_EINVAL;
3508     }
3509 
3510     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3511         return -TARGET_EFAULT;
3512     }
3513 
3514     addr = alloca(addrlen);
3515 
3516     ret_addrlen = addrlen;
3517     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3518     if (!is_error(ret)) {
3519         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3520         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3521             ret = -TARGET_EFAULT;
3522         }
3523     }
3524     return ret;
3525 }
3526 
3527 /* do_getsockname() Must return target values and target errnos. */
3528 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3529                                abi_ulong target_addrlen_addr)
3530 {
3531     socklen_t addrlen, ret_addrlen;
3532     void *addr;
3533     abi_long ret;
3534 
3535     if (get_user_u32(addrlen, target_addrlen_addr))
3536         return -TARGET_EFAULT;
3537 
3538     if ((int)addrlen < 0) {
3539         return -TARGET_EINVAL;
3540     }
3541 
3542     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3543         return -TARGET_EFAULT;
3544     }
3545 
3546     addr = alloca(addrlen);
3547 
3548     ret_addrlen = addrlen;
3549     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3550     if (!is_error(ret)) {
3551         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3552         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3553             ret = -TARGET_EFAULT;
3554         }
3555     }
3556     return ret;
3557 }
3558 
3559 /* do_socketpair() Must return target values and target errnos. */
3560 static abi_long do_socketpair(int domain, int type, int protocol,
3561                               abi_ulong target_tab_addr)
3562 {
3563     int tab[2];
3564     abi_long ret;
3565 
3566     target_to_host_sock_type(&type);
3567 
3568     ret = get_errno(socketpair(domain, type, protocol, tab));
3569     if (!is_error(ret)) {
3570         if (put_user_s32(tab[0], target_tab_addr)
3571             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3572             ret = -TARGET_EFAULT;
3573     }
3574     return ret;
3575 }
3576 
3577 /* do_sendto() Must return target values and target errnos. */
3578 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3579                           abi_ulong target_addr, socklen_t addrlen)
3580 {
3581     void *addr;
3582     void *host_msg;
3583     void *copy_msg = NULL;
3584     abi_long ret;
3585 
3586     if ((int)addrlen < 0) {
3587         return -TARGET_EINVAL;
3588     }
3589 
3590     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3591     if (!host_msg)
3592         return -TARGET_EFAULT;
3593     if (fd_trans_target_to_host_data(fd)) {
3594         copy_msg = host_msg;
3595         host_msg = g_malloc(len);
3596         memcpy(host_msg, copy_msg, len);
3597         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3598         if (ret < 0) {
3599             goto fail;
3600         }
3601     }
3602     if (target_addr) {
3603         addr = alloca(addrlen+1);
3604         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3605         if (ret) {
3606             goto fail;
3607         }
3608         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3609     } else {
3610         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3611     }
3612 fail:
3613     if (copy_msg) {
3614         g_free(host_msg);
3615         host_msg = copy_msg;
3616     }
3617     unlock_user(host_msg, msg, 0);
3618     return ret;
3619 }
3620 
3621 /* do_recvfrom() Must return target values and target errnos. */
3622 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3623                             abi_ulong target_addr,
3624                             abi_ulong target_addrlen)
3625 {
3626     socklen_t addrlen, ret_addrlen;
3627     void *addr;
3628     void *host_msg;
3629     abi_long ret;
3630 
3631     if (!msg) {
3632         host_msg = NULL;
3633     } else {
3634         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3635         if (!host_msg) {
3636             return -TARGET_EFAULT;
3637         }
3638     }
3639     if (target_addr) {
3640         if (get_user_u32(addrlen, target_addrlen)) {
3641             ret = -TARGET_EFAULT;
3642             goto fail;
3643         }
3644         if ((int)addrlen < 0) {
3645             ret = -TARGET_EINVAL;
3646             goto fail;
3647         }
3648         addr = alloca(addrlen);
3649         ret_addrlen = addrlen;
3650         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3651                                       addr, &ret_addrlen));
3652     } else {
3653         addr = NULL; /* To keep compiler quiet.  */
3654         addrlen = 0; /* To keep compiler quiet.  */
3655         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3656     }
3657     if (!is_error(ret)) {
3658         if (fd_trans_host_to_target_data(fd)) {
3659             abi_long trans;
3660             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3661             if (is_error(trans)) {
3662                 ret = trans;
3663                 goto fail;
3664             }
3665         }
3666         if (target_addr) {
3667             host_to_target_sockaddr(target_addr, addr,
3668                                     MIN(addrlen, ret_addrlen));
3669             if (put_user_u32(ret_addrlen, target_addrlen)) {
3670                 ret = -TARGET_EFAULT;
3671                 goto fail;
3672             }
3673         }
3674         unlock_user(host_msg, msg, len);
3675     } else {
3676 fail:
3677         unlock_user(host_msg, msg, 0);
3678     }
3679     return ret;
3680 }
3681 
3682 #ifdef TARGET_NR_socketcall
3683 /* do_socketcall() must return target values and target errnos. */
3684 static abi_long do_socketcall(int num, abi_ulong vptr)
3685 {
3686     static const unsigned nargs[] = { /* number of arguments per operation */
3687         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3688         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3689         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3690         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3691         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3692         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3693         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3694         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3695         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3696         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3697         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3698         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3699         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3700         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3701         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3702         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3703         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3704         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3705         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3706         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3707     };
3708     abi_long a[6]; /* max 6 args */
3709     unsigned i;
3710 
3711     /* check the range of the first argument num */
3712     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3713     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3714         return -TARGET_EINVAL;
3715     }
3716     /* ensure we have space for args */
3717     if (nargs[num] > ARRAY_SIZE(a)) {
3718         return -TARGET_EINVAL;
3719     }
3720     /* collect the arguments in a[] according to nargs[] */
3721     for (i = 0; i < nargs[num]; ++i) {
3722         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3723             return -TARGET_EFAULT;
3724         }
3725     }
3726     /* now that we have the args, invoke the appropriate underlying function */
3727     switch (num) {
3728     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3729         return do_socket(a[0], a[1], a[2]);
3730     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3731         return do_bind(a[0], a[1], a[2]);
3732     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3733         return do_connect(a[0], a[1], a[2]);
3734     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3735         return get_errno(listen(a[0], a[1]));
3736     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3737         return do_accept4(a[0], a[1], a[2], 0);
3738     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3739         return do_getsockname(a[0], a[1], a[2]);
3740     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3741         return do_getpeername(a[0], a[1], a[2]);
3742     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3743         return do_socketpair(a[0], a[1], a[2], a[3]);
3744     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3745         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3746     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3747         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3748     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3749         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3750     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3751         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3752     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3753         return get_errno(shutdown(a[0], a[1]));
3754     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3755         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3756     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3757         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3758     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3759         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3760     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3761         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3762     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3763         return do_accept4(a[0], a[1], a[2], a[3]);
3764     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3765         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3766     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3767         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3768     default:
3769         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3770         return -TARGET_EINVAL;
3771     }
3772 }
3773 #endif
3774 
3775 #ifndef TARGET_SEMID64_DS
3776 /* asm-generic version of this struct */
3777 struct target_semid64_ds
3778 {
3779   struct target_ipc_perm sem_perm;
3780   abi_ulong sem_otime;
3781 #if TARGET_ABI_BITS == 32
3782   abi_ulong __unused1;
3783 #endif
3784   abi_ulong sem_ctime;
3785 #if TARGET_ABI_BITS == 32
3786   abi_ulong __unused2;
3787 #endif
3788   abi_ulong sem_nsems;
3789   abi_ulong __unused3;
3790   abi_ulong __unused4;
3791 };
3792 #endif
3793 
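/*
 * Convert the struct ipc_perm embedded at the start of a target
 * semid64_ds from target to host representation.  The width of the mode
 * and seq fields varies between targets, hence the per-target swaps.
 */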
3794 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3795                                                abi_ulong target_addr)
3796 {
3797     struct target_ipc_perm *target_ip;
3798     struct target_semid64_ds *target_sd;
3799 
3800     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3801         return -TARGET_EFAULT;
3802     target_ip = &(target_sd->sem_perm);
3803     host_ip->__key = tswap32(target_ip->__key);
3804     host_ip->uid = tswap32(target_ip->uid);
3805     host_ip->gid = tswap32(target_ip->gid);
3806     host_ip->cuid = tswap32(target_ip->cuid);
3807     host_ip->cgid = tswap32(target_ip->cgid);
3808 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3809     host_ip->mode = tswap32(target_ip->mode);
3810 #else
3811     host_ip->mode = tswap16(target_ip->mode);
3812 #endif
3813 #if defined(TARGET_PPC)
3814     host_ip->__seq = tswap32(target_ip->__seq);
3815 #else
3816     host_ip->__seq = tswap16(target_ip->__seq);
3817 #endif
3818     unlock_user_struct(target_sd, target_addr, 0);
3819     return 0;
3820 }
3821 
3822 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3823                                                struct ipc_perm *host_ip)
3824 {
3825     struct target_ipc_perm *target_ip;
3826     struct target_semid64_ds *target_sd;
3827 
3828     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3829         return -TARGET_EFAULT;
3830     target_ip = &(target_sd->sem_perm);
3831     target_ip->__key = tswap32(host_ip->__key);
3832     target_ip->uid = tswap32(host_ip->uid);
3833     target_ip->gid = tswap32(host_ip->gid);
3834     target_ip->cuid = tswap32(host_ip->cuid);
3835     target_ip->cgid = tswap32(host_ip->cgid);
3836 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3837     target_ip->mode = tswap32(host_ip->mode);
3838 #else
3839     target_ip->mode = tswap16(host_ip->mode);
3840 #endif
3841 #if defined(TARGET_PPC)
3842     target_ip->__seq = tswap32(host_ip->__seq);
3843 #else
3844     target_ip->__seq = tswap16(host_ip->__seq);
3845 #endif
3846     unlock_user_struct(target_sd, target_addr, 1);
3847     return 0;
3848 }
3849 
3850 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3851                                                abi_ulong target_addr)
3852 {
3853     struct target_semid64_ds *target_sd;
3854 
3855     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3856         return -TARGET_EFAULT;
3857     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3858         return -TARGET_EFAULT;
3859     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3860     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3861     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3862     unlock_user_struct(target_sd, target_addr, 0);
3863     return 0;
3864 }
3865 
3866 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3867                                                struct semid_ds *host_sd)
3868 {
3869     struct target_semid64_ds *target_sd;
3870 
3871     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3872         return -TARGET_EFAULT;
3873     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3874         return -TARGET_EFAULT;
3875     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3876     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3877     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3878     unlock_user_struct(target_sd, target_addr, 1);
3879     return 0;
3880 }
3881 
3882 struct target_seminfo {
3883     int semmap;
3884     int semmni;
3885     int semmns;
3886     int semmnu;
3887     int semmsl;
3888     int semopm;
3889     int semume;
3890     int semusz;
3891     int semvmx;
3892     int semaem;
3893 };
3894 
3895 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3896                                               struct seminfo *host_seminfo)
3897 {
3898     struct target_seminfo *target_seminfo;
3899     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3900         return -TARGET_EFAULT;
3901     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3902     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3903     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3904     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3905     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3906     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3907     __put_user(host_seminfo->semume, &target_seminfo->semume);
3908     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3909     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3910     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3911     unlock_user_struct(target_seminfo, target_addr, 1);
3912     return 0;
3913 }
3914 
3915 union semun {
3916 	int val;
3917 	struct semid_ds *buf;
3918 	unsigned short *array;
3919 	struct seminfo *__buf;
3920 };
3921 
3922 union target_semun {
3923 	int val;
3924 	abi_ulong buf;
3925 	abi_ulong array;
3926 	abi_ulong __buf;
3927 };
3928 
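/*
 * Copy a target array of semaphore values into a newly allocated host
 * array.  The size of the set is discovered with an IPC_STAT query so
 * that exactly sem_nsems values are transferred.
 */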
3929 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3930                                                abi_ulong target_addr)
3931 {
3932     int nsems;
3933     unsigned short *array;
3934     union semun semun;
3935     struct semid_ds semid_ds;
3936     int i, ret;
3937 
3938     semun.buf = &semid_ds;
3939 
3940     ret = semctl(semid, 0, IPC_STAT, semun);
3941     if (ret == -1)
3942         return get_errno(ret);
3943 
3944     nsems = semid_ds.sem_nsems;
3945 
3946     *host_array = g_try_new(unsigned short, nsems);
3947     if (!*host_array) {
3948         return -TARGET_ENOMEM;
3949     }
3950     array = lock_user(VERIFY_READ, target_addr,
3951                       nsems*sizeof(unsigned short), 1);
3952     if (!array) {
3953         g_free(*host_array);
3954         return -TARGET_EFAULT;
3955     }
3956 
3957     for(i=0; i<nsems; i++) {
3958         __get_user((*host_array)[i], &array[i]);
3959     }
3960     unlock_user(array, target_addr, 0);
3961 
3962     return 0;
3963 }
3964 
3965 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3966                                                unsigned short **host_array)
3967 {
3968     int nsems;
3969     unsigned short *array;
3970     union semun semun;
3971     struct semid_ds semid_ds;
3972     int i, ret;
3973 
3974     semun.buf = &semid_ds;
3975 
3976     ret = semctl(semid, 0, IPC_STAT, semun);
3977     if (ret == -1)
3978         return get_errno(ret);
3979 
3980     nsems = semid_ds.sem_nsems;
3981 
3982     array = lock_user(VERIFY_WRITE, target_addr,
3983                       nsems*sizeof(unsigned short), 0);
3984     if (!array)
3985         return -TARGET_EFAULT;
3986 
3987     for(i=0; i<nsems; i++) {
3988         __put_user((*host_array)[i], &array[i]);
3989     }
3990     g_free(*host_array);
3991     unlock_user(array, target_addr, 1);
3992 
3993     return 0;
3994 }
3995 
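/* do_semctl() must return target values and target errnos. */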
3996 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3997                                  abi_ulong target_arg)
3998 {
3999     union target_semun target_su = { .buf = target_arg };
4000     union semun arg;
4001     struct semid_ds dsarg;
4002     unsigned short *array = NULL;
4003     struct seminfo seminfo;
4004     abi_long ret = -TARGET_EINVAL;
4005     abi_long err;
4006     cmd &= 0xff;
4007 
4008     switch( cmd ) {
4009 	case GETVAL:
4010 	case SETVAL:
4011             /* In 64 bit cross-endian situations, we will erroneously pick up
4012              * the wrong half of the union for the "val" element.  To rectify
4013              * this, the entire 8-byte structure is byteswapped, followed by
4014 	     * a swap of the 4 byte val field. In other cases, the data is
4015 	     * already in proper host byte order. */
4016 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4017 		target_su.buf = tswapal(target_su.buf);
4018 		arg.val = tswap32(target_su.val);
4019 	    } else {
4020 		arg.val = target_su.val;
4021 	    }
4022             ret = get_errno(semctl(semid, semnum, cmd, arg));
4023             break;
4024 	case GETALL:
4025 	case SETALL:
4026             err = target_to_host_semarray(semid, &array, target_su.array);
4027             if (err)
4028                 return err;
4029             arg.array = array;
4030             ret = get_errno(semctl(semid, semnum, cmd, arg));
4031             err = host_to_target_semarray(semid, target_su.array, &array);
4032             if (err)
4033                 return err;
4034             break;
4035 	case IPC_STAT:
4036 	case IPC_SET:
4037 	case SEM_STAT:
4038             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4039             if (err)
4040                 return err;
4041             arg.buf = &dsarg;
4042             ret = get_errno(semctl(semid, semnum, cmd, arg));
4043             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4044             if (err)
4045                 return err;
4046             break;
4047 	case IPC_INFO:
4048 	case SEM_INFO:
4049             arg.__buf = &seminfo;
4050             ret = get_errno(semctl(semid, semnum, cmd, arg));
4051             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4052             if (err)
4053                 return err;
4054             break;
4055 	case IPC_RMID:
4056 	case GETPID:
4057 	case GETNCNT:
4058 	case GETZCNT:
4059             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4060             break;
4061     }
4062 
4063     return ret;
4064 }
4065 
4066 struct target_sembuf {
4067     unsigned short sem_num;
4068     short sem_op;
4069     short sem_flg;
4070 };
4071 
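/* Copy an array of 'nsops' semaphore operations from target to host order. */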
4072 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4073                                              abi_ulong target_addr,
4074                                              unsigned nsops)
4075 {
4076     struct target_sembuf *target_sembuf;
4077     int i;
4078 
4079     target_sembuf = lock_user(VERIFY_READ, target_addr,
4080                               nsops*sizeof(struct target_sembuf), 1);
4081     if (!target_sembuf)
4082         return -TARGET_EFAULT;
4083 
4084     for(i=0; i<nsops; i++) {
4085         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4086         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4087         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4088     }
4089 
4090     unlock_user(target_sembuf, target_addr, 0);
4091 
4092     return 0;
4093 }
4094 
4095 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4096     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4097 
4098 /*
4099  * This macro is required to handle the s390 variants, which pass the
4100  * arguments in a different order than the default.
4101  */
4102 #ifdef __s390x__
4103 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4104   (__nsops), (__timeout), (__sops)
4105 #else
4106 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4107   (__nsops), 0, (__sops), (__timeout)
4108 #endif
4109 
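/*
 * Common helper for semop()/semtimedop(): convert the optional timeout
 * (32-bit or 64-bit target layout) and the sembuf array, then try the
 * native semtimedop syscall and fall back to the multiplexed ipc syscall.
 */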
4110 static inline abi_long do_semtimedop(int semid,
4111                                      abi_long ptr,
4112                                      unsigned nsops,
4113                                      abi_long timeout, bool time64)
4114 {
4115     struct sembuf *sops;
4116     struct timespec ts, *pts = NULL;
4117     abi_long ret;
4118 
4119     if (timeout) {
4120         pts = &ts;
4121         if (time64) {
4122             if (target_to_host_timespec64(pts, timeout)) {
4123                 return -TARGET_EFAULT;
4124             }
4125         } else {
4126             if (target_to_host_timespec(pts, timeout)) {
4127                 return -TARGET_EFAULT;
4128             }
4129         }
4130     }
4131 
4132     if (nsops > TARGET_SEMOPM) {
4133         return -TARGET_E2BIG;
4134     }
4135 
4136     sops = g_new(struct sembuf, nsops);
4137 
4138     if (target_to_host_sembuf(sops, ptr, nsops)) {
4139         g_free(sops);
4140         return -TARGET_EFAULT;
4141     }
4142 
4143     ret = -TARGET_ENOSYS;
4144 #ifdef __NR_semtimedop
4145     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4146 #endif
4147 #ifdef __NR_ipc
4148     if (ret == -TARGET_ENOSYS) {
4149         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4150                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4151     }
4152 #endif
4153     g_free(sops);
4154     return ret;
4155 }
4156 #endif
4157 
4158 struct target_msqid_ds
4159 {
4160     struct target_ipc_perm msg_perm;
4161     abi_ulong msg_stime;
4162 #if TARGET_ABI_BITS == 32
4163     abi_ulong __unused1;
4164 #endif
4165     abi_ulong msg_rtime;
4166 #if TARGET_ABI_BITS == 32
4167     abi_ulong __unused2;
4168 #endif
4169     abi_ulong msg_ctime;
4170 #if TARGET_ABI_BITS == 32
4171     abi_ulong __unused3;
4172 #endif
4173     abi_ulong __msg_cbytes;
4174     abi_ulong msg_qnum;
4175     abi_ulong msg_qbytes;
4176     abi_ulong msg_lspid;
4177     abi_ulong msg_lrpid;
4178     abi_ulong __unused4;
4179     abi_ulong __unused5;
4180 };
4181 
4182 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4183                                                abi_ulong target_addr)
4184 {
4185     struct target_msqid_ds *target_md;
4186 
4187     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4188         return -TARGET_EFAULT;
4189     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4190         return -TARGET_EFAULT;
4191     host_md->msg_stime = tswapal(target_md->msg_stime);
4192     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4193     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4194     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4195     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4196     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4197     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4198     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4199     unlock_user_struct(target_md, target_addr, 0);
4200     return 0;
4201 }
4202 
4203 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4204                                                struct msqid_ds *host_md)
4205 {
4206     struct target_msqid_ds *target_md;
4207 
4208     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4209         return -TARGET_EFAULT;
4210     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4211         return -TARGET_EFAULT;
4212     target_md->msg_stime = tswapal(host_md->msg_stime);
4213     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4214     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4215     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4216     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4217     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4218     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4219     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4220     unlock_user_struct(target_md, target_addr, 1);
4221     return 0;
4222 }
4223 
4224 struct target_msginfo {
4225     int msgpool;
4226     int msgmap;
4227     int msgmax;
4228     int msgmnb;
4229     int msgmni;
4230     int msgssz;
4231     int msgtql;
4232     unsigned short int msgseg;
4233 };
4234 
4235 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4236                                               struct msginfo *host_msginfo)
4237 {
4238     struct target_msginfo *target_msginfo;
4239     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4240         return -TARGET_EFAULT;
4241     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4242     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4243     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4244     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4245     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4246     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4247     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4248     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4249     unlock_user_struct(target_msginfo, target_addr, 1);
4250     return 0;
4251 }
4252 
4253 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4254 {
4255     struct msqid_ds dsarg;
4256     struct msginfo msginfo;
4257     abi_long ret = -TARGET_EINVAL;
4258 
4259     cmd &= 0xff;
4260 
4261     switch (cmd) {
4262     case IPC_STAT:
4263     case IPC_SET:
4264     case MSG_STAT:
4265         if (target_to_host_msqid_ds(&dsarg,ptr))
4266             return -TARGET_EFAULT;
4267         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4268         if (host_to_target_msqid_ds(ptr,&dsarg))
4269             return -TARGET_EFAULT;
4270         break;
4271     case IPC_RMID:
4272         ret = get_errno(msgctl(msgid, cmd, NULL));
4273         break;
4274     case IPC_INFO:
4275     case MSG_INFO:
4276         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4277         if (host_to_target_msginfo(ptr, &msginfo))
4278             return -TARGET_EFAULT;
4279         break;
4280     }
4281 
4282     return ret;
4283 }
4284 
4285 struct target_msgbuf {
4286     abi_long mtype;
4287     char	mtext[1];
4288 };
4289 
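/*
 * Emulate msgsnd(): copy the target message into a host msgbuf, then try
 * the native msgsnd syscall, falling back to the multiplexed ipc syscall.
 * Must return target values and target errnos.
 */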
4290 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4291                                  ssize_t msgsz, int msgflg)
4292 {
4293     struct target_msgbuf *target_mb;
4294     struct msgbuf *host_mb;
4295     abi_long ret = 0;
4296 
4297     if (msgsz < 0) {
4298         return -TARGET_EINVAL;
4299     }
4300 
4301     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4302         return -TARGET_EFAULT;
4303     host_mb = g_try_malloc(msgsz + sizeof(long));
4304     if (!host_mb) {
4305         unlock_user_struct(target_mb, msgp, 0);
4306         return -TARGET_ENOMEM;
4307     }
4308     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4309     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4310     ret = -TARGET_ENOSYS;
4311 #ifdef __NR_msgsnd
4312     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4313 #endif
4314 #ifdef __NR_ipc
4315     if (ret == -TARGET_ENOSYS) {
4316 #ifdef __s390x__
4317         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4318                                  host_mb));
4319 #else
4320         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4321                                  host_mb, 0));
4322 #endif
4323     }
4324 #endif
4325     g_free(host_mb);
4326     unlock_user_struct(target_mb, msgp, 0);
4327 
4328     return ret;
4329 }
4330 
4331 #ifdef __NR_ipc
4332 #if defined(__sparc__)
4333 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4334 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4335 #elif defined(__s390x__)
4336 /* The s390 sys_ipc variant has only five parameters.  */
4337 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4338     ((long int[]){(long int)__msgp, __msgtyp})
4339 #else
4340 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4341     ((long int[]){(long int)__msgp, __msgtyp}), 0
4342 #endif
4343 #endif
4344 
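/*
 * Emulate msgrcv(): receive into a host msgbuf, then copy the message
 * type and text back out to the target buffer.  Must return target values
 * and target errnos.
 */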
4345 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4346                                  ssize_t msgsz, abi_long msgtyp,
4347                                  int msgflg)
4348 {
4349     struct target_msgbuf *target_mb;
4350     char *target_mtext;
4351     struct msgbuf *host_mb;
4352     abi_long ret = 0;
4353 
4354     if (msgsz < 0) {
4355         return -TARGET_EINVAL;
4356     }
4357 
4358     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4359         return -TARGET_EFAULT;
4360 
4361     host_mb = g_try_malloc(msgsz + sizeof(long));
4362     if (!host_mb) {
4363         ret = -TARGET_ENOMEM;
4364         goto end;
4365     }
4366     ret = -TARGET_ENOSYS;
4367 #ifdef __NR_msgrcv
4368     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4369 #endif
4370 #ifdef __NR_ipc
4371     if (ret == -TARGET_ENOSYS) {
4372         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4373                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4374     }
4375 #endif
4376 
4377     if (ret > 0) {
4378         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4379         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4380         if (!target_mtext) {
4381             ret = -TARGET_EFAULT;
4382             goto end;
4383         }
4384         memcpy(target_mb->mtext, host_mb->mtext, ret);
4385         unlock_user(target_mtext, target_mtext_addr, ret);
4386     }
4387 
4388     target_mb->mtype = tswapal(host_mb->mtype);
4389 
4390 end:
4391     if (target_mb)
4392         unlock_user_struct(target_mb, msgp, 1);
4393     g_free(host_mb);
4394     return ret;
4395 }
4396 
4397 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4398                                                abi_ulong target_addr)
4399 {
4400     struct target_shmid_ds *target_sd;
4401 
4402     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4403         return -TARGET_EFAULT;
4404     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4405         return -TARGET_EFAULT;
4406     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4407     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4408     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4409     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4410     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4411     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4412     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4413     unlock_user_struct(target_sd, target_addr, 0);
4414     return 0;
4415 }
4416 
4417 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4418                                                struct shmid_ds *host_sd)
4419 {
4420     struct target_shmid_ds *target_sd;
4421 
4422     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4423         return -TARGET_EFAULT;
4424     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4425         return -TARGET_EFAULT;
4426     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4427     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4428     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4429     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4430     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4431     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4432     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4433     unlock_user_struct(target_sd, target_addr, 1);
4434     return 0;
4435 }
4436 
4437 struct  target_shminfo {
4438     abi_ulong shmmax;
4439     abi_ulong shmmin;
4440     abi_ulong shmmni;
4441     abi_ulong shmseg;
4442     abi_ulong shmall;
4443 };
4444 
4445 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4446                                               struct shminfo *host_shminfo)
4447 {
4448     struct target_shminfo *target_shminfo;
4449     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4450         return -TARGET_EFAULT;
4451     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4452     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4453     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4454     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4455     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4456     unlock_user_struct(target_shminfo, target_addr, 1);
4457     return 0;
4458 }
4459 
4460 struct target_shm_info {
4461     int used_ids;
4462     abi_ulong shm_tot;
4463     abi_ulong shm_rss;
4464     abi_ulong shm_swp;
4465     abi_ulong swap_attempts;
4466     abi_ulong swap_successes;
4467 };
4468 
4469 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4470                                                struct shm_info *host_shm_info)
4471 {
4472     struct target_shm_info *target_shm_info;
4473     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4474         return -TARGET_EFAULT;
4475     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4476     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4477     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4478     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4479     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4480     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4481     unlock_user_struct(target_shm_info, target_addr, 1);
4482     return 0;
4483 }
4484 
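/* do_shmctl() must return target values and target errnos. */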
4485 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4486 {
4487     struct shmid_ds dsarg;
4488     struct shminfo shminfo;
4489     struct shm_info shm_info;
4490     abi_long ret = -TARGET_EINVAL;
4491 
4492     cmd &= 0xff;
4493 
4494     switch(cmd) {
4495     case IPC_STAT:
4496     case IPC_SET:
4497     case SHM_STAT:
4498         if (target_to_host_shmid_ds(&dsarg, buf))
4499             return -TARGET_EFAULT;
4500         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4501         if (host_to_target_shmid_ds(buf, &dsarg))
4502             return -TARGET_EFAULT;
4503         break;
4504     case IPC_INFO:
4505         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4506         if (host_to_target_shminfo(buf, &shminfo))
4507             return -TARGET_EFAULT;
4508         break;
4509     case SHM_INFO:
4510         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4511         if (host_to_target_shm_info(buf, &shm_info))
4512             return -TARGET_EFAULT;
4513         break;
4514     case IPC_RMID:
4515     case SHM_LOCK:
4516     case SHM_UNLOCK:
4517         ret = get_errno(shmctl(shmid, cmd, NULL));
4518         break;
4519     }
4520 
4521     return ret;
4522 }
4523 
4524 #ifdef TARGET_NR_ipc
4525 /* ??? This only works with linear mappings.  */
4526 /* do_ipc() must return target values and target errnos. */
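/*
 * The multiplexed ipc(2) entry point packs a call version into the top
 * 16 bits of 'call' and the IPCOP_* operation into the low 16 bits, as
 * the decoding below shows; e.g. a guest libc issuing a plain semop()
 * typically arrives here with call == IPCOP_semop and version == 0.
 * (Illustrative only; the exact encoding a given libc uses may differ.)
 */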
4527 static abi_long do_ipc(CPUArchState *cpu_env,
4528                        unsigned int call, abi_long first,
4529                        abi_long second, abi_long third,
4530                        abi_long ptr, abi_long fifth)
4531 {
4532     int version;
4533     abi_long ret = 0;
4534 
4535     version = call >> 16;
4536     call &= 0xffff;
4537 
4538     switch (call) {
4539     case IPCOP_semop:
4540         ret = do_semtimedop(first, ptr, second, 0, false);
4541         break;
4542     case IPCOP_semtimedop:
4543     /*
4544      * The s390 sys_ipc variant has only five parameters instead of six
4545      * (as for default variant) and the only difference is the handling of
4546      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4547      * to a struct timespec where the generic variant uses fifth parameter.
4548      */
4549 #if defined(TARGET_S390X)
4550         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4551 #else
4552         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4553 #endif
4554         break;
4555 
4556     case IPCOP_semget:
4557         ret = get_errno(semget(first, second, third));
4558         break;
4559 
4560     case IPCOP_semctl: {
4561         /* The semun argument to semctl is passed by value, so dereference the
4562          * ptr argument. */
4563         abi_ulong atptr;
4564         get_user_ual(atptr, ptr);
4565         ret = do_semctl(first, second, third, atptr);
4566         break;
4567     }
4568 
4569     case IPCOP_msgget:
4570         ret = get_errno(msgget(first, second));
4571         break;
4572 
4573     case IPCOP_msgsnd:
4574         ret = do_msgsnd(first, ptr, second, third);
4575         break;
4576 
4577     case IPCOP_msgctl:
4578         ret = do_msgctl(first, second, ptr);
4579         break;
4580 
4581     case IPCOP_msgrcv:
4582         switch (version) {
4583         case 0:
4584             {
4585                 struct target_ipc_kludge {
4586                     abi_long msgp;
4587                     abi_long msgtyp;
4588                 } *tmp;
4589 
4590                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4591                     ret = -TARGET_EFAULT;
4592                     break;
4593                 }
4594 
4595                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4596 
4597                 unlock_user_struct(tmp, ptr, 0);
4598                 break;
4599             }
4600         default:
4601             ret = do_msgrcv(first, ptr, second, fifth, third);
4602         }
4603         break;
4604 
4605     case IPCOP_shmat:
4606         switch (version) {
4607         default:
4608         {
4609             abi_ulong raddr;
4610             raddr = target_shmat(cpu_env, first, ptr, second);
4611             if (is_error(raddr))
4612                 return get_errno(raddr);
4613             if (put_user_ual(raddr, third))
4614                 return -TARGET_EFAULT;
4615             break;
4616         }
4617         case 1:
4618             ret = -TARGET_EINVAL;
4619             break;
4620         }
4621 	break;
4622     case IPCOP_shmdt:
4623         ret = target_shmdt(ptr);
4624 	break;
4625 
4626     case IPCOP_shmget:
4627 	/* IPC_* flag values are the same on all linux platforms */
4628 	ret = get_errno(shmget(first, second, third));
4629 	break;
4630 
4631 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4632     case IPCOP_shmctl:
4633         ret = do_shmctl(first, second, ptr);
4634         break;
4635     default:
4636         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4637                       call, version);
4638 	ret = -TARGET_ENOSYS;
4639 	break;
4640     }
4641     return ret;
4642 }
4643 #endif
4644 
4645 /* kernel structure types definitions */
4646 
4647 #define STRUCT(name, ...) STRUCT_ ## name,
4648 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4649 enum {
4650 #include "syscall_types.h"
4651 STRUCT_MAX
4652 };
4653 #undef STRUCT
4654 #undef STRUCT_SPECIAL
4655 
4656 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4657 #define STRUCT_SPECIAL(name)
4658 #include "syscall_types.h"
4659 #undef STRUCT
4660 #undef STRUCT_SPECIAL
4661 
4662 #define MAX_STRUCT_SIZE 4096
4663 
4664 #ifdef CONFIG_FIEMAP
4665 /* So fiemap access checks don't overflow on 32 bit systems.
4666  * This is very slightly smaller than the limit imposed by
4667  * the underlying kernel.
4668  */
4669 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4670                             / sizeof(struct fiemap_extent))
4671 
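/*
 * Worked example of the limit above: struct fiemap_extent is 56 bytes
 * on common hosts, so FIEMAP_MAX_EXTENTS caps fm_extent_count such that
 * sizeof(struct fiemap) + count * sizeof(struct fiemap_extent) cannot
 * wrap past UINT_MAX when outbufsz is computed below.  (The concrete
 * struct size is host dependent; only the boundedness matters.)
 */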
4672 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4673                                        int fd, int cmd, abi_long arg)
4674 {
4675     /* The parameter for this ioctl is a struct fiemap followed
4676      * by an array of struct fiemap_extent whose size is set
4677      * in fiemap->fm_extent_count. The array is filled in by the
4678      * ioctl.
4679      */
4680     int target_size_in, target_size_out;
4681     struct fiemap *fm;
4682     const argtype *arg_type = ie->arg_type;
4683     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4684     void *argptr, *p;
4685     abi_long ret;
4686     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4687     uint32_t outbufsz;
4688     int free_fm = 0;
4689 
4690     assert(arg_type[0] == TYPE_PTR);
4691     assert(ie->access == IOC_RW);
4692     arg_type++;
4693     target_size_in = thunk_type_size(arg_type, 0);
4694     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4695     if (!argptr) {
4696         return -TARGET_EFAULT;
4697     }
4698     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4699     unlock_user(argptr, arg, 0);
4700     fm = (struct fiemap *)buf_temp;
4701     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4702         return -TARGET_EINVAL;
4703     }
4704 
4705     outbufsz = sizeof (*fm) +
4706         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4707 
4708     if (outbufsz > MAX_STRUCT_SIZE) {
4709         /* We can't fit all the extents into the fixed size buffer.
4710          * Allocate one that is large enough and use it instead.
4711          */
4712         fm = g_try_malloc(outbufsz);
4713         if (!fm) {
4714             return -TARGET_ENOMEM;
4715         }
4716         memcpy(fm, buf_temp, sizeof(struct fiemap));
4717         free_fm = 1;
4718     }
4719     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4720     if (!is_error(ret)) {
4721         target_size_out = target_size_in;
4722         /* An extent_count of 0 means we were only counting the extents
4723          * so there are no structs to copy
4724          */
4725         if (fm->fm_extent_count != 0) {
4726             target_size_out += fm->fm_mapped_extents * extent_size;
4727         }
4728         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4729         if (!argptr) {
4730             ret = -TARGET_EFAULT;
4731         } else {
4732             /* Convert the struct fiemap */
4733             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4734             if (fm->fm_extent_count != 0) {
4735                 p = argptr + target_size_in;
4736                 /* ...and then all the struct fiemap_extents */
4737                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4738                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4739                                   THUNK_TARGET);
4740                     p += extent_size;
4741                 }
4742             }
4743             unlock_user(argptr, arg, target_size_out);
4744         }
4745     }
4746     if (free_fm) {
4747         g_free(fm);
4748     }
4749     return ret;
4750 }
4751 #endif
4752 
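/*
 * SIOCGIFCONF: the guest's struct ifconf carries a nested ifc_buf
 * pointer and an ifc_len expressed in *target* struct ifreq units,
 * while the host kernel fills a buffer of *host* struct ifreq.  The
 * code below therefore rescales the length in both directions and
 * converts each ifreq entry individually on the way back out.
 */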
4753 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4754                                 int fd, int cmd, abi_long arg)
4755 {
4756     const argtype *arg_type = ie->arg_type;
4757     int target_size;
4758     void *argptr;
4759     int ret;
4760     struct ifconf *host_ifconf;
4761     uint32_t outbufsz;
4762     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4763     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4764     int target_ifreq_size;
4765     int nb_ifreq;
4766     int free_buf = 0;
4767     int i;
4768     int target_ifc_len;
4769     abi_long target_ifc_buf;
4770     int host_ifc_len;
4771     char *host_ifc_buf;
4772 
4773     assert(arg_type[0] == TYPE_PTR);
4774     assert(ie->access == IOC_RW);
4775 
4776     arg_type++;
4777     target_size = thunk_type_size(arg_type, 0);
4778 
4779     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4780     if (!argptr)
4781         return -TARGET_EFAULT;
4782     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4783     unlock_user(argptr, arg, 0);
4784 
4785     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4786     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4787     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4788 
4789     if (target_ifc_buf != 0) {
4790         target_ifc_len = host_ifconf->ifc_len;
4791         nb_ifreq = target_ifc_len / target_ifreq_size;
4792         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4793 
4794         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4795         if (outbufsz > MAX_STRUCT_SIZE) {
4796             /*
4797              * We can't fit all the ifreq entries into the fixed size buffer.
4798              * Allocate one that is large enough and use it instead.
4799              */
4800             host_ifconf = g_try_malloc(outbufsz);
4801             if (!host_ifconf) {
4802                 return -TARGET_ENOMEM;
4803             }
4804             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4805             free_buf = 1;
4806         }
4807         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4808 
4809         host_ifconf->ifc_len = host_ifc_len;
4810     } else {
4811       host_ifc_buf = NULL;
4812     }
4813     host_ifconf->ifc_buf = host_ifc_buf;
4814 
4815     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4816     if (!is_error(ret)) {
4817 	/* convert host ifc_len to target ifc_len */
4818 
4819         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4820         target_ifc_len = nb_ifreq * target_ifreq_size;
4821         host_ifconf->ifc_len = target_ifc_len;
4822 
4823 	/* restore target ifc_buf */
4824 
4825         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4826 
4827 	/* copy struct ifconf to target user */
4828 
4829         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4830         if (!argptr)
4831             return -TARGET_EFAULT;
4832         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4833         unlock_user(argptr, arg, target_size);
4834 
4835         if (target_ifc_buf != 0) {
4836             /* copy ifreq[] to target user */
4837             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4838             for (i = 0; i < nb_ifreq ; i++) {
4839                 thunk_convert(argptr + i * target_ifreq_size,
4840                               host_ifc_buf + i * sizeof(struct ifreq),
4841                               ifreq_arg_type, THUNK_TARGET);
4842             }
4843             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4844         }
4845     }
4846 
4847     if (free_buf) {
4848         g_free(host_ifconf);
4849     }
4850 
4851     return ret;
4852 }
4853 
4854 #if defined(CONFIG_USBFS)
4855 #if HOST_LONG_BITS > 64
4856 #error USBDEVFS thunks do not support >64 bit hosts yet.
4857 #endif
4858 struct live_urb {
4859     uint64_t target_urb_adr;
4860     uint64_t target_buf_adr;
4861     char *target_buf_ptr;
4862     struct usbdevfs_urb host_urb;
4863 };
4864 
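/*
 * Each submitted URB is tracked by a live_urb record so that the
 * asynchronous REAPURB/DISCARDURB ioctls can translate between the
 * guest's URB address and the host copy handed to the kernel.  The
 * hash table is keyed on target_urb_adr; g_int64_hash/g_int64_equal
 * work because that 64-bit field is the first member of the struct,
 * so a pointer to the struct doubles as a pointer to the key.
 */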
4865 static GHashTable *usbdevfs_urb_hashtable(void)
4866 {
4867     static GHashTable *urb_hashtable;
4868 
4869     if (!urb_hashtable) {
4870         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4871     }
4872     return urb_hashtable;
4873 }
4874 
4875 static void urb_hashtable_insert(struct live_urb *urb)
4876 {
4877     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4878     g_hash_table_insert(urb_hashtable, urb, urb);
4879 }
4880 
4881 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4882 {
4883     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4884     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4885 }
4886 
4887 static void urb_hashtable_remove(struct live_urb *urb)
4888 {
4889     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4890     g_hash_table_remove(urb_hashtable, urb);
4891 }
4892 
4893 static abi_long
4894 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4895                           int fd, int cmd, abi_long arg)
4896 {
4897     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4898     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4899     struct live_urb *lurb;
4900     void *argptr;
4901     uint64_t hurb;
4902     int target_size;
4903     uintptr_t target_urb_adr;
4904     abi_long ret;
4905 
4906     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4907 
4908     memset(buf_temp, 0, sizeof(uint64_t));
4909     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4910     if (is_error(ret)) {
4911         return ret;
4912     }
4913 
4914     memcpy(&hurb, buf_temp, sizeof(uint64_t));
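    /*
     * The kernel returns the address of the host usbdevfs_urb that was
     * embedded in a live_urb; step back by offsetof() (a manual
     * container_of) to recover the bookkeeping record around it.
     */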
4915     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4916     if (!lurb->target_urb_adr) {
4917         return -TARGET_EFAULT;
4918     }
4919     urb_hashtable_remove(lurb);
4920     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4921         lurb->host_urb.buffer_length);
4922     lurb->target_buf_ptr = NULL;
4923 
4924     /* restore the guest buffer pointer */
4925     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4926 
4927     /* update the guest urb struct */
4928     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4929     if (!argptr) {
4930         g_free(lurb);
4931         return -TARGET_EFAULT;
4932     }
4933     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4934     unlock_user(argptr, lurb->target_urb_adr, target_size);
4935 
4936     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4937     /* write back the urb handle */
4938     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4939     if (!argptr) {
4940         g_free(lurb);
4941         return -TARGET_EFAULT;
4942     }
4943 
4944     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4945     target_urb_adr = lurb->target_urb_adr;
4946     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4947     unlock_user(argptr, arg, target_size);
4948 
4949     g_free(lurb);
4950     return ret;
4951 }
4952 
4953 static abi_long
4954 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4955                              uint8_t *buf_temp __attribute__((unused)),
4956                              int fd, int cmd, abi_long arg)
4957 {
4958     struct live_urb *lurb;
4959 
4960     /* map target address back to host URB with metadata. */
4961     lurb = urb_hashtable_lookup(arg);
4962     if (!lurb) {
4963         return -TARGET_EFAULT;
4964     }
4965     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4966 }
4967 
4968 static abi_long
4969 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4970                             int fd, int cmd, abi_long arg)
4971 {
4972     const argtype *arg_type = ie->arg_type;
4973     int target_size;
4974     abi_long ret;
4975     void *argptr;
4976     int rw_dir;
4977     struct live_urb *lurb;
4978 
4979     /*
4980      * each submitted URB needs to map to a unique ID for the
4981      * kernel, and that unique ID needs to be a pointer to
4982      * host memory.  hence, we need to malloc for each URB.
4983      * isochronous transfers have a variable length struct.
4984      */
4985     arg_type++;
4986     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4987 
4988     /* construct host copy of urb and metadata */
4989     lurb = g_try_new0(struct live_urb, 1);
4990     if (!lurb) {
4991         return -TARGET_ENOMEM;
4992     }
4993 
4994     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4995     if (!argptr) {
4996         g_free(lurb);
4997         return -TARGET_EFAULT;
4998     }
4999     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5000     unlock_user(argptr, arg, 0);
5001 
5002     lurb->target_urb_adr = arg;
5003     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5004 
5005     /* buffer space used depends on endpoint type so lock the entire buffer */
5006     /* control type urbs should check the buffer contents for true direction */
5007     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5008     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5009         lurb->host_urb.buffer_length, 1);
5010     if (lurb->target_buf_ptr == NULL) {
5011         g_free(lurb);
5012         return -TARGET_EFAULT;
5013     }
5014 
5015     /* update buffer pointer in host copy */
5016     lurb->host_urb.buffer = lurb->target_buf_ptr;
5017 
5018     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5019     if (is_error(ret)) {
5020         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5021         g_free(lurb);
5022     } else {
5023         urb_hashtable_insert(lurb);
5024     }
5025 
5026     return ret;
5027 }
5028 #endif /* CONFIG_USBFS */
5029 
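/*
 * Device-mapper ioctls pass a struct dm_ioctl followed by a variable
 * amount of command-specific payload, located via data_start and
 * bounded by data_size.  buf_temp is only MAX_STRUCT_SIZE bytes, so
 * the whole request is staged in a separately allocated big_buf and
 * the payload is converted per command in both directions.
 */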
5030 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5031                             int cmd, abi_long arg)
5032 {
5033     void *argptr;
5034     struct dm_ioctl *host_dm;
5035     abi_long guest_data;
5036     uint32_t guest_data_size;
5037     int target_size;
5038     const argtype *arg_type = ie->arg_type;
5039     abi_long ret;
5040     void *big_buf = NULL;
5041     char *host_data;
5042 
5043     arg_type++;
5044     target_size = thunk_type_size(arg_type, 0);
5045     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5046     if (!argptr) {
5047         ret = -TARGET_EFAULT;
5048         goto out;
5049     }
5050     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5051     unlock_user(argptr, arg, 0);
5052 
5053     /* buf_temp is too small, so fetch things into a bigger buffer */
5054     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5055     memcpy(big_buf, buf_temp, target_size);
5056     buf_temp = big_buf;
5057     host_dm = big_buf;
5058 
5059     guest_data = arg + host_dm->data_start;
5060     if ((guest_data - arg) < 0) {
5061         ret = -TARGET_EINVAL;
5062         goto out;
5063     }
5064     guest_data_size = host_dm->data_size - host_dm->data_start;
5065     host_data = (char*)host_dm + host_dm->data_start;
5066 
5067     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5068     if (!argptr) {
5069         ret = -TARGET_EFAULT;
5070         goto out;
5071     }
5072 
5073     switch (ie->host_cmd) {
5074     case DM_REMOVE_ALL:
5075     case DM_LIST_DEVICES:
5076     case DM_DEV_CREATE:
5077     case DM_DEV_REMOVE:
5078     case DM_DEV_SUSPEND:
5079     case DM_DEV_STATUS:
5080     case DM_DEV_WAIT:
5081     case DM_TABLE_STATUS:
5082     case DM_TABLE_CLEAR:
5083     case DM_TABLE_DEPS:
5084     case DM_LIST_VERSIONS:
5085         /* no input data */
5086         break;
5087     case DM_DEV_RENAME:
5088     case DM_DEV_SET_GEOMETRY:
5089         /* data contains only strings */
5090         memcpy(host_data, argptr, guest_data_size);
5091         break;
5092     case DM_TARGET_MSG:
5093         memcpy(host_data, argptr, guest_data_size);
5094         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5095         break;
5096     case DM_TABLE_LOAD:
5097     {
5098         void *gspec = argptr;
5099         void *cur_data = host_data;
5100         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5101         int spec_size = thunk_type_size(dm_arg_type, 0);
5102         int i;
5103 
5104         for (i = 0; i < host_dm->target_count; i++) {
5105             struct dm_target_spec *spec = cur_data;
5106             uint32_t next;
5107             int slen;
5108 
5109             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5110             slen = strlen((char*)gspec + spec_size) + 1;
5111             next = spec->next;
5112             spec->next = sizeof(*spec) + slen;
5113             strcpy((char*)&spec[1], gspec + spec_size);
5114             gspec += next;
5115             cur_data += spec->next;
5116         }
5117         break;
5118     }
5119     default:
5120         ret = -TARGET_EINVAL;
5121         unlock_user(argptr, guest_data, 0);
5122         goto out;
5123     }
5124     unlock_user(argptr, guest_data, 0);
5125 
5126     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5127     if (!is_error(ret)) {
5128         guest_data = arg + host_dm->data_start;
5129         guest_data_size = host_dm->data_size - host_dm->data_start;
5130         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5131         switch (ie->host_cmd) {
5132         case DM_REMOVE_ALL:
5133         case DM_DEV_CREATE:
5134         case DM_DEV_REMOVE:
5135         case DM_DEV_RENAME:
5136         case DM_DEV_SUSPEND:
5137         case DM_DEV_STATUS:
5138         case DM_TABLE_LOAD:
5139         case DM_TABLE_CLEAR:
5140         case DM_TARGET_MSG:
5141         case DM_DEV_SET_GEOMETRY:
5142             /* no return data */
5143             break;
5144         case DM_LIST_DEVICES:
5145         {
5146             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5147             uint32_t remaining_data = guest_data_size;
5148             void *cur_data = argptr;
5149             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5150             int nl_size = 12; /* can't use thunk_size due to alignment */
5151 
5152             while (1) {
5153                 uint32_t next = nl->next;
5154                 if (next) {
5155                     nl->next = nl_size + (strlen(nl->name) + 1);
5156                 }
5157                 if (remaining_data < nl->next) {
5158                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5159                     break;
5160                 }
5161                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5162                 strcpy(cur_data + nl_size, nl->name);
5163                 cur_data += nl->next;
5164                 remaining_data -= nl->next;
5165                 if (!next) {
5166                     break;
5167                 }
5168                 nl = (void*)nl + next;
5169             }
5170             break;
5171         }
5172         case DM_DEV_WAIT:
5173         case DM_TABLE_STATUS:
5174         {
5175             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5176             void *cur_data = argptr;
5177             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5178             int spec_size = thunk_type_size(dm_arg_type, 0);
5179             int i;
5180 
5181             for (i = 0; i < host_dm->target_count; i++) {
5182                 uint32_t next = spec->next;
5183                 int slen = strlen((char*)&spec[1]) + 1;
5184                 spec->next = (cur_data - argptr) + spec_size + slen;
5185                 if (guest_data_size < spec->next) {
5186                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5187                     break;
5188                 }
5189                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5190                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5191                 cur_data = argptr + spec->next;
5192                 spec = (void*)host_dm + host_dm->data_start + next;
5193             }
5194             break;
5195         }
5196         case DM_TABLE_DEPS:
5197         {
5198             void *hdata = (void*)host_dm + host_dm->data_start;
5199             int count = *(uint32_t*)hdata;
5200             uint64_t *hdev = hdata + 8;
5201             uint64_t *gdev = argptr + 8;
5202             int i;
5203 
5204             *(uint32_t*)argptr = tswap32(count);
5205             for (i = 0; i < count; i++) {
5206                 *gdev = tswap64(*hdev);
5207                 gdev++;
5208                 hdev++;
5209             }
5210             break;
5211         }
5212         case DM_LIST_VERSIONS:
5213         {
5214             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5215             uint32_t remaining_data = guest_data_size;
5216             void *cur_data = argptr;
5217             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5218             int vers_size = thunk_type_size(dm_arg_type, 0);
5219 
5220             while (1) {
5221                 uint32_t next = vers->next;
5222                 if (next) {
5223                     vers->next = vers_size + (strlen(vers->name) + 1);
5224                 }
5225                 if (remaining_data < vers->next) {
5226                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5227                     break;
5228                 }
5229                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5230                 strcpy(cur_data + vers_size, vers->name);
5231                 cur_data += vers->next;
5232                 remaining_data -= vers->next;
5233                 if (!next) {
5234                     break;
5235                 }
5236                 vers = (void*)vers + next;
5237             }
5238             break;
5239         }
5240         default:
5241             unlock_user(argptr, guest_data, 0);
5242             ret = -TARGET_EINVAL;
5243             goto out;
5244         }
5245         unlock_user(argptr, guest_data, guest_data_size);
5246 
5247         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5248         if (!argptr) {
5249             ret = -TARGET_EFAULT;
5250             goto out;
5251         }
5252         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5253         unlock_user(argptr, arg, target_size);
5254     }
5255 out:
5256     g_free(big_buf);
5257     return ret;
5258 }
5259 
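/*
 * BLKPG: the ioctl argument itself contains a 'data' pointer to a
 * struct blkpg_partition, so after converting the outer blkpg_ioctl_arg
 * we also fetch and convert the nested structure and point the host
 * copy at our local host_part before issuing the call.
 */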
5260 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5261                                int cmd, abi_long arg)
5262 {
5263     void *argptr;
5264     int target_size;
5265     const argtype *arg_type = ie->arg_type;
5266     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5267     abi_long ret;
5268 
5269     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5270     struct blkpg_partition host_part;
5271 
5272     /* Read and convert blkpg */
5273     arg_type++;
5274     target_size = thunk_type_size(arg_type, 0);
5275     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5276     if (!argptr) {
5277         ret = -TARGET_EFAULT;
5278         goto out;
5279     }
5280     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5281     unlock_user(argptr, arg, 0);
5282 
5283     switch (host_blkpg->op) {
5284     case BLKPG_ADD_PARTITION:
5285     case BLKPG_DEL_PARTITION:
5286         /* payload is struct blkpg_partition */
5287         break;
5288     default:
5289         /* Unknown opcode */
5290         ret = -TARGET_EINVAL;
5291         goto out;
5292     }
5293 
5294     /* Read and convert blkpg->data */
5295     arg = (abi_long)(uintptr_t)host_blkpg->data;
5296     target_size = thunk_type_size(part_arg_type, 0);
5297     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5298     if (!argptr) {
5299         ret = -TARGET_EFAULT;
5300         goto out;
5301     }
5302     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5303     unlock_user(argptr, arg, 0);
5304 
5305     /* Swizzle the data pointer to our local copy and call! */
5306     host_blkpg->data = &host_part;
5307     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5308 
5309 out:
5310     return ret;
5311 }
5312 
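/*
 * SIOCADDRT/SIOCDELRT: struct rtentry cannot go through the generic
 * thunk because its rt_dev member is a pointer to a device name string
 * in guest memory.  The fields are converted one by one so that rt_dev
 * can be locked as a guest string and replaced by a host pointer before
 * the ioctl, then unlocked again afterwards.
 */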
5313 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5314                                 int fd, int cmd, abi_long arg)
5315 {
5316     const argtype *arg_type = ie->arg_type;
5317     const StructEntry *se;
5318     const argtype *field_types;
5319     const int *dst_offsets, *src_offsets;
5320     int target_size;
5321     void *argptr;
5322     abi_ulong *target_rt_dev_ptr = NULL;
5323     unsigned long *host_rt_dev_ptr = NULL;
5324     abi_long ret;
5325     int i;
5326 
5327     assert(ie->access == IOC_W);
5328     assert(*arg_type == TYPE_PTR);
5329     arg_type++;
5330     assert(*arg_type == TYPE_STRUCT);
5331     target_size = thunk_type_size(arg_type, 0);
5332     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5333     if (!argptr) {
5334         return -TARGET_EFAULT;
5335     }
5336     arg_type++;
5337     assert(*arg_type == (int)STRUCT_rtentry);
5338     se = struct_entries + *arg_type++;
5339     assert(se->convert[0] == NULL);
5340     /* convert struct here to be able to catch rt_dev string */
5341     field_types = se->field_types;
5342     dst_offsets = se->field_offsets[THUNK_HOST];
5343     src_offsets = se->field_offsets[THUNK_TARGET];
5344     for (i = 0; i < se->nb_fields; i++) {
5345         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5346             assert(*field_types == TYPE_PTRVOID);
5347             target_rt_dev_ptr = argptr + src_offsets[i];
5348             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5349             if (*target_rt_dev_ptr != 0) {
5350                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5351                                                   tswapal(*target_rt_dev_ptr));
5352                 if (!*host_rt_dev_ptr) {
5353                     unlock_user(argptr, arg, 0);
5354                     return -TARGET_EFAULT;
5355                 }
5356             } else {
5357                 *host_rt_dev_ptr = 0;
5358             }
5359             field_types++;
5360             continue;
5361         }
5362         field_types = thunk_convert(buf_temp + dst_offsets[i],
5363                                     argptr + src_offsets[i],
5364                                     field_types, THUNK_HOST);
5365     }
5366     unlock_user(argptr, arg, 0);
5367 
5368     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5369 
5370     assert(host_rt_dev_ptr != NULL);
5371     assert(target_rt_dev_ptr != NULL);
5372     if (*host_rt_dev_ptr != 0) {
5373         unlock_user((void *)*host_rt_dev_ptr,
5374                     *target_rt_dev_ptr, 0);
5375     }
5376     return ret;
5377 }
5378 
5379 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5380                                      int fd, int cmd, abi_long arg)
5381 {
5382     int sig = target_to_host_signal(arg);
5383     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5384 }
5385 
5386 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5387                                     int fd, int cmd, abi_long arg)
5388 {
5389     struct timeval tv;
5390     abi_long ret;
5391 
5392     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5393     if (is_error(ret)) {
5394         return ret;
5395     }
5396 
5397     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5398         if (copy_to_user_timeval(arg, &tv)) {
5399             return -TARGET_EFAULT;
5400         }
5401     } else {
5402         if (copy_to_user_timeval64(arg, &tv)) {
5403             return -TARGET_EFAULT;
5404         }
5405     }
5406 
5407     return ret;
5408 }
5409 
5410 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5411                                       int fd, int cmd, abi_long arg)
5412 {
5413     struct timespec ts;
5414     abi_long ret;
5415 
5416     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5417     if (is_error(ret)) {
5418         return ret;
5419     }
5420 
5421     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5422         if (host_to_target_timespec(arg, &ts)) {
5423             return -TARGET_EFAULT;
5424         }
5425     } else {
5426         if (host_to_target_timespec64(arg, &ts)) {
5427             return -TARGET_EFAULT;
5428         }
5429     }
5430 
5431     return ret;
5432 }
5433 
5434 #ifdef TIOCGPTPEER
5435 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5436                                      int fd, int cmd, abi_long arg)
5437 {
5438     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5439     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5440 }
5441 #endif
5442 
5443 #ifdef HAVE_DRM_H
5444 
5445 static void unlock_drm_version(struct drm_version *host_ver,
5446                                struct target_drm_version *target_ver,
5447                                bool copy)
5448 {
5449     unlock_user(host_ver->name, target_ver->name,
5450                                 copy ? host_ver->name_len : 0);
5451     unlock_user(host_ver->date, target_ver->date,
5452                                 copy ? host_ver->date_len : 0);
5453     unlock_user(host_ver->desc, target_ver->desc,
5454                                 copy ? host_ver->desc_len : 0);
5455 }
5456 
5457 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5458                                           struct target_drm_version *target_ver)
5459 {
5460     memset(host_ver, 0, sizeof(*host_ver));
5461 
5462     __get_user(host_ver->name_len, &target_ver->name_len);
5463     if (host_ver->name_len) {
5464         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5465                                    target_ver->name_len, 0);
5466         if (!host_ver->name) {
5467             return -EFAULT;
5468         }
5469     }
5470 
5471     __get_user(host_ver->date_len, &target_ver->date_len);
5472     if (host_ver->date_len) {
5473         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5474                                    target_ver->date_len, 0);
5475         if (!host_ver->date) {
5476             goto err;
5477         }
5478     }
5479 
5480     __get_user(host_ver->desc_len, &target_ver->desc_len);
5481     if (host_ver->desc_len) {
5482         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5483                                    target_ver->desc_len, 0);
5484         if (!host_ver->desc) {
5485             goto err;
5486         }
5487     }
5488 
5489     return 0;
5490 err:
5491     unlock_drm_version(host_ver, target_ver, false);
5492     return -EFAULT;
5493 }
5494 
5495 static inline void host_to_target_drmversion(
5496                                           struct target_drm_version *target_ver,
5497                                           struct drm_version *host_ver)
5498 {
5499     __put_user(host_ver->version_major, &target_ver->version_major);
5500     __put_user(host_ver->version_minor, &target_ver->version_minor);
5501     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5502     __put_user(host_ver->name_len, &target_ver->name_len);
5503     __put_user(host_ver->date_len, &target_ver->date_len);
5504     __put_user(host_ver->desc_len, &target_ver->desc_len);
5505     unlock_drm_version(host_ver, target_ver, true);
5506 }
5507 
5508 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5509                              int fd, int cmd, abi_long arg)
5510 {
5511     struct drm_version *ver;
5512     struct target_drm_version *target_ver;
5513     abi_long ret;
5514 
5515     switch (ie->host_cmd) {
5516     case DRM_IOCTL_VERSION:
5517         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5518             return -TARGET_EFAULT;
5519         }
5520         ver = (struct drm_version *)buf_temp;
5521         ret = target_to_host_drmversion(ver, target_ver);
5522         if (!is_error(ret)) {
5523             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5524             if (is_error(ret)) {
5525                 unlock_drm_version(ver, target_ver, false);
5526             } else {
5527                 host_to_target_drmversion(target_ver, ver);
5528             }
5529         }
5530         unlock_user_struct(target_ver, arg, 0);
5531         return ret;
5532     }
5533     return -TARGET_ENOSYS;
5534 }
5535 
5536 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5537                                            struct drm_i915_getparam *gparam,
5538                                            int fd, abi_long arg)
5539 {
5540     abi_long ret;
5541     int value;
5542     struct target_drm_i915_getparam *target_gparam;
5543 
5544     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5545         return -TARGET_EFAULT;
5546     }
5547 
5548     __get_user(gparam->param, &target_gparam->param);
5549     gparam->value = &value;
5550     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5551     put_user_s32(value, target_gparam->value);
5552 
5553     unlock_user_struct(target_gparam, arg, 0);
5554     return ret;
5555 }
5556 
5557 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5558                                   int fd, int cmd, abi_long arg)
5559 {
5560     switch (ie->host_cmd) {
5561     case DRM_IOCTL_I915_GETPARAM:
5562         return do_ioctl_drm_i915_getparam(ie,
5563                                           (struct drm_i915_getparam *)buf_temp,
5564                                           fd, arg);
5565     default:
5566         return -TARGET_ENOSYS;
5567     }
5568 }
5569 
5570 #endif
5571 
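/*
 * TUNSETTXFILTER: struct tun_filter is followed by a flexible array of
 * 'count' Ethernet addresses (count * ETH_ALEN bytes), so the fixed
 * header and the address array are copied in separately, with a bound
 * check against MAX_STRUCT_SIZE since the result must fit in buf_temp.
 */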
5572 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5573                                         int fd, int cmd, abi_long arg)
5574 {
5575     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5576     struct tun_filter *target_filter;
5577     char *target_addr;
5578 
5579     assert(ie->access == IOC_W);
5580 
5581     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5582     if (!target_filter) {
5583         return -TARGET_EFAULT;
5584     }
5585     filter->flags = tswap16(target_filter->flags);
5586     filter->count = tswap16(target_filter->count);
5587     unlock_user(target_filter, arg, 0);
5588 
5589     if (filter->count) {
5590         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5591             MAX_STRUCT_SIZE) {
5592             return -TARGET_EFAULT;
5593         }
5594 
5595         target_addr = lock_user(VERIFY_READ,
5596                                 arg + offsetof(struct tun_filter, addr),
5597                                 filter->count * ETH_ALEN, 1);
5598         if (!target_addr) {
5599             return -TARGET_EFAULT;
5600         }
5601         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5602         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5603     }
5604 
5605     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5606 }
5607 
5608 IOCTLEntry ioctl_entries[] = {
5609 #define IOCTL(cmd, access, ...) \
5610     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5611 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5612     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5613 #define IOCTL_IGNORE(cmd) \
5614     { TARGET_ ## cmd, 0, #cmd },
5615 #include "ioctls.h"
5616     { 0, 0, },
5617 };
5618 
5619 /* ??? Implement proper locking for ioctls.  */
5620 /* do_ioctl() Must return target values and target errnos. */
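/*
 * Generic dispatcher: walk ioctl_entries (generated from ioctls.h)
 * until the target command number matches.  Entries with a do_ioctl
 * callback take the special-case paths above; otherwise the argument
 * is converted with the thunk machinery according to arg_type and the
 * declared IOC_R/IOC_W/IOC_RW access, e.g. an IOC_RW ioctl is copied
 * in, issued on the host, and copied back out of buf_temp.
 */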
5621 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5622 {
5623     const IOCTLEntry *ie;
5624     const argtype *arg_type;
5625     abi_long ret;
5626     uint8_t buf_temp[MAX_STRUCT_SIZE];
5627     int target_size;
5628     void *argptr;
5629 
5630     ie = ioctl_entries;
5631     for(;;) {
5632         if (ie->target_cmd == 0) {
5633             qemu_log_mask(
5634                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5635             return -TARGET_ENOTTY;
5636         }
5637         if (ie->target_cmd == cmd)
5638             break;
5639         ie++;
5640     }
5641     arg_type = ie->arg_type;
5642     if (ie->do_ioctl) {
5643         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5644     } else if (!ie->host_cmd) {
5645         /* Some architectures define BSD ioctls in their headers
5646            that are not implemented in Linux.  */
5647         return -TARGET_ENOTTY;
5648     }
5649 
5650     switch(arg_type[0]) {
5651     case TYPE_NULL:
5652         /* no argument */
5653         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5654         break;
5655     case TYPE_PTRVOID:
5656     case TYPE_INT:
5657     case TYPE_LONG:
5658     case TYPE_ULONG:
5659         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5660         break;
5661     case TYPE_PTR:
5662         arg_type++;
5663         target_size = thunk_type_size(arg_type, 0);
5664         switch(ie->access) {
5665         case IOC_R:
5666             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5667             if (!is_error(ret)) {
5668                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5669                 if (!argptr)
5670                     return -TARGET_EFAULT;
5671                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5672                 unlock_user(argptr, arg, target_size);
5673             }
5674             break;
5675         case IOC_W:
5676             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5677             if (!argptr)
5678                 return -TARGET_EFAULT;
5679             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5680             unlock_user(argptr, arg, 0);
5681             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5682             break;
5683         default:
5684         case IOC_RW:
5685             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5686             if (!argptr)
5687                 return -TARGET_EFAULT;
5688             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5689             unlock_user(argptr, arg, 0);
5690             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5691             if (!is_error(ret)) {
5692                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5693                 if (!argptr)
5694                     return -TARGET_EFAULT;
5695                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5696                 unlock_user(argptr, arg, target_size);
5697             }
5698             break;
5699         }
5700         break;
5701     default:
5702         qemu_log_mask(LOG_UNIMP,
5703                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5704                       (long)cmd, arg_type[0]);
5705         ret = -TARGET_ENOTTY;
5706         break;
5707     }
5708     return ret;
5709 }
5710 
5711 static const bitmask_transtbl iflag_tbl[] = {
5712         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5713         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5714         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5715         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5716         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5717         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5718         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5719         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5720         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5721         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5722         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5723         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5724         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5725         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5726         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5727 };
5728 
5729 static const bitmask_transtbl oflag_tbl[] = {
5730 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5731 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5732 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5733 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5734 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5735 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5736 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5737 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5738 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5739 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5740 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5741 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5742 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5743 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5744 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5745 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5746 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5747 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5748 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5749 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5750 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5751 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5752 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5753 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5754 };
5755 
5756 static const bitmask_transtbl cflag_tbl[] = {
5757 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5758 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5759 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5760 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5761 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5762 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5763 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5764 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5765 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5766 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5767 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5768 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5769 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5770 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5771 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5772 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5773 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5774 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5775 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5776 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5777 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5778 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5779 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5780 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5781 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5782 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5783 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5784 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5785 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5786 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5787 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5788 };
5789 
5790 static const bitmask_transtbl lflag_tbl[] = {
5791   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5792   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5793   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5794   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5795   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5796   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5797   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5798   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5799   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5800   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5801   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5802   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5803   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5804   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5805   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5806   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5807 };
5808 
5809 static void target_to_host_termios (void *dst, const void *src)
5810 {
5811     struct host_termios *host = dst;
5812     const struct target_termios *target = src;
5813 
5814     host->c_iflag =
5815         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5816     host->c_oflag =
5817         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5818     host->c_cflag =
5819         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5820     host->c_lflag =
5821         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5822     host->c_line = target->c_line;
5823 
5824     memset(host->c_cc, 0, sizeof(host->c_cc));
5825     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5826     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5827     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5828     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5829     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5830     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5831     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5832     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5833     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5834     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5835     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5836     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5837     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5838     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5839     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5840     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5841     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5842 }
5843 
5844 static void host_to_target_termios (void *dst, const void *src)
5845 {
5846     struct target_termios *target = dst;
5847     const struct host_termios *host = src;
5848 
5849     target->c_iflag =
5850         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5851     target->c_oflag =
5852         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5853     target->c_cflag =
5854         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5855     target->c_lflag =
5856         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5857     target->c_line = host->c_line;
5858 
5859     memset(target->c_cc, 0, sizeof(target->c_cc));
5860     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5861     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5862     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5863     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5864     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5865     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5866     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5867     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5868     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5869     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5870     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5871     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5872     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5873     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5874     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5875     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5876     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5877 }
5878 
5879 static const StructEntry struct_termios_def = {
5880     .convert = { host_to_target_termios, target_to_host_termios },
5881     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5882     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5883     .print = print_termios,
5884 };
5885 
5886 /* If the host does not provide these bits, they may be safely discarded. */
5887 #ifndef MAP_SYNC
5888 #define MAP_SYNC 0
5889 #endif
5890 #ifndef MAP_UNINITIALIZED
5891 #define MAP_UNINITIALIZED 0
5892 #endif
5893 
5894 static const bitmask_transtbl mmap_flags_tbl[] = {
5895     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5896     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5897       MAP_ANONYMOUS, MAP_ANONYMOUS },
5898     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5899       MAP_GROWSDOWN, MAP_GROWSDOWN },
5900     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5901       MAP_DENYWRITE, MAP_DENYWRITE },
5902     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5903       MAP_EXECUTABLE, MAP_EXECUTABLE },
5904     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5905     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5906       MAP_NORESERVE, MAP_NORESERVE },
5907     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5908     /* MAP_STACK had been ignored by the kernel for quite some time.
5909        Recognize it for the target insofar as we do not want to pass
5910        it through to the host.  */
5911     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5912     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5913     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5914     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5915       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5916     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5917       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5918 };
5919 
5920 /*
5921  * Arrange for legacy / undefined architecture specific flags to be
5922  * ignored by mmap handling code.
5923  */
5924 #ifndef TARGET_MAP_32BIT
5925 #define TARGET_MAP_32BIT 0
5926 #endif
5927 #ifndef TARGET_MAP_HUGE_2MB
5928 #define TARGET_MAP_HUGE_2MB 0
5929 #endif
5930 #ifndef TARGET_MAP_HUGE_1GB
5931 #define TARGET_MAP_HUGE_1GB 0
5932 #endif
5933 
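/*
 * Translate target mmap flags to host flags.  The mapping type
 * (PRIVATE / SHARED / SHARED_VALIDATE) is a multi-bit field and is
 * dispatched explicitly below; the remaining single-bit flags are
 * converted through mmap_flags_tbl above.
 */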
5934 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5935                         int target_flags, int fd, off_t offset)
5936 {
5937     /*
5938      * The historical set of flags that all mmap types implicitly support.
5939      */
5940     enum {
5941         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5942                                | TARGET_MAP_PRIVATE
5943                                | TARGET_MAP_FIXED
5944                                | TARGET_MAP_ANONYMOUS
5945                                | TARGET_MAP_DENYWRITE
5946                                | TARGET_MAP_EXECUTABLE
5947                                | TARGET_MAP_UNINITIALIZED
5948                                | TARGET_MAP_GROWSDOWN
5949                                | TARGET_MAP_LOCKED
5950                                | TARGET_MAP_NORESERVE
5951                                | TARGET_MAP_POPULATE
5952                                | TARGET_MAP_NONBLOCK
5953                                | TARGET_MAP_STACK
5954                                | TARGET_MAP_HUGETLB
5955                                | TARGET_MAP_32BIT
5956                                | TARGET_MAP_HUGE_2MB
5957                                | TARGET_MAP_HUGE_1GB
5958     };
5959     int host_flags;
5960 
5961     switch (target_flags & TARGET_MAP_TYPE) {
5962     case TARGET_MAP_PRIVATE:
5963         host_flags = MAP_PRIVATE;
5964         break;
5965     case TARGET_MAP_SHARED:
5966         host_flags = MAP_SHARED;
5967         break;
5968     case TARGET_MAP_SHARED_VALIDATE:
5969         /*
5970          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5971          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5972          */
5973         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5974             return -TARGET_EOPNOTSUPP;
5975         }
5976         host_flags = MAP_SHARED_VALIDATE;
5977         if (target_flags & TARGET_MAP_SYNC) {
5978             host_flags |= MAP_SYNC;
5979         }
5980         break;
5981     default:
5982         return -TARGET_EINVAL;
5983     }
5984     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5985 
5986     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5987 }
5988 
5989 /*
5990  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5991  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5992  */
5993 #if defined(TARGET_I386)
5994 
5995 /* NOTE: there is really only one LDT, shared by all threads */
5996 static uint8_t *ldt_table;
5997 
5998 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5999 {
6000     int size;
6001     void *p;
6002 
6003     if (!ldt_table)
6004         return 0;
6005     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6006     if (size > bytecount)
6007         size = bytecount;
6008     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6009     if (!p)
6010         return -TARGET_EFAULT;
6011     /* ??? Should this be byteswapped?  */
6012     memcpy(p, ldt_table, size);
6013     unlock_user(p, ptr, size);
6014     return size;
6015 }
6016 
6017 /* XXX: add locking support */
6018 static abi_long write_ldt(CPUX86State *env,
6019                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6020 {
6021     struct target_modify_ldt_ldt_s ldt_info;
6022     struct target_modify_ldt_ldt_s *target_ldt_info;
6023     int seg_32bit, contents, read_exec_only, limit_in_pages;
6024     int seg_not_present, useable, lm;
6025     uint32_t *lp, entry_1, entry_2;
6026 
6027     if (bytecount != sizeof(ldt_info))
6028         return -TARGET_EINVAL;
6029     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6030         return -TARGET_EFAULT;
6031     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6032     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6033     ldt_info.limit = tswap32(target_ldt_info->limit);
6034     ldt_info.flags = tswap32(target_ldt_info->flags);
6035     unlock_user_struct(target_ldt_info, ptr, 0);
6036 
6037     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6038         return -TARGET_EINVAL;
6039     seg_32bit = ldt_info.flags & 1;
6040     contents = (ldt_info.flags >> 1) & 3;
6041     read_exec_only = (ldt_info.flags >> 3) & 1;
6042     limit_in_pages = (ldt_info.flags >> 4) & 1;
6043     seg_not_present = (ldt_info.flags >> 5) & 1;
6044     useable = (ldt_info.flags >> 6) & 1;
6045 #ifdef TARGET_ABI32
6046     lm = 0;
6047 #else
6048     lm = (ldt_info.flags >> 7) & 1;
6049 #endif
6050     if (contents == 3) {
6051         if (oldmode)
6052             return -TARGET_EINVAL;
6053         if (seg_not_present == 0)
6054             return -TARGET_EINVAL;
6055     }
6056     /* allocate the LDT */
6057     if (!ldt_table) {
6058         env->ldt.base = target_mmap(0,
6059                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6060                                     PROT_READ|PROT_WRITE,
6061                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6062         if (env->ldt.base == -1)
6063             return -TARGET_ENOMEM;
6064         memset(g2h_untagged(env->ldt.base), 0,
6065                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6066         env->ldt.limit = 0xffff;
6067         ldt_table = g2h_untagged(env->ldt.base);
6068     }
6069 
6070     /* NOTE: same code as Linux kernel */
6071     /* Allow LDTs to be cleared by the user. */
6072     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6073         if (oldmode ||
6074             (contents == 0		&&
6075              read_exec_only == 1	&&
6076              seg_32bit == 0		&&
6077              limit_in_pages == 0	&&
6078              seg_not_present == 1	&&
6079              useable == 0 )) {
6080             entry_1 = 0;
6081             entry_2 = 0;
6082             goto install;
6083         }
6084     }
6085 
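    /*
     * Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor: entry_1 carries limit[15:0] and base[15:0], entry_2
     * carries base[31:16], limit[19:16] and the access/flag bits
     * (the 0x7000 constant sets the S bit and DPL 3).
     */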
6086     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6087         (ldt_info.limit & 0x0ffff);
6088     entry_2 = (ldt_info.base_addr & 0xff000000) |
6089         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6090         (ldt_info.limit & 0xf0000) |
6091         ((read_exec_only ^ 1) << 9) |
6092         (contents << 10) |
6093         ((seg_not_present ^ 1) << 15) |
6094         (seg_32bit << 22) |
6095         (limit_in_pages << 23) |
6096         (lm << 21) |
6097         0x7000;
6098     if (!oldmode)
6099         entry_2 |= (useable << 20);
6100 
6101     /* Install the new entry ...  */
6102 install:
6103     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6104     lp[0] = tswap32(entry_1);
6105     lp[1] = tswap32(entry_2);
6106     return 0;
6107 }
6108 
6109 /* specific and weird i386 syscalls */
6110 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6111                               unsigned long bytecount)
6112 {
6113     abi_long ret;
6114 
6115     switch (func) {
6116     case 0:
6117         ret = read_ldt(ptr, bytecount);
6118         break;
6119     case 1:
6120         ret = write_ldt(env, ptr, bytecount, 1);
6121         break;
6122     case 0x11:
6123         ret = write_ldt(env, ptr, bytecount, 0);
6124         break;
6125     default:
6126         ret = -TARGET_ENOSYS;
6127         break;
6128     }
6129     return ret;
6130 }
6131 
6132 #if defined(TARGET_ABI32)
6133 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6134 {
6135     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6136     struct target_modify_ldt_ldt_s ldt_info;
6137     struct target_modify_ldt_ldt_s *target_ldt_info;
6138     int seg_32bit, contents, read_exec_only, limit_in_pages;
6139     int seg_not_present, useable, lm;
6140     uint32_t *lp, entry_1, entry_2;
6141     int i;
6142 
6143     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6144     if (!target_ldt_info)
6145         return -TARGET_EFAULT;
6146     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6147     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6148     ldt_info.limit = tswap32(target_ldt_info->limit);
6149     ldt_info.flags = tswap32(target_ldt_info->flags);
6150     if (ldt_info.entry_number == -1) {
6151         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6152             if (gdt_table[i] == 0) {
6153                 ldt_info.entry_number = i;
6154                 target_ldt_info->entry_number = tswap32(i);
6155                 break;
6156             }
6157         }
6158     }
6159     unlock_user_struct(target_ldt_info, ptr, 1);
6160 
6161     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6162         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6163            return -TARGET_EINVAL;
6164     seg_32bit = ldt_info.flags & 1;
6165     contents = (ldt_info.flags >> 1) & 3;
6166     read_exec_only = (ldt_info.flags >> 3) & 1;
6167     limit_in_pages = (ldt_info.flags >> 4) & 1;
6168     seg_not_present = (ldt_info.flags >> 5) & 1;
6169     useable = (ldt_info.flags >> 6) & 1;
6170 #ifdef TARGET_ABI32
6171     lm = 0;
6172 #else
6173     lm = (ldt_info.flags >> 7) & 1;
6174 #endif
6175 
6176     if (contents == 3) {
6177         if (seg_not_present == 0)
6178             return -TARGET_EINVAL;
6179     }
6180 
6181     /* NOTE: same code as Linux kernel */
6182     /* Allow LDTs to be cleared by the user. */
6183     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6184         if ((contents == 0             &&
6185              read_exec_only == 1       &&
6186              seg_32bit == 0            &&
6187              limit_in_pages == 0       &&
6188              seg_not_present == 1      &&
6189              useable == 0 )) {
6190             entry_1 = 0;
6191             entry_2 = 0;
6192             goto install;
6193         }
6194     }
6195 
6196     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6197         (ldt_info.limit & 0x0ffff);
6198     entry_2 = (ldt_info.base_addr & 0xff000000) |
6199         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6200         (ldt_info.limit & 0xf0000) |
6201         ((read_exec_only ^ 1) << 9) |
6202         (contents << 10) |
6203         ((seg_not_present ^ 1) << 15) |
6204         (seg_32bit << 22) |
6205         (limit_in_pages << 23) |
6206         (useable << 20) |
6207         (lm << 21) |
6208         0x7000;
6209 
6210     /* Install the new entry ...  */
6211 install:
6212     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6213     lp[0] = tswap32(entry_1);
6214     lp[1] = tswap32(entry_2);
6215     return 0;
6216 }
6217 
6218 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6219 {
6220     struct target_modify_ldt_ldt_s *target_ldt_info;
6221     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6222     uint32_t base_addr, limit, flags;
6223     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6224     int seg_not_present, useable, lm;
6225     uint32_t *lp, entry_1, entry_2;
6226 
6227     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6228     if (!target_ldt_info)
6229         return -TARGET_EFAULT;
6230     idx = tswap32(target_ldt_info->entry_number);
6231     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6232         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6233         unlock_user_struct(target_ldt_info, ptr, 1);
6234         return -TARGET_EINVAL;
6235     }
6236     lp = (uint32_t *)(gdt_table + idx);
6237     entry_1 = tswap32(lp[0]);
6238     entry_2 = tswap32(lp[1]);
6239 
6240     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6241     contents = (entry_2 >> 10) & 3;
6242     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6243     seg_32bit = (entry_2 >> 22) & 1;
6244     limit_in_pages = (entry_2 >> 23) & 1;
6245     useable = (entry_2 >> 20) & 1;
6246 #ifdef TARGET_ABI32
6247     lm = 0;
6248 #else
6249     lm = (entry_2 >> 21) & 1;
6250 #endif
6251     flags = (seg_32bit << 0) | (contents << 1) |
6252         (read_exec_only << 3) | (limit_in_pages << 4) |
6253         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6254     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6255     base_addr = (entry_1 >> 16) |
6256         (entry_2 & 0xff000000) |
6257         ((entry_2 & 0xff) << 16);
6258     target_ldt_info->base_addr = tswapal(base_addr);
6259     target_ldt_info->limit = tswap32(limit);
6260     target_ldt_info->flags = tswap32(flags);
6261     unlock_user_struct(target_ldt_info, ptr, 1);
6262     return 0;
6263 }
6264 
6265 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6266 {
6267     return -TARGET_ENOSYS;
6268 }
6269 #else
6270 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6271 {
6272     abi_long ret = 0;
6273     abi_ulong val;
6274     int idx;
6275 
6276     switch(code) {
6277     case TARGET_ARCH_SET_GS:
6278     case TARGET_ARCH_SET_FS:
6279         if (code == TARGET_ARCH_SET_GS)
6280             idx = R_GS;
6281         else
6282             idx = R_FS;
6283         cpu_x86_load_seg(env, idx, 0);
6284         env->segs[idx].base = addr;
6285         break;
6286     case TARGET_ARCH_GET_GS:
6287     case TARGET_ARCH_GET_FS:
6288         if (code == TARGET_ARCH_GET_GS)
6289             idx = R_GS;
6290         else
6291             idx = R_FS;
6292         val = env->segs[idx].base;
6293         if (put_user(val, addr, abi_ulong))
6294             ret = -TARGET_EFAULT;
6295         break;
6296     default:
6297         ret = -TARGET_EINVAL;
6298         break;
6299     }
6300     return ret;
6301 }
6302 #endif /* defined(TARGET_ABI32) */
6303 #endif /* defined(TARGET_I386) */
6304 
6305 /*
6306  * These constants are generic.  Supply any that are missing from the host.
6307  */
6308 #ifndef PR_SET_NAME
6309 # define PR_SET_NAME    15
6310 # define PR_GET_NAME    16
6311 #endif
6312 #ifndef PR_SET_FP_MODE
6313 # define PR_SET_FP_MODE 45
6314 # define PR_GET_FP_MODE 46
6315 # define PR_FP_MODE_FR   (1 << 0)
6316 # define PR_FP_MODE_FRE  (1 << 1)
6317 #endif
6318 #ifndef PR_SVE_SET_VL
6319 # define PR_SVE_SET_VL  50
6320 # define PR_SVE_GET_VL  51
6321 # define PR_SVE_VL_LEN_MASK  0xffff
6322 # define PR_SVE_VL_INHERIT   (1 << 17)
6323 #endif
6324 #ifndef PR_PAC_RESET_KEYS
6325 # define PR_PAC_RESET_KEYS  54
6326 # define PR_PAC_APIAKEY   (1 << 0)
6327 # define PR_PAC_APIBKEY   (1 << 1)
6328 # define PR_PAC_APDAKEY   (1 << 2)
6329 # define PR_PAC_APDBKEY   (1 << 3)
6330 # define PR_PAC_APGAKEY   (1 << 4)
6331 #endif
6332 #ifndef PR_SET_TAGGED_ADDR_CTRL
6333 # define PR_SET_TAGGED_ADDR_CTRL 55
6334 # define PR_GET_TAGGED_ADDR_CTRL 56
6335 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6336 #endif
6337 #ifndef PR_SET_IO_FLUSHER
6338 # define PR_SET_IO_FLUSHER 57
6339 # define PR_GET_IO_FLUSHER 58
6340 #endif
6341 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6342 # define PR_SET_SYSCALL_USER_DISPATCH 59
6343 #endif
6344 #ifndef PR_SME_SET_VL
6345 # define PR_SME_SET_VL  63
6346 # define PR_SME_GET_VL  64
6347 # define PR_SME_VL_LEN_MASK  0xffff
6348 # define PR_SME_VL_INHERIT   (1 << 17)
6349 #endif
6350 
6351 #include "target_prctl.h"
6352 
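/*
 * target_prctl.h may provide per-architecture do_prctl_* handlers.  Any
 * handler it leaves undefined is mapped below onto one of these stubs,
 * which simply fail with EINVAL.
 */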
6353 static abi_long do_prctl_inval0(CPUArchState *env)
6354 {
6355     return -TARGET_EINVAL;
6356 }
6357 
6358 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6359 {
6360     return -TARGET_EINVAL;
6361 }
6362 
6363 #ifndef do_prctl_get_fp_mode
6364 #define do_prctl_get_fp_mode do_prctl_inval0
6365 #endif
6366 #ifndef do_prctl_set_fp_mode
6367 #define do_prctl_set_fp_mode do_prctl_inval1
6368 #endif
6369 #ifndef do_prctl_sve_get_vl
6370 #define do_prctl_sve_get_vl do_prctl_inval0
6371 #endif
6372 #ifndef do_prctl_sve_set_vl
6373 #define do_prctl_sve_set_vl do_prctl_inval1
6374 #endif
6375 #ifndef do_prctl_reset_keys
6376 #define do_prctl_reset_keys do_prctl_inval1
6377 #endif
6378 #ifndef do_prctl_set_tagged_addr_ctrl
6379 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6380 #endif
6381 #ifndef do_prctl_get_tagged_addr_ctrl
6382 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6383 #endif
6384 #ifndef do_prctl_get_unalign
6385 #define do_prctl_get_unalign do_prctl_inval1
6386 #endif
6387 #ifndef do_prctl_set_unalign
6388 #define do_prctl_set_unalign do_prctl_inval1
6389 #endif
6390 #ifndef do_prctl_sme_get_vl
6391 #define do_prctl_sme_get_vl do_prctl_inval0
6392 #endif
6393 #ifndef do_prctl_sme_set_vl
6394 #define do_prctl_sme_set_vl do_prctl_inval1
6395 #endif
6396 
6397 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6398                          abi_long arg3, abi_long arg4, abi_long arg5)
6399 {
6400     abi_long ret;
6401 
6402     switch (option) {
6403     case PR_GET_PDEATHSIG:
6404         {
6405             int deathsig;
6406             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6407                                   arg3, arg4, arg5));
6408             if (!is_error(ret) &&
6409                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6410                 return -TARGET_EFAULT;
6411             }
6412             return ret;
6413         }
6414     case PR_SET_PDEATHSIG:
6415         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6416                                arg3, arg4, arg5));
6417     case PR_GET_NAME:
6418         {
6419             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6420             if (!name) {
6421                 return -TARGET_EFAULT;
6422             }
6423             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6424                                   arg3, arg4, arg5));
6425             unlock_user(name, arg2, 16);
6426             return ret;
6427         }
6428     case PR_SET_NAME:
6429         {
6430             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6431             if (!name) {
6432                 return -TARGET_EFAULT;
6433             }
6434             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6435                                   arg3, arg4, arg5));
6436             unlock_user(name, arg2, 0);
6437             return ret;
6438         }
6439     case PR_GET_FP_MODE:
6440         return do_prctl_get_fp_mode(env);
6441     case PR_SET_FP_MODE:
6442         return do_prctl_set_fp_mode(env, arg2);
6443     case PR_SVE_GET_VL:
6444         return do_prctl_sve_get_vl(env);
6445     case PR_SVE_SET_VL:
6446         return do_prctl_sve_set_vl(env, arg2);
6447     case PR_SME_GET_VL:
6448         return do_prctl_sme_get_vl(env);
6449     case PR_SME_SET_VL:
6450         return do_prctl_sme_set_vl(env, arg2);
6451     case PR_PAC_RESET_KEYS:
6452         if (arg3 || arg4 || arg5) {
6453             return -TARGET_EINVAL;
6454         }
6455         return do_prctl_reset_keys(env, arg2);
6456     case PR_SET_TAGGED_ADDR_CTRL:
6457         if (arg3 || arg4 || arg5) {
6458             return -TARGET_EINVAL;
6459         }
6460         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6461     case PR_GET_TAGGED_ADDR_CTRL:
6462         if (arg2 || arg3 || arg4 || arg5) {
6463             return -TARGET_EINVAL;
6464         }
6465         return do_prctl_get_tagged_addr_ctrl(env);
6466 
6467     case PR_GET_UNALIGN:
6468         return do_prctl_get_unalign(env, arg2);
6469     case PR_SET_UNALIGN:
6470         return do_prctl_set_unalign(env, arg2);
6471 
6472     case PR_CAP_AMBIENT:
6473     case PR_CAPBSET_READ:
6474     case PR_CAPBSET_DROP:
6475     case PR_GET_DUMPABLE:
6476     case PR_SET_DUMPABLE:
6477     case PR_GET_KEEPCAPS:
6478     case PR_SET_KEEPCAPS:
6479     case PR_GET_SECUREBITS:
6480     case PR_SET_SECUREBITS:
6481     case PR_GET_TIMING:
6482     case PR_SET_TIMING:
6483     case PR_GET_TIMERSLACK:
6484     case PR_SET_TIMERSLACK:
6485     case PR_MCE_KILL:
6486     case PR_MCE_KILL_GET:
6487     case PR_GET_NO_NEW_PRIVS:
6488     case PR_SET_NO_NEW_PRIVS:
6489     case PR_GET_IO_FLUSHER:
6490     case PR_SET_IO_FLUSHER:
6491     case PR_SET_CHILD_SUBREAPER:
6492     case PR_GET_SPECULATION_CTRL:
6493     case PR_SET_SPECULATION_CTRL:
6494         /* Some prctl options have no pointer arguments and can be passed straight through. */
6495         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6496 
6497     case PR_GET_CHILD_SUBREAPER:
6498         {
6499             int val;
6500             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6501                                   arg3, arg4, arg5));
6502             if (!is_error(ret) && put_user_s32(val, arg2)) {
6503                 return -TARGET_EFAULT;
6504             }
6505             return ret;
6506         }
6507 
6508     case PR_GET_TID_ADDRESS:
6509         {
6510             TaskState *ts = get_task_state(env_cpu(env));
6511             return put_user_ual(ts->child_tidptr, arg2);
6512         }
6513 
6514     case PR_GET_FPEXC:
6515     case PR_SET_FPEXC:
6516         /* Was used for SPE on PowerPC. */
6517         return -TARGET_EINVAL;
6518 
6519     case PR_GET_ENDIAN:
6520     case PR_SET_ENDIAN:
6521     case PR_GET_FPEMU:
6522     case PR_SET_FPEMU:
6523     case PR_SET_MM:
6524     case PR_GET_SECCOMP:
6525     case PR_SET_SECCOMP:
6526     case PR_SET_SYSCALL_USER_DISPATCH:
6527     case PR_GET_THP_DISABLE:
6528     case PR_SET_THP_DISABLE:
6529     case PR_GET_TSC:
6530     case PR_SET_TSC:
6531         /* Refuse these options so the guest cannot disable features that QEMU relies on. */
6532         return -TARGET_EINVAL;
6533 
6534     default:
6535         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6536                       option);
6537         return -TARGET_EINVAL;
6538     }
6539 }
6540 
6541 #define NEW_STACK_SIZE 0x40000
6542 
6543 
6544 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
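/*
 * State handed from do_fork() to the new thread's clone_func(): the parent
 * waits on 'cond' until the child has registered itself and published its
 * TID in 'tid'.
 */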
6545 typedef struct {
6546     CPUArchState *env;
6547     pthread_mutex_t mutex;
6548     pthread_cond_t cond;
6549     pthread_t thread;
6550     uint32_t tid;
6551     abi_ulong child_tidptr;
6552     abi_ulong parent_tidptr;
6553     sigset_t sigmask;
6554 } new_thread_info;
6555 
6556 static void *clone_func(void *arg)
6557 {
6558     new_thread_info *info = arg;
6559     CPUArchState *env;
6560     CPUState *cpu;
6561     TaskState *ts;
6562 
6563     rcu_register_thread();
6564     tcg_register_thread();
6565     env = info->env;
6566     cpu = env_cpu(env);
6567     thread_cpu = cpu;
6568     ts = get_task_state(cpu);
6569     info->tid = sys_gettid();
6570     task_settid(ts);
6571     if (info->child_tidptr)
6572         put_user_u32(info->tid, info->child_tidptr);
6573     if (info->parent_tidptr)
6574         put_user_u32(info->tid, info->parent_tidptr);
6575     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6576     /* Enable signals.  */
6577     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6578     /* Signal to the parent that we're ready.  */
6579     pthread_mutex_lock(&info->mutex);
6580     pthread_cond_broadcast(&info->cond);
6581     pthread_mutex_unlock(&info->mutex);
6582     /* Wait until the parent has finished initializing the tls state.  */
6583     pthread_mutex_lock(&clone_lock);
6584     pthread_mutex_unlock(&clone_lock);
6585     cpu_loop(env);
6586     /* never exits */
6587     return NULL;
6588 }
6589 
6590 /* do_fork() must return host values and target errnos (unlike most
6591    do_*() functions). */
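/*
 * Two strategies are used below: with CLONE_VM the new task is created as
 * a host pthread sharing this process, otherwise we fall back to a real
 * host fork().
 */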
6592 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6593                    abi_ulong parent_tidptr, target_ulong newtls,
6594                    abi_ulong child_tidptr)
6595 {
6596     CPUState *cpu = env_cpu(env);
6597     int ret;
6598     TaskState *ts;
6599     CPUState *new_cpu;
6600     CPUArchState *new_env;
6601     sigset_t sigmask;
6602 
6603     flags &= ~CLONE_IGNORED_FLAGS;
6604 
6605     /* Emulate vfork() with fork() */
6606     if (flags & CLONE_VFORK)
6607         flags &= ~(CLONE_VFORK | CLONE_VM);
6608 
6609     if (flags & CLONE_VM) {
6610         TaskState *parent_ts = get_task_state(cpu);
6611         new_thread_info info;
6612         pthread_attr_t attr;
6613 
6614         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6615             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6616             return -TARGET_EINVAL;
6617         }
6618 
6619         ts = g_new0(TaskState, 1);
6620         init_task_state(ts);
6621 
6622         /* Grab a mutex so that thread setup appears atomic.  */
6623         pthread_mutex_lock(&clone_lock);
6624 
6625         /*
6626          * If this is our first additional thread, we need to ensure we
6627          * generate code for parallel execution and flush old translations.
6628          * Do this now so that the copy gets CF_PARALLEL too.
6629          */
6630         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6631             tcg_cflags_set(cpu, CF_PARALLEL);
6632             tb_flush(cpu);
6633         }
6634 
6635         /* we create a new CPU instance. */
6636         new_env = cpu_copy(env);
6637         /* Init regs that differ from the parent.  */
6638         cpu_clone_regs_child(new_env, newsp, flags);
6639         cpu_clone_regs_parent(env, flags);
6640         new_cpu = env_cpu(new_env);
6641         new_cpu->opaque = ts;
6642         ts->bprm = parent_ts->bprm;
6643         ts->info = parent_ts->info;
6644         ts->signal_mask = parent_ts->signal_mask;
6645 
6646         if (flags & CLONE_CHILD_CLEARTID) {
6647             ts->child_tidptr = child_tidptr;
6648         }
6649 
6650         if (flags & CLONE_SETTLS) {
6651             cpu_set_tls (new_env, newtls);
6652         }
6653 
6654         memset(&info, 0, sizeof(info));
6655         pthread_mutex_init(&info.mutex, NULL);
6656         pthread_mutex_lock(&info.mutex);
6657         pthread_cond_init(&info.cond, NULL);
6658         info.env = new_env;
6659         if (flags & CLONE_CHILD_SETTID) {
6660             info.child_tidptr = child_tidptr;
6661         }
6662         if (flags & CLONE_PARENT_SETTID) {
6663             info.parent_tidptr = parent_tidptr;
6664         }
6665 
6666         ret = pthread_attr_init(&attr);
6667         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6668         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6669         /* It is not safe to deliver signals until the child has finished
6670            initializing, so temporarily block all signals.  */
6671         sigfillset(&sigmask);
6672         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6673         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6674 
6675         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6676         /* TODO: Free new CPU state if thread creation failed.  */
6677 
6678         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6679         pthread_attr_destroy(&attr);
6680         if (ret == 0) {
6681             /* Wait for the child to initialize.  */
6682             pthread_cond_wait(&info.cond, &info.mutex);
6683             ret = info.tid;
6684         } else {
6685             ret = -1;
6686         }
6687         pthread_mutex_unlock(&info.mutex);
6688         pthread_cond_destroy(&info.cond);
6689         pthread_mutex_destroy(&info.mutex);
6690         pthread_mutex_unlock(&clone_lock);
6691     } else {
6692         /* if no CLONE_VM, we consider it is a fork */
6693         if (flags & CLONE_INVALID_FORK_FLAGS) {
6694             return -TARGET_EINVAL;
6695         }
6696 
6697         /* We can't support custom termination signals */
6698         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6699             return -TARGET_EINVAL;
6700         }
6701 
6702 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6703         if (flags & CLONE_PIDFD) {
6704             return -TARGET_EINVAL;
6705         }
6706 #endif
6707 
6708         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6709         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6710             return -TARGET_EINVAL;
6711         }
6712 
6713         if (block_signals()) {
6714             return -QEMU_ERESTARTSYS;
6715         }
6716 
6717         fork_start();
6718         ret = fork();
6719         if (ret == 0) {
6720             /* Child Process.  */
6721             cpu_clone_regs_child(env, newsp, flags);
6722             fork_end(ret);
6723             /* There is a race condition here.  The parent process could
6724                theoretically read the TID in the child process before the child
6725                tid is set.  This would require using either ptrace
6726                (not implemented) or having *_tidptr to point at a shared memory
6727                mapping.  We can't repeat the spinlock hack used above because
6728                the child process gets its own copy of the lock.  */
6729             if (flags & CLONE_CHILD_SETTID)
6730                 put_user_u32(sys_gettid(), child_tidptr);
6731             if (flags & CLONE_PARENT_SETTID)
6732                 put_user_u32(sys_gettid(), parent_tidptr);
6733             ts = get_task_state(cpu);
6734             if (flags & CLONE_SETTLS)
6735                 cpu_set_tls (env, newtls);
6736             if (flags & CLONE_CHILD_CLEARTID)
6737                 ts->child_tidptr = child_tidptr;
6738         } else {
6739             cpu_clone_regs_parent(env, flags);
6740             if (flags & CLONE_PIDFD) {
6741                 int pid_fd = 0;
6742 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6743                 int pid_child = ret;
6744                 pid_fd = pidfd_open(pid_child, 0);
6745                 if (pid_fd >= 0) {
6746                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6747                                                | FD_CLOEXEC);
6748                 } else {
6749                         pid_fd = 0;
6750                 }
6751 #endif
6752                 put_user_u32(pid_fd, parent_tidptr);
6753             }
6754             fork_end(ret);
6755         }
6756         g_assert(!cpu_in_exclusive_context(cpu));
6757     }
6758     return ret;
6759 }
6760 
6761 /* Warning: does not handle Linux-specific flags... */
6762 static int target_to_host_fcntl_cmd(int cmd)
6763 {
6764     int ret;
6765 
6766     switch(cmd) {
6767     case TARGET_F_DUPFD:
6768     case TARGET_F_GETFD:
6769     case TARGET_F_SETFD:
6770     case TARGET_F_GETFL:
6771     case TARGET_F_SETFL:
6772     case TARGET_F_OFD_GETLK:
6773     case TARGET_F_OFD_SETLK:
6774     case TARGET_F_OFD_SETLKW:
6775         ret = cmd;
6776         break;
6777     case TARGET_F_GETLK:
6778         ret = F_GETLK;
6779         break;
6780     case TARGET_F_SETLK:
6781         ret = F_SETLK;
6782         break;
6783     case TARGET_F_SETLKW:
6784         ret = F_SETLKW;
6785         break;
6786     case TARGET_F_GETOWN:
6787         ret = F_GETOWN;
6788         break;
6789     case TARGET_F_SETOWN:
6790         ret = F_SETOWN;
6791         break;
6792     case TARGET_F_GETSIG:
6793         ret = F_GETSIG;
6794         break;
6795     case TARGET_F_SETSIG:
6796         ret = F_SETSIG;
6797         break;
6798 #if TARGET_ABI_BITS == 32
6799     case TARGET_F_GETLK64:
6800         ret = F_GETLK;
6801         break;
6802     case TARGET_F_SETLK64:
6803         ret = F_SETLK;
6804         break;
6805     case TARGET_F_SETLKW64:
6806         ret = F_SETLKW;
6807         break;
6808 #endif
6809     case TARGET_F_SETLEASE:
6810         ret = F_SETLEASE;
6811         break;
6812     case TARGET_F_GETLEASE:
6813         ret = F_GETLEASE;
6814         break;
6815 #ifdef F_DUPFD_CLOEXEC
6816     case TARGET_F_DUPFD_CLOEXEC:
6817         ret = F_DUPFD_CLOEXEC;
6818         break;
6819 #endif
6820     case TARGET_F_NOTIFY:
6821         ret = F_NOTIFY;
6822         break;
6823 #ifdef F_GETOWN_EX
6824     case TARGET_F_GETOWN_EX:
6825         ret = F_GETOWN_EX;
6826         break;
6827 #endif
6828 #ifdef F_SETOWN_EX
6829     case TARGET_F_SETOWN_EX:
6830         ret = F_SETOWN_EX;
6831         break;
6832 #endif
6833 #ifdef F_SETPIPE_SZ
6834     case TARGET_F_SETPIPE_SZ:
6835         ret = F_SETPIPE_SZ;
6836         break;
6837     case TARGET_F_GETPIPE_SZ:
6838         ret = F_GETPIPE_SZ;
6839         break;
6840 #endif
6841 #ifdef F_ADD_SEALS
6842     case TARGET_F_ADD_SEALS:
6843         ret = F_ADD_SEALS;
6844         break;
6845     case TARGET_F_GET_SEALS:
6846         ret = F_GET_SEALS;
6847         break;
6848 #endif
6849     default:
6850         ret = -TARGET_EINVAL;
6851         break;
6852     }
6853 
6854 #if defined(__powerpc64__)
6855     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6856      * 14, which are not supported by the kernel. The glibc fcntl() wrapper
6857      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6858      * the syscall directly, adjust to what the kernel supports.
6859      */
6860     if (ret >= F_GETLK && ret <= F_SETLKW) {
6861         ret -= F_GETLK - 5;
6862     }
6863 #endif
6864 
6865     return ret;
6866 }
6867 
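/*
 * FLOCK_TRANSTBL expands the same switch body under two different
 * definitions of TRANSTBL_CONVERT, so the target-to-host and
 * host-to-target l_type conversions below cannot drift apart.
 */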
6868 #define FLOCK_TRANSTBL \
6869     switch (type) { \
6870     TRANSTBL_CONVERT(F_RDLCK); \
6871     TRANSTBL_CONVERT(F_WRLCK); \
6872     TRANSTBL_CONVERT(F_UNLCK); \
6873     }
6874 
6875 static int target_to_host_flock(int type)
6876 {
6877 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6878     FLOCK_TRANSTBL
6879 #undef  TRANSTBL_CONVERT
6880     return -TARGET_EINVAL;
6881 }
6882 
6883 static int host_to_target_flock(int type)
6884 {
6885 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6886     FLOCK_TRANSTBL
6887 #undef  TRANSTBL_CONVERT
6888     /* If we don't know how to convert the value coming from the host,
6889      * copy it to the target field as-is.
6890      */
6891     return type;
6892 }
6893 
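/*
 * The flock copy helpers lock the guest structure, translate l_type via
 * the conversion functions above, and move the remaining fields with
 * __get_user()/__put_user(), which take care of byte order.
 */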
6894 static inline abi_long copy_from_user_flock(struct flock *fl,
6895                                             abi_ulong target_flock_addr)
6896 {
6897     struct target_flock *target_fl;
6898     int l_type;
6899 
6900     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6901         return -TARGET_EFAULT;
6902     }
6903 
6904     __get_user(l_type, &target_fl->l_type);
6905     l_type = target_to_host_flock(l_type);
6906     if (l_type < 0) {
6907         return l_type;
6908     }
6909     fl->l_type = l_type;
6910     __get_user(fl->l_whence, &target_fl->l_whence);
6911     __get_user(fl->l_start, &target_fl->l_start);
6912     __get_user(fl->l_len, &target_fl->l_len);
6913     __get_user(fl->l_pid, &target_fl->l_pid);
6914     unlock_user_struct(target_fl, target_flock_addr, 0);
6915     return 0;
6916 }
6917 
6918 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6919                                           const struct flock *fl)
6920 {
6921     struct target_flock *target_fl;
6922     short l_type;
6923 
6924     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6925         return -TARGET_EFAULT;
6926     }
6927 
6928     l_type = host_to_target_flock(fl->l_type);
6929     __put_user(l_type, &target_fl->l_type);
6930     __put_user(fl->l_whence, &target_fl->l_whence);
6931     __put_user(fl->l_start, &target_fl->l_start);
6932     __put_user(fl->l_len, &target_fl->l_len);
6933     __put_user(fl->l_pid, &target_fl->l_pid);
6934     unlock_user_struct(target_fl, target_flock_addr, 1);
6935     return 0;
6936 }
6937 
6938 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6939 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6940 
6941 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6942 struct target_oabi_flock64 {
6943     abi_short l_type;
6944     abi_short l_whence;
6945     abi_llong l_start;
6946     abi_llong l_len;
6947     abi_int   l_pid;
6948 } QEMU_PACKED;
6949 
6950 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6951                                                    abi_ulong target_flock_addr)
6952 {
6953     struct target_oabi_flock64 *target_fl;
6954     int l_type;
6955 
6956     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     __get_user(l_type, &target_fl->l_type);
6961     l_type = target_to_host_flock(l_type);
6962     if (l_type < 0) {
6963         return l_type;
6964     }
6965     fl->l_type = l_type;
6966     __get_user(fl->l_whence, &target_fl->l_whence);
6967     __get_user(fl->l_start, &target_fl->l_start);
6968     __get_user(fl->l_len, &target_fl->l_len);
6969     __get_user(fl->l_pid, &target_fl->l_pid);
6970     unlock_user_struct(target_fl, target_flock_addr, 0);
6971     return 0;
6972 }
6973 
6974 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6975                                                  const struct flock *fl)
6976 {
6977     struct target_oabi_flock64 *target_fl;
6978     short l_type;
6979 
6980     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6981         return -TARGET_EFAULT;
6982     }
6983 
6984     l_type = host_to_target_flock(fl->l_type);
6985     __put_user(l_type, &target_fl->l_type);
6986     __put_user(fl->l_whence, &target_fl->l_whence);
6987     __put_user(fl->l_start, &target_fl->l_start);
6988     __put_user(fl->l_len, &target_fl->l_len);
6989     __put_user(fl->l_pid, &target_fl->l_pid);
6990     unlock_user_struct(target_fl, target_flock_addr, 1);
6991     return 0;
6992 }
6993 #endif
6994 
6995 static inline abi_long copy_from_user_flock64(struct flock *fl,
6996                                               abi_ulong target_flock_addr)
6997 {
6998     struct target_flock64 *target_fl;
6999     int l_type;
7000 
7001     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7002         return -TARGET_EFAULT;
7003     }
7004 
7005     __get_user(l_type, &target_fl->l_type);
7006     l_type = target_to_host_flock(l_type);
7007     if (l_type < 0) {
7008         return l_type;
7009     }
7010     fl->l_type = l_type;
7011     __get_user(fl->l_whence, &target_fl->l_whence);
7012     __get_user(fl->l_start, &target_fl->l_start);
7013     __get_user(fl->l_len, &target_fl->l_len);
7014     __get_user(fl->l_pid, &target_fl->l_pid);
7015     unlock_user_struct(target_fl, target_flock_addr, 0);
7016     return 0;
7017 }
7018 
7019 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7020                                             const struct flock *fl)
7021 {
7022     struct target_flock64 *target_fl;
7023     short l_type;
7024 
7025     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7026         return -TARGET_EFAULT;
7027     }
7028 
7029     l_type = host_to_target_flock(fl->l_type);
7030     __put_user(l_type, &target_fl->l_type);
7031     __put_user(fl->l_whence, &target_fl->l_whence);
7032     __put_user(fl->l_start, &target_fl->l_start);
7033     __put_user(fl->l_len, &target_fl->l_len);
7034     __put_user(fl->l_pid, &target_fl->l_pid);
7035     unlock_user_struct(target_fl, target_flock_addr, 1);
7036     return 0;
7037 }
7038 
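/*
 * do_fcntl() translates the command number, converts any pointed-to
 * structures (struct flock, struct f_owner_ex) between guest and host
 * layouts, and remaps flag bitmasks and signal numbers where the command
 * requires it.
 */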
7039 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7040 {
7041     struct flock fl;
7042 #ifdef F_GETOWN_EX
7043     struct f_owner_ex fox;
7044     struct target_f_owner_ex *target_fox;
7045 #endif
7046     abi_long ret;
7047     int host_cmd = target_to_host_fcntl_cmd(cmd);
7048 
7049     if (host_cmd == -TARGET_EINVAL)
7050 	    return host_cmd;
7051 
7052     switch(cmd) {
7053     case TARGET_F_GETLK:
7054         ret = copy_from_user_flock(&fl, arg);
7055         if (ret) {
7056             return ret;
7057         }
7058         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7059         if (ret == 0) {
7060             ret = copy_to_user_flock(arg, &fl);
7061         }
7062         break;
7063 
7064     case TARGET_F_SETLK:
7065     case TARGET_F_SETLKW:
7066         ret = copy_from_user_flock(&fl, arg);
7067         if (ret) {
7068             return ret;
7069         }
7070         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7071         break;
7072 
7073     case TARGET_F_GETLK64:
7074     case TARGET_F_OFD_GETLK:
7075         ret = copy_from_user_flock64(&fl, arg);
7076         if (ret) {
7077             return ret;
7078         }
7079         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7080         if (ret == 0) {
7081             ret = copy_to_user_flock64(arg, &fl);
7082         }
7083         break;
7084     case TARGET_F_SETLK64:
7085     case TARGET_F_SETLKW64:
7086     case TARGET_F_OFD_SETLK:
7087     case TARGET_F_OFD_SETLKW:
7088         ret = copy_from_user_flock64(&fl, arg);
7089         if (ret) {
7090             return ret;
7091         }
7092         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7093         break;
7094 
7095     case TARGET_F_GETFL:
7096         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7097         if (ret >= 0) {
7098             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7099             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7100             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7101                 ret |= TARGET_O_LARGEFILE;
7102             }
7103         }
7104         break;
7105 
7106     case TARGET_F_SETFL:
7107         ret = get_errno(safe_fcntl(fd, host_cmd,
7108                                    target_to_host_bitmask(arg,
7109                                                           fcntl_flags_tbl)));
7110         break;
7111 
7112 #ifdef F_GETOWN_EX
7113     case TARGET_F_GETOWN_EX:
7114         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7115         if (ret >= 0) {
7116             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7117                 return -TARGET_EFAULT;
7118             target_fox->type = tswap32(fox.type);
7119             target_fox->pid = tswap32(fox.pid);
7120             unlock_user_struct(target_fox, arg, 1);
7121         }
7122         break;
7123 #endif
7124 
7125 #ifdef F_SETOWN_EX
7126     case TARGET_F_SETOWN_EX:
7127         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7128             return -TARGET_EFAULT;
7129         fox.type = tswap32(target_fox->type);
7130         fox.pid = tswap32(target_fox->pid);
7131         unlock_user_struct(target_fox, arg, 0);
7132         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7133         break;
7134 #endif
7135 
7136     case TARGET_F_SETSIG:
7137         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7138         break;
7139 
7140     case TARGET_F_GETSIG:
7141         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7142         break;
7143 
7144     case TARGET_F_SETOWN:
7145     case TARGET_F_GETOWN:
7146     case TARGET_F_SETLEASE:
7147     case TARGET_F_GETLEASE:
7148     case TARGET_F_SETPIPE_SZ:
7149     case TARGET_F_GETPIPE_SZ:
7150     case TARGET_F_ADD_SEALS:
7151     case TARGET_F_GET_SEALS:
7152         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7153         break;
7154 
7155     default:
7156         ret = get_errno(safe_fcntl(fd, cmd, arg));
7157         break;
7158     }
7159     return ret;
7160 }
7161 
7162 #ifdef USE_UID16
7163 
7164 static inline int high2lowuid(int uid)
7165 {
7166     if (uid > 65535)
7167         return 65534;
7168     else
7169         return uid;
7170 }
7171 
7172 static inline int high2lowgid(int gid)
7173 {
7174     if (gid > 65535)
7175         return 65534;
7176     else
7177         return gid;
7178 }
7179 
7180 static inline int low2highuid(int uid)
7181 {
7182     if ((int16_t)uid == -1)
7183         return -1;
7184     else
7185         return uid;
7186 }
7187 
7188 static inline int low2highgid(int gid)
7189 {
7190     if ((int16_t)gid == -1)
7191         return -1;
7192     else
7193         return gid;
7194 }
7195 static inline int tswapid(int id)
7196 {
7197     return tswap16(id);
7198 }
7199 
7200 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7201 
7202 #else /* !USE_UID16 */
7203 static inline int high2lowuid(int uid)
7204 {
7205     return uid;
7206 }
7207 static inline int high2lowgid(int gid)
7208 {
7209     return gid;
7210 }
7211 static inline int low2highuid(int uid)
7212 {
7213     return uid;
7214 }
7215 static inline int low2highgid(int gid)
7216 {
7217     return gid;
7218 }
7219 static inline int tswapid(int id)
7220 {
7221     return tswap32(id);
7222 }
7223 
7224 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7225 
7226 #endif /* USE_UID16 */
7227 
7228 /* We must do direct syscalls for setting UID/GID, because we want to
7229  * implement the Linux system call semantics of "change only for this thread",
7230  * not the libc/POSIX semantics of "change for all threads in process".
7231  * (See http://ewontfix.com/17/ for more details.)
7232  * We use the 32-bit version of the syscalls if present; if it is not
7233  * then either the host architecture supports 32-bit UIDs natively with
7234  * the standard syscall, or the 16-bit UID is the best we can do.
7235  */
7236 #ifdef __NR_setuid32
7237 #define __NR_sys_setuid __NR_setuid32
7238 #else
7239 #define __NR_sys_setuid __NR_setuid
7240 #endif
7241 #ifdef __NR_setgid32
7242 #define __NR_sys_setgid __NR_setgid32
7243 #else
7244 #define __NR_sys_setgid __NR_setgid
7245 #endif
7246 #ifdef __NR_setresuid32
7247 #define __NR_sys_setresuid __NR_setresuid32
7248 #else
7249 #define __NR_sys_setresuid __NR_setresuid
7250 #endif
7251 #ifdef __NR_setresgid32
7252 #define __NR_sys_setresgid __NR_setresgid32
7253 #else
7254 #define __NR_sys_setresgid __NR_setresgid
7255 #endif
7256 #ifdef __NR_setgroups32
7257 #define __NR_sys_setgroups __NR_setgroups32
7258 #else
7259 #define __NR_sys_setgroups __NR_setgroups
7260 #endif
7261 #ifdef __NR_setreuid32
7262 #define __NR_sys_setreuid __NR_setreuid32
7263 #else
7264 #define __NR_sys_setreuid __NR_setreuid
7265 #endif
7266 #ifdef __NR_setregid32
7267 #define __NR_sys_setregid __NR_setregid32
7268 #else
7269 #define __NR_sys_setregid __NR_setregid
7270 #endif
7271 
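/*
 * The _syscallN() macros expand to small wrappers that issue the syscall
 * numbers selected above directly, preserving the per-thread semantics
 * described in the comment above.
 */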
7272 _syscall1(int, sys_setuid, uid_t, uid)
7273 _syscall1(int, sys_setgid, gid_t, gid)
7274 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7275 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7276 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7277 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid)
7278 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid)
7279 
7280 void syscall_init(void)
7281 {
7282     IOCTLEntry *ie;
7283     const argtype *arg_type;
7284     int size;
7285 
7286     thunk_init(STRUCT_MAX);
7287 
7288 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7289 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7290 #include "syscall_types.h"
7291 #undef STRUCT
7292 #undef STRUCT_SPECIAL
7293 
7294     /* we patch the ioctl size if necessary. We rely on the fact that
7295        no ioctl has all the bits at '1' in the size field */
7296     ie = ioctl_entries;
7297     while (ie->target_cmd != 0) {
7298         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7299             TARGET_IOC_SIZEMASK) {
7300             arg_type = ie->arg_type;
7301             if (arg_type[0] != TYPE_PTR) {
7302                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7303                         ie->target_cmd);
7304                 exit(1);
7305             }
7306             arg_type++;
7307             size = thunk_type_size(arg_type, 0);
7308             ie->target_cmd = (ie->target_cmd &
7309                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7310                 (size << TARGET_IOC_SIZESHIFT);
7311         }
7312 
7313         /* automatic consistency check if same arch */
7314 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7315     (defined(__x86_64__) && defined(TARGET_X86_64))
7316         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7317             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7318                     ie->name, ie->target_cmd, ie->host_cmd);
7319         }
7320 #endif
7321         ie++;
7322     }
7323 }
7324 
7325 #ifdef TARGET_NR_truncate64
7326 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7327                                          abi_long arg2,
7328                                          abi_long arg3,
7329                                          abi_long arg4)
7330 {
7331     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7332         arg2 = arg3;
7333         arg3 = arg4;
7334     }
7335     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7336 }
7337 #endif
7338 
7339 #ifdef TARGET_NR_ftruncate64
7340 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7341                                           abi_long arg2,
7342                                           abi_long arg3,
7343                                           abi_long arg4)
7344 {
7345     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7346         arg2 = arg3;
7347         arg3 = arg4;
7348     }
7349     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7350 }
7351 #endif
7352 
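/*
 * Conversion helpers for struct itimerspec.  The plain variants use the
 * target's native struct itimerspec; the *64 variants use the target's
 * __kernel_itimerspec, whose time fields are always 64-bit.
 */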
7353 #if defined(TARGET_NR_timer_settime) || \
7354     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7355 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7356                                                  abi_ulong target_addr)
7357 {
7358     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7359                                 offsetof(struct target_itimerspec,
7360                                          it_interval)) ||
7361         target_to_host_timespec(&host_its->it_value, target_addr +
7362                                 offsetof(struct target_itimerspec,
7363                                          it_value))) {
7364         return -TARGET_EFAULT;
7365     }
7366 
7367     return 0;
7368 }
7369 #endif
7370 
7371 #if defined(TARGET_NR_timer_settime64) || \
7372     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7373 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7374                                                    abi_ulong target_addr)
7375 {
7376     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7377                                   offsetof(struct target__kernel_itimerspec,
7378                                            it_interval)) ||
7379         target_to_host_timespec64(&host_its->it_value, target_addr +
7380                                   offsetof(struct target__kernel_itimerspec,
7381                                            it_value))) {
7382         return -TARGET_EFAULT;
7383     }
7384 
7385     return 0;
7386 }
7387 #endif
7388 
7389 #if ((defined(TARGET_NR_timerfd_gettime) || \
7390       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7391       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7392 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7393                                                  struct itimerspec *host_its)
7394 {
7395     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7396                                                        it_interval),
7397                                 &host_its->it_interval) ||
7398         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7399                                                        it_value),
7400                                 &host_its->it_value)) {
7401         return -TARGET_EFAULT;
7402     }
7403     return 0;
7404 }
7405 #endif
7406 
7407 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7408       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7409       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7410 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7411                                                    struct itimerspec *host_its)
7412 {
7413     if (host_to_target_timespec64(target_addr +
7414                                   offsetof(struct target__kernel_itimerspec,
7415                                            it_interval),
7416                                   &host_its->it_interval) ||
7417         host_to_target_timespec64(target_addr +
7418                                   offsetof(struct target__kernel_itimerspec,
7419                                            it_value),
7420                                   &host_its->it_value)) {
7421         return -TARGET_EFAULT;
7422     }
7423     return 0;
7424 }
7425 #endif
7426 
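/*
 * struct timex conversion for adjtimex() and clock_adjtime(): each member
 * is copied individually with __get_user()/__put_user() so that field
 * sizes and byte order are adjusted between target and host.
 */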
7427 #if defined(TARGET_NR_adjtimex) || \
7428     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7429 static inline abi_long target_to_host_timex(struct timex *host_tx,
7430                                             abi_long target_addr)
7431 {
7432     struct target_timex *target_tx;
7433 
7434     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7435         return -TARGET_EFAULT;
7436     }
7437 
7438     __get_user(host_tx->modes, &target_tx->modes);
7439     __get_user(host_tx->offset, &target_tx->offset);
7440     __get_user(host_tx->freq, &target_tx->freq);
7441     __get_user(host_tx->maxerror, &target_tx->maxerror);
7442     __get_user(host_tx->esterror, &target_tx->esterror);
7443     __get_user(host_tx->status, &target_tx->status);
7444     __get_user(host_tx->constant, &target_tx->constant);
7445     __get_user(host_tx->precision, &target_tx->precision);
7446     __get_user(host_tx->tolerance, &target_tx->tolerance);
7447     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7448     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7449     __get_user(host_tx->tick, &target_tx->tick);
7450     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7451     __get_user(host_tx->jitter, &target_tx->jitter);
7452     __get_user(host_tx->shift, &target_tx->shift);
7453     __get_user(host_tx->stabil, &target_tx->stabil);
7454     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7455     __get_user(host_tx->calcnt, &target_tx->calcnt);
7456     __get_user(host_tx->errcnt, &target_tx->errcnt);
7457     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7458     __get_user(host_tx->tai, &target_tx->tai);
7459 
7460     unlock_user_struct(target_tx, target_addr, 0);
7461     return 0;
7462 }
7463 
7464 static inline abi_long host_to_target_timex(abi_long target_addr,
7465                                             struct timex *host_tx)
7466 {
7467     struct target_timex *target_tx;
7468 
7469     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7470         return -TARGET_EFAULT;
7471     }
7472 
7473     __put_user(host_tx->modes, &target_tx->modes);
7474     __put_user(host_tx->offset, &target_tx->offset);
7475     __put_user(host_tx->freq, &target_tx->freq);
7476     __put_user(host_tx->maxerror, &target_tx->maxerror);
7477     __put_user(host_tx->esterror, &target_tx->esterror);
7478     __put_user(host_tx->status, &target_tx->status);
7479     __put_user(host_tx->constant, &target_tx->constant);
7480     __put_user(host_tx->precision, &target_tx->precision);
7481     __put_user(host_tx->tolerance, &target_tx->tolerance);
7482     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7483     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7484     __put_user(host_tx->tick, &target_tx->tick);
7485     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7486     __put_user(host_tx->jitter, &target_tx->jitter);
7487     __put_user(host_tx->shift, &target_tx->shift);
7488     __put_user(host_tx->stabil, &target_tx->stabil);
7489     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7490     __put_user(host_tx->calcnt, &target_tx->calcnt);
7491     __put_user(host_tx->errcnt, &target_tx->errcnt);
7492     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7493     __put_user(host_tx->tai, &target_tx->tai);
7494 
7495     unlock_user_struct(target_tx, target_addr, 1);
7496     return 0;
7497 }
7498 #endif
7499 
7500 
7501 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7502 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7503                                               abi_long target_addr)
7504 {
7505     struct target__kernel_timex *target_tx;
7506 
7507     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7508                                  offsetof(struct target__kernel_timex,
7509                                           time))) {
7510         return -TARGET_EFAULT;
7511     }
7512 
7513     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7514         return -TARGET_EFAULT;
7515     }
7516 
7517     __get_user(host_tx->modes, &target_tx->modes);
7518     __get_user(host_tx->offset, &target_tx->offset);
7519     __get_user(host_tx->freq, &target_tx->freq);
7520     __get_user(host_tx->maxerror, &target_tx->maxerror);
7521     __get_user(host_tx->esterror, &target_tx->esterror);
7522     __get_user(host_tx->status, &target_tx->status);
7523     __get_user(host_tx->constant, &target_tx->constant);
7524     __get_user(host_tx->precision, &target_tx->precision);
7525     __get_user(host_tx->tolerance, &target_tx->tolerance);
7526     __get_user(host_tx->tick, &target_tx->tick);
7527     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7528     __get_user(host_tx->jitter, &target_tx->jitter);
7529     __get_user(host_tx->shift, &target_tx->shift);
7530     __get_user(host_tx->stabil, &target_tx->stabil);
7531     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7532     __get_user(host_tx->calcnt, &target_tx->calcnt);
7533     __get_user(host_tx->errcnt, &target_tx->errcnt);
7534     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7535     __get_user(host_tx->tai, &target_tx->tai);
7536 
7537     unlock_user_struct(target_tx, target_addr, 0);
7538     return 0;
7539 }
7540 
7541 static inline abi_long host_to_target_timex64(abi_long target_addr,
7542                                               struct timex *host_tx)
7543 {
7544     struct target__kernel_timex *target_tx;
7545 
7546     if (copy_to_user_timeval64(target_addr +
7547                                offsetof(struct target__kernel_timex, time),
7548                                &host_tx->time)) {
7549         return -TARGET_EFAULT;
7550     }
7551 
7552     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7553         return -TARGET_EFAULT;
7554     }
7555 
7556     __put_user(host_tx->modes, &target_tx->modes);
7557     __put_user(host_tx->offset, &target_tx->offset);
7558     __put_user(host_tx->freq, &target_tx->freq);
7559     __put_user(host_tx->maxerror, &target_tx->maxerror);
7560     __put_user(host_tx->esterror, &target_tx->esterror);
7561     __put_user(host_tx->status, &target_tx->status);
7562     __put_user(host_tx->constant, &target_tx->constant);
7563     __put_user(host_tx->precision, &target_tx->precision);
7564     __put_user(host_tx->tolerance, &target_tx->tolerance);
7565     __put_user(host_tx->tick, &target_tx->tick);
7566     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7567     __put_user(host_tx->jitter, &target_tx->jitter);
7568     __put_user(host_tx->shift, &target_tx->shift);
7569     __put_user(host_tx->stabil, &target_tx->stabil);
7570     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7571     __put_user(host_tx->calcnt, &target_tx->calcnt);
7572     __put_user(host_tx->errcnt, &target_tx->errcnt);
7573     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7574     __put_user(host_tx->tai, &target_tx->tai);
7575 
7576     unlock_user_struct(target_tx, target_addr, 1);
7577     return 0;
7578 }
7579 #endif
7580 
7581 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7582 #define sigev_notify_thread_id _sigev_un._tid
7583 #endif
7584 
7585 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7586                                                abi_ulong target_addr)
7587 {
7588     struct target_sigevent *target_sevp;
7589 
7590     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7591         return -TARGET_EFAULT;
7592     }
7593 
7594     /* This union is awkward on 64 bit systems because it has a 32 bit
7595      * integer and a pointer in it; we follow the conversion approach
7596      * used for handling sigval types in signal.c so the guest should get
7597      * the correct value back even if we did a 64 bit byteswap and it's
7598      * using the 32 bit integer.
7599      */
7600     host_sevp->sigev_value.sival_ptr =
7601         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7602     host_sevp->sigev_signo =
7603         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7604     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7605     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7606 
7607     unlock_user_struct(target_sevp, target_addr, 1);
7608     return 0;
7609 }
7610 
7611 #if defined(TARGET_NR_mlockall)
7612 static inline int target_to_host_mlockall_arg(int arg)
7613 {
7614     int result = 0;
7615 
7616     if (arg & TARGET_MCL_CURRENT) {
7617         result |= MCL_CURRENT;
7618     }
7619     if (arg & TARGET_MCL_FUTURE) {
7620         result |= MCL_FUTURE;
7621     }
7622 #ifdef MCL_ONFAULT
7623     if (arg & TARGET_MCL_ONFAULT) {
7624         result |= MCL_ONFAULT;
7625     }
7626 #endif
7627 
7628     return result;
7629 }
7630 #endif
7631 
7632 static inline int target_to_host_msync_arg(abi_long arg)
7633 {
7634     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7635            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7636            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7637            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7638 }
7639 
7640 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7641      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7642      defined(TARGET_NR_newfstatat))
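/*
 * Write a host struct stat back to the guest in its stat64 layout.
 * 32-bit ARM needs a special case because the EABI structure layout
 * differs; everything else uses target_stat64 or target_stat depending
 * on whether the target defines TARGET_HAS_STRUCT_STAT64.  Targets with
 * TARGET_STAT64_HAS_BROKEN_ST_INO also get the inode duplicated into
 * __st_ino.
 */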
7643 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7644                                              abi_ulong target_addr,
7645                                              struct stat *host_st)
7646 {
7647 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7648     if (cpu_env->eabi) {
7649         struct target_eabi_stat64 *target_st;
7650 
7651         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7652             return -TARGET_EFAULT;
7653         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7654         __put_user(host_st->st_dev, &target_st->st_dev);
7655         __put_user(host_st->st_ino, &target_st->st_ino);
7656 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7657         __put_user(host_st->st_ino, &target_st->__st_ino);
7658 #endif
7659         __put_user(host_st->st_mode, &target_st->st_mode);
7660         __put_user(host_st->st_nlink, &target_st->st_nlink);
7661         __put_user(host_st->st_uid, &target_st->st_uid);
7662         __put_user(host_st->st_gid, &target_st->st_gid);
7663         __put_user(host_st->st_rdev, &target_st->st_rdev);
7664         __put_user(host_st->st_size, &target_st->st_size);
7665         __put_user(host_st->st_blksize, &target_st->st_blksize);
7666         __put_user(host_st->st_blocks, &target_st->st_blocks);
7667         __put_user(host_st->st_atime, &target_st->target_st_atime);
7668         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7669         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7670 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7671         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7672         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7673         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7674 #endif
7675         unlock_user_struct(target_st, target_addr, 1);
7676     } else
7677 #endif
7678     {
7679 #if defined(TARGET_HAS_STRUCT_STAT64)
7680         struct target_stat64 *target_st;
7681 #else
7682         struct target_stat *target_st;
7683 #endif
7684 
7685         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7686             return -TARGET_EFAULT;
7687         memset(target_st, 0, sizeof(*target_st));
7688         __put_user(host_st->st_dev, &target_st->st_dev);
7689         __put_user(host_st->st_ino, &target_st->st_ino);
7690 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7691         __put_user(host_st->st_ino, &target_st->__st_ino);
7692 #endif
7693         __put_user(host_st->st_mode, &target_st->st_mode);
7694         __put_user(host_st->st_nlink, &target_st->st_nlink);
7695         __put_user(host_st->st_uid, &target_st->st_uid);
7696         __put_user(host_st->st_gid, &target_st->st_gid);
7697         __put_user(host_st->st_rdev, &target_st->st_rdev);
7698         /* XXX: better use of kernel struct */
7699         __put_user(host_st->st_size, &target_st->st_size);
7700         __put_user(host_st->st_blksize, &target_st->st_blksize);
7701         __put_user(host_st->st_blocks, &target_st->st_blocks);
7702         __put_user(host_st->st_atime, &target_st->target_st_atime);
7703         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7704         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7705 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7706         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7707         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7708         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7709 #endif
7710         unlock_user_struct(target_st, target_addr, 1);
7711     }
7712 
7713     return 0;
7714 }
7715 #endif
7716 
7717 #if defined(TARGET_NR_statx) && defined(__NR_statx)
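/*
 * Copy a statx result out to the guest.  The host result is handed in
 * via a target_statx-shaped buffer, so this only needs to byte-swap
 * each field into guest memory; fields QEMU does not fill stay zero.
 */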
7718 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7719                                             abi_ulong target_addr)
7720 {
7721     struct target_statx *target_stx;
7722 
7723     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7724         return -TARGET_EFAULT;
7725     }
7726     memset(target_stx, 0, sizeof(*target_stx));
7727 
7728     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7729     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7730     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7731     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7732     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7733     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7734     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7735     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7736     __put_user(host_stx->stx_size, &target_stx->stx_size);
7737     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7738     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7739     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7740     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7741     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7742     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7743     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7744     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7745     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7746     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7747     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7748     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7749     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7750     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7751 
7752     unlock_user_struct(target_stx, target_addr, 1);
7753 
7754     return 0;
7755 }
7756 #endif
7757 
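/*
 * do_sys_futex()/do_safe_futex(): pick the host futex syscall matching
 * the width of the host struct timespec.  64-bit hosts always have a
 * 64-bit time_t and use __NR_futex; 32-bit hosts use __NR_futex_time64
 * when tv_sec is 64 bits and fall back to the classic __NR_futex
 * otherwise.
 */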
7758 static int do_sys_futex(int *uaddr, int op, int val,
7759                          const struct timespec *timeout, int *uaddr2,
7760                          int val3)
7761 {
7762 #if HOST_LONG_BITS == 64
7763 #if defined(__NR_futex)
7764     /* time_t is always 64 bits here, so no _time64 variant is defined */
7765     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7766 
7767 #endif
7768 #else /* HOST_LONG_BITS == 64 */
7769 #if defined(__NR_futex_time64)
7770     if (sizeof(timeout->tv_sec) == 8) {
7771         /* _time64 function on 32bit arch */
7772         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7773     }
7774 #endif
7775 #if defined(__NR_futex)
7776     /* old function on 32bit arch */
7777     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7778 #endif
7779 #endif /* HOST_LONG_BITS == 64 */
7780     g_assert_not_reached();
7781 }
7782 
7783 static int do_safe_futex(int *uaddr, int op, int val,
7784                          const struct timespec *timeout, int *uaddr2,
7785                          int val3)
7786 {
7787 #if HOST_LONG_BITS == 64
7788 #if defined(__NR_futex)
7789     /* time_t is always 64 bits here, so no _time64 variant is defined */
7790     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7791 #endif
7792 #else /* HOST_LONG_BITS == 64 */
7793 #if defined(__NR_futex_time64)
7794     if (sizeof(timeout->tv_sec) == 8) {
7795         /* _time64 function on 32bit arch */
7796         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7797                                            val3));
7798     }
7799 #endif
7800 #if defined(__NR_futex)
7801     /* old function on 32bit arch */
7802     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7803 #endif
7804 #endif /* HOST_LONG_BITS == 64 */
7805     return -TARGET_ENOSYS;
7806 }
7807 
7808 /* ??? Using host futex calls even when target atomic operations
7809    are not really atomic probably breaks things.  However, implementing
7810    futexes locally would make futexes shared between multiple processes
7811    tricky.  In any case they're probably useless, because guest atomic
7812    operations won't work either.  */
7813 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
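/*
 * Dispatch a guest futex(2)/futex_time64(2) call.  Values the kernel
 * compares against guest memory (VAL for the WAIT ops, VAL3 for the
 * CMP_REQUEUE ops) are byte-swapped to match the guest's in-memory
 * order; for ops whose fourth argument is really VAL2 the raw value is
 * passed through instead of being converted as a timespec; any real
 * timeout is converted from the guest layout, honouring the time64
 * flag.
 */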
7814 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7815                     int op, int val, target_ulong timeout,
7816                     target_ulong uaddr2, int val3)
7817 {
7818     struct timespec ts, *pts = NULL;
7819     void *haddr2 = NULL;
7820     int base_op;
7821 
7822     /* We assume FUTEX_* constants are the same on both host and target. */
7823 #ifdef FUTEX_CMD_MASK
7824     base_op = op & FUTEX_CMD_MASK;
7825 #else
7826     base_op = op;
7827 #endif
7828     switch (base_op) {
7829     case FUTEX_WAIT:
7830     case FUTEX_WAIT_BITSET:
7831         val = tswap32(val);
7832         break;
7833     case FUTEX_WAIT_REQUEUE_PI:
7834         val = tswap32(val);
7835         haddr2 = g2h(cpu, uaddr2);
7836         break;
7837     case FUTEX_LOCK_PI:
7838     case FUTEX_LOCK_PI2:
7839         break;
7840     case FUTEX_WAKE:
7841     case FUTEX_WAKE_BITSET:
7842     case FUTEX_TRYLOCK_PI:
7843     case FUTEX_UNLOCK_PI:
7844         timeout = 0;
7845         break;
7846     case FUTEX_FD:
7847         val = target_to_host_signal(val);
7848         timeout = 0;
7849         break;
7850     case FUTEX_CMP_REQUEUE:
7851     case FUTEX_CMP_REQUEUE_PI:
7852         val3 = tswap32(val3);
7853         /* fall through */
7854     case FUTEX_REQUEUE:
7855     case FUTEX_WAKE_OP:
7856         /*
7857          * For these, the 4th argument is not TIMEOUT, but VAL2.
7858          * But the prototype of do_safe_futex takes a pointer, so
7859          * insert casts to satisfy the compiler.  We do not need
7860          * to tswap VAL2 since it's not compared to guest memory.
7861          */
7862         pts = (struct timespec *)(uintptr_t)timeout;
7863         timeout = 0;
7864         haddr2 = g2h(cpu, uaddr2);
7865         break;
7866     default:
7867         return -TARGET_ENOSYS;
7868     }
7869     if (timeout) {
7870         pts = &ts;
7871         if (time64
7872             ? target_to_host_timespec64(pts, timeout)
7873             : target_to_host_timespec(pts, timeout)) {
7874             return -TARGET_EFAULT;
7875         }
7876     }
7877     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7878 }
7879 #endif
7880 
7881 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
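/*
 * name_to_handle_at(2): read handle_bytes from the guest file_handle,
 * have the host fill a scratch handle of that size, then copy it back
 * byte-swapping only handle_bytes and handle_type -- the rest of the
 * handle is opaque and returned as-is.  The mount id goes back through
 * put_user_s32().
 */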
7882 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7883                                      abi_long handle, abi_long mount_id,
7884                                      abi_long flags)
7885 {
7886     struct file_handle *target_fh;
7887     struct file_handle *fh;
7888     int mid = 0;
7889     abi_long ret;
7890     char *name;
7891     unsigned int size, total_size;
7892 
7893     if (get_user_s32(size, handle)) {
7894         return -TARGET_EFAULT;
7895     }
7896 
7897     name = lock_user_string(pathname);
7898     if (!name) {
7899         return -TARGET_EFAULT;
7900     }
7901 
7902     total_size = sizeof(struct file_handle) + size;
7903     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7904     if (!target_fh) {
7905         unlock_user(name, pathname, 0);
7906         return -TARGET_EFAULT;
7907     }
7908 
7909     fh = g_malloc0(total_size);
7910     fh->handle_bytes = size;
7911 
7912     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7913     unlock_user(name, pathname, 0);
7914 
7915     /* man name_to_handle_at(2):
7916      * Other than the use of the handle_bytes field, the caller should treat
7917      * the file_handle structure as an opaque data type
7918      */
7919 
7920     memcpy(target_fh, fh, total_size);
7921     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7922     target_fh->handle_type = tswap32(fh->handle_type);
7923     g_free(fh);
7924     unlock_user(target_fh, handle, total_size);
7925 
7926     if (put_user_s32(mid, mount_id)) {
7927         return -TARGET_EFAULT;
7928     }
7929 
7930     return ret;
7931 
7932 }
7933 #endif
7934 
7935 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7936 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7937                                      abi_long flags)
7938 {
7939     struct file_handle *target_fh;
7940     struct file_handle *fh;
7941     unsigned int size, total_size;
7942     abi_long ret;
7943 
7944     if (get_user_s32(size, handle)) {
7945         return -TARGET_EFAULT;
7946     }
7947 
7948     total_size = sizeof(struct file_handle) + size;
7949     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7950     if (!target_fh) {
7951         return -TARGET_EFAULT;
7952     }
7953 
7954     fh = g_memdup(target_fh, total_size);
7955     fh->handle_bytes = size;
7956     fh->handle_type = tswap32(target_fh->handle_type);
7957 
7958     ret = get_errno(open_by_handle_at(mount_fd, fh,
7959                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7960 
7961     g_free(fh);
7962 
7963     unlock_user(target_fh, handle, total_size);
7964 
7965     return ret;
7966 }
7967 #endif
7968 
7969 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7970 
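/*
 * signalfd()/signalfd4(): convert the guest signal mask and flags,
 * create the host signalfd, and register an fd translator so that
 * signalfd_siginfo records read from it are converted back into the
 * guest layout.
 */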
7971 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7972 {
7973     int host_flags;
7974     target_sigset_t *target_mask;
7975     sigset_t host_mask;
7976     abi_long ret;
7977 
7978     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7979         return -TARGET_EINVAL;
7980     }
7981     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7982         return -TARGET_EFAULT;
7983     }
7984 
7985     target_to_host_sigset(&host_mask, target_mask);
7986 
7987     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7988 
7989     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7990     if (ret >= 0) {
7991         fd_trans_register(ret, &target_signalfd_trans);
7992     }
7993 
7994     unlock_user_struct(target_mask, mask, 0);
7995 
7996     return ret;
7997 }
7998 #endif
7999 
8000 /* Map host to target signal numbers for the wait family of syscalls.
8001    Assume all other status bits are the same.  */
8002 int host_to_target_waitstatus(int status)
8003 {
8004     if (WIFSIGNALED(status)) {
8005         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8006     }
8007     if (WIFSTOPPED(status)) {
8008         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8009                | (status & 0xff);
8010     }
8011     return status;
8012 }
8013 
8014 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8015 {
8016     CPUState *cpu = env_cpu(cpu_env);
8017     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8018     int i;
8019 
8020     for (i = 0; i < bprm->argc; i++) {
8021         size_t len = strlen(bprm->argv[i]) + 1;
8022 
8023         if (write(fd, bprm->argv[i], len) != len) {
8024             return -1;
8025         }
8026     }
8027 
8028     return 0;
8029 }
8030 
8031 struct open_self_maps_data {
8032     TaskState *ts;
8033     IntervalTreeRoot *host_maps;
8034     int fd;
8035     bool smaps;
8036 };
8037 
8038 /*
8039  * Subroutine to output one line of /proc/self/maps,
8040  * or one region of /proc/self/smaps.
8041  */
8042 
8043 #ifdef TARGET_HPPA
8044 # define test_stack(S, E, L)  (E == L)
8045 #else
8046 # define test_stack(S, E, L)  (S == L)
8047 #endif
8048 
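/*
 * Each emitted record looks roughly like (illustrative values only):
 *     00010000-00012000 r-xp 00000000 08:01 123456     /usr/bin/true
 * with the pathname padded out to the column the kernel uses.  In smaps
 * mode the per-region counters follow; QEMU reports 0 for every counter
 * it cannot know (Rss, Pss, Referenced, ...).
 */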
8049 static void open_self_maps_4(const struct open_self_maps_data *d,
8050                              const MapInfo *mi, abi_ptr start,
8051                              abi_ptr end, unsigned flags)
8052 {
8053     const struct image_info *info = d->ts->info;
8054     const char *path = mi->path;
8055     uint64_t offset;
8056     int fd = d->fd;
8057     int count;
8058 
8059     if (test_stack(start, end, info->stack_limit)) {
8060         path = "[stack]";
8061     } else if (start == info->brk) {
8062         path = "[heap]";
8063     } else if (start == info->vdso) {
8064         path = "[vdso]";
8065 #ifdef TARGET_X86_64
8066     } else if (start == TARGET_VSYSCALL_PAGE) {
8067         path = "[vsyscall]";
8068 #endif
8069     }
8070 
8071     /* Except null device (MAP_ANON), adjust offset for this fragment. */
8072     offset = mi->offset;
8073     if (mi->dev) {
8074         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8075         offset += hstart - mi->itree.start;
8076     }
8077 
8078     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8079                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8080                     start, end,
8081                     (flags & PAGE_READ) ? 'r' : '-',
8082                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8083                     (flags & PAGE_EXEC) ? 'x' : '-',
8084                     mi->is_priv ? 'p' : 's',
8085                     offset, major(mi->dev), minor(mi->dev),
8086                     (uint64_t)mi->inode);
8087     if (path) {
8088         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8089     } else {
8090         dprintf(fd, "\n");
8091     }
8092 
8093     if (d->smaps) {
8094         unsigned long size = end - start;
8095         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8096         unsigned long size_kb = size >> 10;
8097 
8098         dprintf(fd, "Size:                  %lu kB\n"
8099                 "KernelPageSize:        %lu kB\n"
8100                 "MMUPageSize:           %lu kB\n"
8101                 "Rss:                   0 kB\n"
8102                 "Pss:                   0 kB\n"
8103                 "Pss_Dirty:             0 kB\n"
8104                 "Shared_Clean:          0 kB\n"
8105                 "Shared_Dirty:          0 kB\n"
8106                 "Private_Clean:         0 kB\n"
8107                 "Private_Dirty:         0 kB\n"
8108                 "Referenced:            0 kB\n"
8109                 "Anonymous:             %lu kB\n"
8110                 "LazyFree:              0 kB\n"
8111                 "AnonHugePages:         0 kB\n"
8112                 "ShmemPmdMapped:        0 kB\n"
8113                 "FilePmdMapped:         0 kB\n"
8114                 "Shared_Hugetlb:        0 kB\n"
8115                 "Private_Hugetlb:       0 kB\n"
8116                 "Swap:                  0 kB\n"
8117                 "SwapPss:               0 kB\n"
8118                 "Locked:                0 kB\n"
8119                 "THPeligible:    0\n"
8120                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8121                 size_kb, page_size_kb, page_size_kb,
8122                 (flags & PAGE_ANON ? size_kb : 0),
8123                 (flags & PAGE_READ) ? " rd" : "",
8124                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8125                 (flags & PAGE_EXEC) ? " ex" : "",
8126                 mi->is_priv ? "" : " sh",
8127                 (flags & PAGE_READ) ? " mr" : "",
8128                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8129                 (flags & PAGE_EXEC) ? " me" : "",
8130                 mi->is_priv ? "" : " ms");
8131     }
8132 }
8133 
8134 /*
8135  * Callback for walk_memory_regions, when read_self_maps() fails.
8136  * Proceed without the benefit of host /proc/self/maps cross-check.
8137  */
8138 static int open_self_maps_3(void *opaque, vaddr guest_start,
8139                             vaddr guest_end, int flags)
8140 {
8141     static const MapInfo mi = { .is_priv = true };
8142 
8143     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8144     return 0;
8145 }
8146 
8147 /*
8148  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8149  */
8150 static int open_self_maps_2(void *opaque, vaddr guest_start,
8151                             vaddr guest_end, int flags)
8152 {
8153     const struct open_self_maps_data *d = opaque;
8154     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8155     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8156 
8157 #ifdef TARGET_X86_64
8158     /*
8159      * Because of the extremely high position of the page within the guest
8160      * virtual address space, this is not backed by host memory at all.
8161      * Therefore the loop below would fail.  This is the only instance
8162      * of not having host backing memory.
8163      */
8164     if (guest_start == TARGET_VSYSCALL_PAGE) {
8165         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8166     }
8167 #endif
8168 
8169     while (1) {
8170         IntervalTreeNode *n =
8171             interval_tree_iter_first(d->host_maps, host_start, host_start);
8172         MapInfo *mi = container_of(n, MapInfo, itree);
8173         uintptr_t this_hlast = MIN(host_last, n->last);
8174         target_ulong this_gend = h2g(this_hlast) + 1;
8175 
8176         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8177 
8178         if (this_hlast == host_last) {
8179             return 0;
8180         }
8181         host_start = this_hlast + 1;
8182         guest_start = h2g(host_start);
8183     }
8184 }
8185 
8186 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8187 {
8188     struct open_self_maps_data d = {
8189         .ts = get_task_state(env_cpu(env)),
8190         .fd = fd,
8191         .smaps = smaps
8192     };
8193 
8194     mmap_lock();
8195     d.host_maps = read_self_maps();
8196     if (d.host_maps) {
8197         walk_memory_regions(&d, open_self_maps_2);
8198         free_self_maps(d.host_maps);
8199     } else {
8200         walk_memory_regions(&d, open_self_maps_3);
8201     }
8202     mmap_unlock();
8203     return 0;
8204 }
8205 
8206 static int open_self_maps(CPUArchState *cpu_env, int fd)
8207 {
8208     return open_self_maps_1(cpu_env, fd, false);
8209 }
8210 
8211 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8212 {
8213     return open_self_maps_1(cpu_env, fd, true);
8214 }
8215 
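/*
 * Synthesize /proc/self/stat.  Only the fields QEMU can answer for the
 * guest are filled in (pid, comm, state, ppid, pgrp, num_threads,
 * starttime and the start of the stack); every other field is written
 * as 0 so the line still has the expected number of columns.
 */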
8216 static int open_self_stat(CPUArchState *cpu_env, int fd)
8217 {
8218     CPUState *cpu = env_cpu(cpu_env);
8219     TaskState *ts = get_task_state(cpu);
8220     g_autoptr(GString) buf = g_string_new(NULL);
8221     int i;
8222 
8223     for (i = 0; i < 44; i++) {
8224         if (i == 0) {
8225             /* pid */
8226             g_string_printf(buf, FMT_pid " ", getpid());
8227         } else if (i == 1) {
8228             /* app name */
8229             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8230             bin = bin ? bin + 1 : ts->bprm->argv[0];
8231             g_string_printf(buf, "(%.15s) ", bin);
8232         } else if (i == 2) {
8233             /* task state */
8234             g_string_assign(buf, "R "); /* we are running right now */
8235         } else if (i == 3) {
8236             /* ppid */
8237             g_string_printf(buf, FMT_pid " ", getppid());
8238         } else if (i == 4) {
8239             /* pgid */
8240             g_string_printf(buf, FMT_pid " ", getpgrp());
8241         } else if (i == 19) {
8242             /* num_threads */
8243             int cpus = 0;
8244             WITH_RCU_READ_LOCK_GUARD() {
8245                 CPUState *cpu_iter;
8246                 CPU_FOREACH(cpu_iter) {
8247                     cpus++;
8248                 }
8249             }
8250             g_string_printf(buf, "%d ", cpus);
8251         } else if (i == 21) {
8252             /* starttime */
8253             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8254         } else if (i == 27) {
8255             /* stack bottom */
8256             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8257         } else {
8258             /* for the rest, there is MasterCard */
8259             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8260         }
8261 
8262         if (write(fd, buf->str, buf->len) != buf->len) {
8263             return -1;
8264         }
8265     }
8266 
8267     return 0;
8268 }
8269 
8270 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8271 {
8272     CPUState *cpu = env_cpu(cpu_env);
8273     TaskState *ts = get_task_state(cpu);
8274     abi_ulong auxv = ts->info->saved_auxv;
8275     abi_ulong len = ts->info->auxv_len;
8276     char *ptr;
8277 
8278     /*
8279      * The auxiliary vector is stored on the target process's stack;
8280      * read in the whole auxv vector and copy it to the file.
8281      */
8282     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8283     if (ptr != NULL) {
8284         while (len > 0) {
8285             ssize_t r;
8286             r = write(fd, ptr, len);
8287             if (r <= 0) {
8288                 break;
8289             }
8290             len -= r;
8291             ptr += r;
8292         }
8293         lseek(fd, 0, SEEK_SET);
8294         unlock_user(ptr, auxv, len);
8295     }
8296 
8297     return 0;
8298 }
8299 
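/*
 * Return 1 if filename names the given entry of our own proc directory,
 * i.e. "/proc/self/<entry>" or "/proc/<qemu's pid>/<entry>", else 0.
 */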
8300 static int is_proc_myself(const char *filename, const char *entry)
8301 {
8302     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8303         filename += strlen("/proc/");
8304         if (!strncmp(filename, "self/", strlen("self/"))) {
8305             filename += strlen("self/");
8306         } else if (*filename >= '1' && *filename <= '9') {
8307             char myself[80];
8308             snprintf(myself, sizeof(myself), "%d/", getpid());
8309             if (!strncmp(filename, myself, strlen(myself))) {
8310                 filename += strlen(myself);
8311             } else {
8312                 return 0;
8313             }
8314         } else {
8315             return 0;
8316         }
8317         if (!strcmp(filename, entry)) {
8318             return 1;
8319         }
8320     }
8321     return 0;
8322 }
8323 
8324 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8325                       const char *fmt, int code)
8326 {
8327     if (logfile) {
8328         CPUState *cs = env_cpu(env);
8329 
8330         fprintf(logfile, fmt, code);
8331         fprintf(logfile, "Failing executable: %s\n", exec_path);
8332         cpu_dump_state(cs, logfile, 0);
8333         open_self_maps(env, fileno(logfile));
8334     }
8335 }
8336 
8337 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8338 {
8339     /* dump to console */
8340     excp_dump_file(stderr, env, fmt, code);
8341 
8342     /* dump to log file */
8343     if (qemu_log_separate()) {
8344         FILE *logfile = qemu_log_trylock();
8345 
8346         excp_dump_file(logfile, env, fmt, code);
8347         qemu_log_unlock(logfile);
8348     }
8349 }
8350 
8351 #include "target_proc.h"
8352 
8353 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8354     defined(HAVE_ARCH_PROC_CPUINFO) || \
8355     defined(HAVE_ARCH_PROC_HARDWARE)
8356 static int is_proc(const char *filename, const char *entry)
8357 {
8358     return strcmp(filename, entry) == 0;
8359 }
8360 #endif
8361 
8362 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8363 static int open_net_route(CPUArchState *cpu_env, int fd)
8364 {
8365     FILE *fp;
8366     char *line = NULL;
8367     size_t len = 0;
8368     ssize_t read;
8369 
8370     fp = fopen("/proc/net/route", "r");
8371     if (fp == NULL) {
8372         return -1;
8373     }
8374 
8375     /* read header */
8376 
8377     read = getline(&line, &len, fp);
8378     dprintf(fd, "%s", line);
8379 
8380     /* read routes */
8381 
8382     while ((read = getline(&line, &len, fp)) != -1) {
8383         char iface[16];
8384         uint32_t dest, gw, mask;
8385         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8386         int fields;
8387 
8388         fields = sscanf(line,
8389                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8390                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8391                         &mask, &mtu, &window, &irtt);
8392         if (fields != 11) {
8393             continue;
8394         }
8395         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8396                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8397                 metric, tswap32(mask), mtu, window, irtt);
8398     }
8399 
8400     free(line);
8401     fclose(fp);
8402 
8403     return 0;
8404 }
8405 #endif
8406 
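/*
 * Intercept opens of the few /proc files whose contents must describe
 * the guest rather than QEMU itself.  /proc/self/exe is redirected to
 * the real executable path; for the entries in the fakes[] table the
 * synthesized contents are written to a memfd (or an unlinked temporary
 * file when memfd_create() is unavailable) and that fd is returned.
 * A return value of -2 means the path is not special and the caller
 * should perform the real open; any other negative value is an error
 * with errno set.
 */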
8407 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8408                               const char *fname, int flags, mode_t mode,
8409                               int openat2_resolve, bool safe)
8410 {
8411     g_autofree char *proc_name = NULL;
8412     const char *pathname;
8413     struct fake_open {
8414         const char *filename;
8415         int (*fill)(CPUArchState *cpu_env, int fd);
8416         int (*cmp)(const char *s1, const char *s2);
8417     };
8418     const struct fake_open *fake_open;
8419     static const struct fake_open fakes[] = {
8420         { "maps", open_self_maps, is_proc_myself },
8421         { "smaps", open_self_smaps, is_proc_myself },
8422         { "stat", open_self_stat, is_proc_myself },
8423         { "auxv", open_self_auxv, is_proc_myself },
8424         { "cmdline", open_self_cmdline, is_proc_myself },
8425 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8426         { "/proc/net/route", open_net_route, is_proc },
8427 #endif
8428 #if defined(HAVE_ARCH_PROC_CPUINFO)
8429         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8430 #endif
8431 #if defined(HAVE_ARCH_PROC_HARDWARE)
8432         { "/proc/hardware", open_hardware, is_proc },
8433 #endif
8434         { NULL, NULL, NULL }
8435     };
8436 
8437     /* if this is a file from /proc/ filesystem, expand full name */
8438     proc_name = realpath(fname, NULL);
8439     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8440         pathname = proc_name;
8441     } else {
8442         pathname = fname;
8443     }
8444 
8445     if (is_proc_myself(pathname, "exe")) {
8446         /* Honor openat2 resolve flags */
8447         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8448             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8449             errno = ELOOP;
8450             return -1;
8451         }
8452         if (safe) {
8453             return safe_openat(dirfd, exec_path, flags, mode);
8454         } else {
8455             return openat(dirfd, exec_path, flags, mode);
8456         }
8457     }
8458 
8459     for (fake_open = fakes; fake_open->filename; fake_open++) {
8460         if (fake_open->cmp(pathname, fake_open->filename)) {
8461             break;
8462         }
8463     }
8464 
8465     if (fake_open->filename) {
8466         const char *tmpdir;
8467         char filename[PATH_MAX];
8468         int fd, r;
8469 
8470         fd = memfd_create("qemu-open", 0);
8471         if (fd < 0) {
8472             if (errno != ENOSYS) {
8473                 return fd;
8474             }
8475             /* create temporary file to map stat to */
8476             tmpdir = getenv("TMPDIR");
8477             if (!tmpdir)
8478                 tmpdir = "/tmp";
8479             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8480             fd = mkstemp(filename);
8481             if (fd < 0) {
8482                 return fd;
8483             }
8484             unlink(filename);
8485         }
8486 
8487         if ((r = fake_open->fill(cpu_env, fd))) {
8488             int e = errno;
8489             close(fd);
8490             errno = e;
8491             return r;
8492         }
8493         lseek(fd, 0, SEEK_SET);
8494 
8495         return fd;
8496     }
8497 
8498     return -2;
8499 }
8500 
8501 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8502                     int flags, mode_t mode, bool safe)
8503 {
8504     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8505     if (fd > -2) {
8506         return fd;
8507     }
8508 
8509     if (safe) {
8510         return safe_openat(dirfd, path(pathname), flags, mode);
8511     } else {
8512         return openat(dirfd, path(pathname), flags, mode);
8513     }
8514 }
8515 
8516 
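/*
 * openat2(2): copy in and validate the guest's struct open_how (ver0
 * layout), convert flags/mode/resolve to host values, and route the
 * request through the same /proc interception as plain openat() before
 * falling back to the real safe_openat2().
 */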
8517 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8518                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8519                       abi_ulong guest_size)
8520 {
8521     struct open_how_ver0 how = {0};
8522     char *pathname;
8523     int ret;
8524 
8525     if (guest_size < sizeof(struct target_open_how_ver0)) {
8526         return -TARGET_EINVAL;
8527     }
8528     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8529     if (ret) {
8530         if (ret == -TARGET_E2BIG) {
8531             qemu_log_mask(LOG_UNIMP,
8532                           "Unimplemented openat2 open_how size: "
8533                           TARGET_ABI_FMT_lu "\n", guest_size);
8534         }
8535         return ret;
8536     }
8537     pathname = lock_user_string(guest_pathname);
8538     if (!pathname) {
8539         return -TARGET_EFAULT;
8540     }
8541 
8542     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8543     how.mode = tswap64(how.mode);
8544     how.resolve = tswap64(how.resolve);
8545     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8546                                 how.resolve, true);
8547     if (fd > -2) {
8548         ret = get_errno(fd);
8549     } else {
8550         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8551                                      sizeof(struct open_how_ver0)));
8552     }
8553 
8554     fd_trans_unregister(ret);
8555     unlock_user(pathname, guest_pathname, 0);
8556     return ret;
8557 }
8558 
8559 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8560 {
8561     ssize_t ret;
8562 
8563     if (!pathname || !buf) {
8564         errno = EFAULT;
8565         return -1;
8566     }
8567 
8568     if (!bufsiz) {
8569         /* Short circuit this for the magic exe check. */
8570         errno = EINVAL;
8571         return -1;
8572     }
8573 
8574     if (is_proc_myself((const char *)pathname, "exe")) {
8575         /*
8576          * Don't worry about sign mismatch as earlier mapping
8577          * logic would have thrown a bad address error.
8578          */
8579         ret = MIN(strlen(exec_path), bufsiz);
8580         /* We cannot NUL terminate the string. */
8581         memcpy(buf, exec_path, ret);
8582     } else {
8583         ret = readlink(path(pathname), buf, bufsiz);
8584     }
8585 
8586     return ret;
8587 }
8588 
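/*
 * execve()/execveat(): count and lock the guest argv/envp strings into
 * host memory, redirect "/proc/self/exe" to the real executable path,
 * and issue the host syscall through the safe_syscall wrappers (see the
 * comment further down for why even execve needs that).
 */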
8589 static int do_execv(CPUArchState *cpu_env, int dirfd,
8590                     abi_long pathname, abi_long guest_argp,
8591                     abi_long guest_envp, int flags, bool is_execveat)
8592 {
8593     int ret;
8594     char **argp, **envp;
8595     int argc, envc;
8596     abi_ulong gp;
8597     abi_ulong addr;
8598     char **q;
8599     void *p;
8600 
8601     argc = 0;
8602 
8603     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8604         if (get_user_ual(addr, gp)) {
8605             return -TARGET_EFAULT;
8606         }
8607         if (!addr) {
8608             break;
8609         }
8610         argc++;
8611     }
8612     envc = 0;
8613     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8614         if (get_user_ual(addr, gp)) {
8615             return -TARGET_EFAULT;
8616         }
8617         if (!addr) {
8618             break;
8619         }
8620         envc++;
8621     }
8622 
8623     argp = g_new0(char *, argc + 1);
8624     envp = g_new0(char *, envc + 1);
8625 
8626     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8627         if (get_user_ual(addr, gp)) {
8628             goto execve_efault;
8629         }
8630         if (!addr) {
8631             break;
8632         }
8633         *q = lock_user_string(addr);
8634         if (!*q) {
8635             goto execve_efault;
8636         }
8637     }
8638     *q = NULL;
8639 
8640     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8641         if (get_user_ual(addr, gp)) {
8642             goto execve_efault;
8643         }
8644         if (!addr) {
8645             break;
8646         }
8647         *q = lock_user_string(addr);
8648         if (!*q) {
8649             goto execve_efault;
8650         }
8651     }
8652     *q = NULL;
8653 
8654     /*
8655      * Although execve() is not an interruptible syscall it is
8656      * a special case where we must use the safe_syscall wrapper:
8657      * if we allow a signal to happen before we make the host
8658      * syscall then we will 'lose' it, because at the point of
8659      * execve the process leaves QEMU's control. So we use the
8660      * safe syscall wrapper to ensure that we either take the
8661      * signal as a guest signal, or else it does not happen
8662      * before the execve completes and makes it the other
8663      * program's problem.
8664      */
8665     p = lock_user_string(pathname);
8666     if (!p) {
8667         goto execve_efault;
8668     }
8669 
8670     const char *exe = p;
8671     if (is_proc_myself(p, "exe")) {
8672         exe = exec_path;
8673     }
8674     ret = is_execveat
8675         ? safe_execveat(dirfd, exe, argp, envp, flags)
8676         : safe_execve(exe, argp, envp);
8677     ret = get_errno(ret);
8678 
8679     unlock_user(p, pathname, 0);
8680 
8681     goto execve_end;
8682 
8683 execve_efault:
8684     ret = -TARGET_EFAULT;
8685 
8686 execve_end:
8687     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8688         if (get_user_ual(addr, gp) || !addr) {
8689             break;
8690         }
8691         unlock_user(*q, addr, 0);
8692     }
8693     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8694         if (get_user_ual(addr, gp) || !addr) {
8695             break;
8696         }
8697         unlock_user(*q, addr, 0);
8698     }
8699 
8700     g_free(argp);
8701     g_free(envp);
8702     return ret;
8703 }
8704 
8705 #define TIMER_MAGIC 0x0caf0000
8706 #define TIMER_MAGIC_MASK 0xffff0000
8707 
8708 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8709 static target_timer_t get_timer_id(abi_long arg)
8710 {
8711     target_timer_t timerid = arg;
8712 
8713     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8714         return -TARGET_EINVAL;
8715     }
8716 
8717     timerid &= 0xffff;
8718 
8719     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8720         return -TARGET_EINVAL;
8721     }
8722 
8723     return timerid;
8724 }
8725 
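/*
 * Convert a CPU affinity bitmap between the guest's abi_ulong-sized
 * words and the host's unsigned long words.  The copy is done bit by
 * bit so that differences in word size and byte order between guest
 * and host do not matter.
 */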
8726 static int target_to_host_cpu_mask(unsigned long *host_mask,
8727                                    size_t host_size,
8728                                    abi_ulong target_addr,
8729                                    size_t target_size)
8730 {
8731     unsigned target_bits = sizeof(abi_ulong) * 8;
8732     unsigned host_bits = sizeof(*host_mask) * 8;
8733     abi_ulong *target_mask;
8734     unsigned i, j;
8735 
8736     assert(host_size >= target_size);
8737 
8738     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8739     if (!target_mask) {
8740         return -TARGET_EFAULT;
8741     }
8742     memset(host_mask, 0, host_size);
8743 
8744     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8745         unsigned bit = i * target_bits;
8746         abi_ulong val;
8747 
8748         __get_user(val, &target_mask[i]);
8749         for (j = 0; j < target_bits; j++, bit++) {
8750             if (val & (1UL << j)) {
8751                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8752             }
8753         }
8754     }
8755 
8756     unlock_user(target_mask, target_addr, 0);
8757     return 0;
8758 }
8759 
8760 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8761                                    size_t host_size,
8762                                    abi_ulong target_addr,
8763                                    size_t target_size)
8764 {
8765     unsigned target_bits = sizeof(abi_ulong) * 8;
8766     unsigned host_bits = sizeof(*host_mask) * 8;
8767     abi_ulong *target_mask;
8768     unsigned i, j;
8769 
8770     assert(host_size >= target_size);
8771 
8772     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8773     if (!target_mask) {
8774         return -TARGET_EFAULT;
8775     }
8776 
8777     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8778         unsigned bit = i * target_bits;
8779         abi_ulong val = 0;
8780 
8781         for (j = 0; j < target_bits; j++, bit++) {
8782             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8783                 val |= 1UL << j;
8784             }
8785         }
8786         __put_user(val, &target_mask[i]);
8787     }
8788 
8789     unlock_user(target_mask, target_addr, target_size);
8790     return 0;
8791 }
8792 
8793 #ifdef TARGET_NR_getdents
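/*
 * getdents(2): read host dirents into a bounce buffer and repack them
 * as target_dirent records, since record size, alignment and the
 * placement of the d_type byte differ between host and target.  If a
 * repacked record no longer fits in the guest buffer, the directory
 * offset is rewound to the first entry not returned.
 */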
8794 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8795 {
8796     g_autofree void *hdirp = NULL;
8797     void *tdirp;
8798     int hlen, hoff, toff;
8799     int hreclen, treclen;
8800     off_t prev_diroff = 0;
8801 
8802     hdirp = g_try_malloc(count);
8803     if (!hdirp) {
8804         return -TARGET_ENOMEM;
8805     }
8806 
8807 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8808     hlen = sys_getdents(dirfd, hdirp, count);
8809 #else
8810     hlen = sys_getdents64(dirfd, hdirp, count);
8811 #endif
8812 
8813     hlen = get_errno(hlen);
8814     if (is_error(hlen)) {
8815         return hlen;
8816     }
8817 
8818     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8819     if (!tdirp) {
8820         return -TARGET_EFAULT;
8821     }
8822 
8823     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8824 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8825         struct linux_dirent *hde = hdirp + hoff;
8826 #else
8827         struct linux_dirent64 *hde = hdirp + hoff;
8828 #endif
8829         struct target_dirent *tde = tdirp + toff;
8830         int namelen;
8831         uint8_t type;
8832 
8833         namelen = strlen(hde->d_name);
8834         hreclen = hde->d_reclen;
8835         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8836         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8837 
8838         if (toff + treclen > count) {
8839             /*
8840              * If the host struct is smaller than the target struct, or
8841              * requires less alignment and thus packs into less space,
8842              * then the host can return more entries than we can pass
8843              * on to the guest.
8844              */
8845             if (toff == 0) {
8846                 toff = -TARGET_EINVAL; /* result buffer is too small */
8847                 break;
8848             }
8849             /*
8850              * Return what we have, resetting the file pointer to the
8851              * location of the first record not returned.
8852              */
8853             lseek(dirfd, prev_diroff, SEEK_SET);
8854             break;
8855         }
8856 
8857         prev_diroff = hde->d_off;
8858         tde->d_ino = tswapal(hde->d_ino);
8859         tde->d_off = tswapal(hde->d_off);
8860         tde->d_reclen = tswap16(treclen);
8861         memcpy(tde->d_name, hde->d_name, namelen + 1);
8862 
8863         /*
8864          * The getdents type is in what was formerly a padding byte at the
8865          * end of the structure.
8866          */
8867 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8868         type = *((uint8_t *)hde + hreclen - 1);
8869 #else
8870         type = hde->d_type;
8871 #endif
8872         *((uint8_t *)tde + treclen - 1) = type;
8873     }
8874 
8875     unlock_user(tdirp, arg2, toff);
8876     return toff;
8877 }
8878 #endif /* TARGET_NR_getdents */
8879 
8880 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8881 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8882 {
8883     g_autofree void *hdirp = NULL;
8884     void *tdirp;
8885     int hlen, hoff, toff;
8886     int hreclen, treclen;
8887     off_t prev_diroff = 0;
8888 
8889     hdirp = g_try_malloc(count);
8890     if (!hdirp) {
8891         return -TARGET_ENOMEM;
8892     }
8893 
8894     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8895     if (is_error(hlen)) {
8896         return hlen;
8897     }
8898 
8899     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8900     if (!tdirp) {
8901         return -TARGET_EFAULT;
8902     }
8903 
8904     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8905         struct linux_dirent64 *hde = hdirp + hoff;
8906         struct target_dirent64 *tde = tdirp + toff;
8907         int namelen;
8908 
8909         namelen = strlen(hde->d_name) + 1;
8910         hreclen = hde->d_reclen;
8911         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8912         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8913 
8914         if (toff + treclen > count) {
8915             /*
8916              * If the host struct is smaller than the target struct, or
8917              * requires less alignment and thus packs into less space,
8918              * then the host can return more entries than we can pass
8919              * on to the guest.
8920              */
8921             if (toff == 0) {
8922                 toff = -TARGET_EINVAL; /* result buffer is too small */
8923                 break;
8924             }
8925             /*
8926              * Return what we have, resetting the file pointer to the
8927              * location of the first record not returned.
8928              */
8929             lseek(dirfd, prev_diroff, SEEK_SET);
8930             break;
8931         }
8932 
8933         prev_diroff = hde->d_off;
8934         tde->d_ino = tswap64(hde->d_ino);
8935         tde->d_off = tswap64(hde->d_off);
8936         tde->d_reclen = tswap16(treclen);
8937         tde->d_type = hde->d_type;
8938         memcpy(tde->d_name, hde->d_name, namelen);
8939     }
8940 
8941     unlock_user(tdirp, arg2, toff);
8942     return toff;
8943 }
8944 #endif /* TARGET_NR_getdents64 */
8945 
8946 #if defined(TARGET_NR_riscv_hwprobe)
8947 
8948 #define RISCV_HWPROBE_KEY_MVENDORID     0
8949 #define RISCV_HWPROBE_KEY_MARCHID       1
8950 #define RISCV_HWPROBE_KEY_MIMPID        2
8951 
8952 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8953 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8954 
8955 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8956 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8957 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8958 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8959 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8960 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8961 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8962 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8963 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8964 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8965 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8966 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8967 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8968 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8969 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8970 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8971 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8972 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8973 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8974 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8975 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8976 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8977 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8978 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8979 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8980 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8981 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8982 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8983 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8984 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8985 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8986 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8987 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8988 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8989 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8990 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8991 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8992 
8993 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8994 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8995 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8996 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8997 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8998 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8999 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9000 
9001 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
9002 
9003 struct riscv_hwprobe {
9004     abi_llong  key;
9005     abi_ullong value;
9006 };
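/*
 * The guest passes an array of these key/value pairs.  For each pair,
 * risc_hwprobe_fill_pairs() below fills in the value for a recognised key
 * and writes -1 back into the key field of any pair it does not recognise.
 */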
9007 
9008 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9009                                     struct riscv_hwprobe *pair,
9010                                     size_t pair_count)
9011 {
9012     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9013 
9014     for (; pair_count > 0; pair_count--, pair++) {
9015         abi_llong key;
9016         abi_ullong value;
9017         __put_user(0, &pair->value);
9018         __get_user(key, &pair->key);
9019         switch (key) {
9020         case RISCV_HWPROBE_KEY_MVENDORID:
9021             __put_user(cfg->mvendorid, &pair->value);
9022             break;
9023         case RISCV_HWPROBE_KEY_MARCHID:
9024             __put_user(cfg->marchid, &pair->value);
9025             break;
9026         case RISCV_HWPROBE_KEY_MIMPID:
9027             __put_user(cfg->mimpid, &pair->value);
9028             break;
9029         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9030             value = riscv_has_ext(env, RVI) &&
9031                     riscv_has_ext(env, RVM) &&
9032                     riscv_has_ext(env, RVA) ?
9033                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9034             __put_user(value, &pair->value);
9035             break;
9036         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9037             value = riscv_has_ext(env, RVF) &&
9038                     riscv_has_ext(env, RVD) ?
9039                     RISCV_HWPROBE_IMA_FD : 0;
9040             value |= riscv_has_ext(env, RVC) ?
9041                      RISCV_HWPROBE_IMA_C : 0;
9042             value |= riscv_has_ext(env, RVV) ?
9043                      RISCV_HWPROBE_IMA_V : 0;
9044             value |= cfg->ext_zba ?
9045                      RISCV_HWPROBE_EXT_ZBA : 0;
9046             value |= cfg->ext_zbb ?
9047                      RISCV_HWPROBE_EXT_ZBB : 0;
9048             value |= cfg->ext_zbs ?
9049                      RISCV_HWPROBE_EXT_ZBS : 0;
9050             value |= cfg->ext_zicboz ?
9051                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9052             value |= cfg->ext_zbc ?
9053                      RISCV_HWPROBE_EXT_ZBC : 0;
9054             value |= cfg->ext_zbkb ?
9055                      RISCV_HWPROBE_EXT_ZBKB : 0;
9056             value |= cfg->ext_zbkc ?
9057                      RISCV_HWPROBE_EXT_ZBKC : 0;
9058             value |= cfg->ext_zbkx ?
9059                      RISCV_HWPROBE_EXT_ZBKX : 0;
9060             value |= cfg->ext_zknd ?
9061                      RISCV_HWPROBE_EXT_ZKND : 0;
9062             value |= cfg->ext_zkne ?
9063                      RISCV_HWPROBE_EXT_ZKNE : 0;
9064             value |= cfg->ext_zknh ?
9065                      RISCV_HWPROBE_EXT_ZKNH : 0;
9066             value |= cfg->ext_zksed ?
9067                      RISCV_HWPROBE_EXT_ZKSED : 0;
9068             value |= cfg->ext_zksh ?
9069                      RISCV_HWPROBE_EXT_ZKSH : 0;
9070             value |= cfg->ext_zkt ?
9071                      RISCV_HWPROBE_EXT_ZKT : 0;
9072             value |= cfg->ext_zvbb ?
9073                      RISCV_HWPROBE_EXT_ZVBB : 0;
9074             value |= cfg->ext_zvbc ?
9075                      RISCV_HWPROBE_EXT_ZVBC : 0;
9076             value |= cfg->ext_zvkb ?
9077                      RISCV_HWPROBE_EXT_ZVKB : 0;
9078             value |= cfg->ext_zvkg ?
9079                      RISCV_HWPROBE_EXT_ZVKG : 0;
9080             value |= cfg->ext_zvkned ?
9081                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9082             value |= cfg->ext_zvknha ?
9083                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9084             value |= cfg->ext_zvknhb ?
9085                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9086             value |= cfg->ext_zvksed ?
9087                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9088             value |= cfg->ext_zvksh ?
9089                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9090             value |= cfg->ext_zvkt ?
9091                      RISCV_HWPROBE_EXT_ZVKT : 0;
9092             value |= cfg->ext_zfh ?
9093                      RISCV_HWPROBE_EXT_ZFH : 0;
9094             value |= cfg->ext_zfhmin ?
9095                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9096             value |= cfg->ext_zihintntl ?
9097                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9098             value |= cfg->ext_zvfh ?
9099                      RISCV_HWPROBE_EXT_ZVFH : 0;
9100             value |= cfg->ext_zvfhmin ?
9101                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9102             value |= cfg->ext_zfa ?
9103                      RISCV_HWPROBE_EXT_ZFA : 0;
9104             value |= cfg->ext_ztso ?
9105                      RISCV_HWPROBE_EXT_ZTSO : 0;
9106             value |= cfg->ext_zacas ?
9107                      RISCV_HWPROBE_EXT_ZACAS : 0;
9108             value |= cfg->ext_zicond ?
9109                      RISCV_HWPROBE_EXT_ZICOND : 0;
9110             __put_user(value, &pair->value);
9111             break;
9112         case RISCV_HWPROBE_KEY_CPUPERF_0:
9113             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9114             break;
9115         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9116             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9117             __put_user(value, &pair->value);
9118             break;
9119         default:
9120             __put_user(-1, &pair->key);
9121             break;
9122         }
9123     }
9124 }
9125 
9126 /*
9127  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9128  * If the cpumask_t has no bits set: -EINVAL.
9129  * Otherwise the cpumask_t contains some bit set: 0.
9130  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9131  * nor bound the search by cpumask_size().
9132  */
9133 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9134 {
9135     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9136     int ret = -TARGET_EFAULT;
9137 
9138     if (p) {
9139         ret = -TARGET_EINVAL;
9140         /*
9141          * Since we only care about the empty/non-empty state of the cpumask_t,
9142          * not the individual bits, we do not need to repartition the bits
9143          * from target abi_ulong to host unsigned long.
9144          *
9145          * Note that the kernel does not round up cpusetsize to a multiple of
9146          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9147          * it copies exactly cpusetsize bytes into a zeroed buffer.
9148          */
9149         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9150             if (p[i]) {
9151                 ret = 0;
9152                 break;
9153             }
9154         }
9155         unlock_user(p, target_cpus, 0);
9156     }
9157     return ret;
9158 }
9159 
9160 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9161                                  abi_long arg2, abi_long arg3,
9162                                  abi_long arg4, abi_long arg5)
9163 {
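    /*
     * Argument layout, matching the riscv_hwprobe(2) ABI:
     * arg1 = pairs, arg2 = pair_count, arg3 = cpusetsize,
     * arg4 = cpus, arg5 = flags (must be 0).
     */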
9164     int ret;
9165     struct riscv_hwprobe *host_pairs;
9166 
9167     /* flags must be 0 */
9168     if (arg5 != 0) {
9169         return -TARGET_EINVAL;
9170     }
9171 
9172     /* check cpu_set */
9173     if (arg3 != 0) {
9174         ret = nonempty_cpu_set(arg3, arg4);
9175         if (ret != 0) {
9176             return ret;
9177         }
9178     } else if (arg4 != 0) {
9179         return -TARGET_EINVAL;
9180     }
9181 
9182     /* no pairs */
9183     if (arg2 == 0) {
9184         return 0;
9185     }
9186 
9187     host_pairs = lock_user(VERIFY_WRITE, arg1,
9188                            sizeof(*host_pairs) * (size_t)arg2, 0);
9189     if (host_pairs == NULL) {
9190         return -TARGET_EFAULT;
9191     }
9192     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9193     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9194     return 0;
9195 }
9196 #endif /* TARGET_NR_riscv_hwprobe */
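/*
 * For reference, a minimal guest-side probe using the keys above might look
 * like this (illustrative sketch only; error handling omitted):
 *
 *     struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *     if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *         (pair.value & RISCV_HWPROBE_IMA_V)) {
 *         ... the vector extension is reported as available ...
 *     }
 */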
9197 
9198 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9199 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9200 #endif
9201 
9202 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9203 #define __NR_sys_open_tree __NR_open_tree
9204 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9205           unsigned int, __flags)
9206 #endif
9207 
9208 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9209 #define __NR_sys_move_mount __NR_move_mount
9210 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9211            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9212 #endif
9213 
9214 /* This is an internal helper for do_syscall so that it is easier
9215  * to have a single return point, allowing actions such as logging
9216  * of syscall results to be performed.
9217  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9218  */
9219 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9220                             abi_long arg2, abi_long arg3, abi_long arg4,
9221                             abi_long arg5, abi_long arg6, abi_long arg7,
9222                             abi_long arg8)
9223 {
9224     CPUState *cpu = env_cpu(cpu_env);
9225     abi_long ret;
9226 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9227     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9228     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9229     || defined(TARGET_NR_statx)
9230     struct stat st;
9231 #endif
9232 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9233     || defined(TARGET_NR_fstatfs)
9234     struct statfs stfs;
9235 #endif
9236     void *p;
9237 
9238     switch(num) {
9239     case TARGET_NR_exit:
9240         /* In old applications this may be used to implement _exit(2).
9241            However, in threaded applications it is used for thread termination,
9242            and _exit_group is used for application termination.
9243            Do thread termination if we have more than one thread.  */
9244 
9245         if (block_signals()) {
9246             return -QEMU_ERESTARTSYS;
9247         }
9248 
9249         pthread_mutex_lock(&clone_lock);
9250 
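        /*
         * CPU_NEXT(first_cpu) being non-NULL means another guest thread
         * still exists, so only this thread is terminated; otherwise the
         * whole process exits below.
         */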
9251         if (CPU_NEXT(first_cpu)) {
9252             TaskState *ts = get_task_state(cpu);
9253 
9254             if (ts->child_tidptr) {
9255                 put_user_u32(0, ts->child_tidptr);
9256                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9257                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9258             }
9259 
9260             object_unparent(OBJECT(cpu));
9261             object_unref(OBJECT(cpu));
9262             /*
9263              * At this point the CPU should be unrealized and removed
9264              * from cpu lists. We can clean-up the rest of the thread
9265              * data without the lock held.
9266              */
9267 
9268             pthread_mutex_unlock(&clone_lock);
9269 
9270             thread_cpu = NULL;
9271             g_free(ts);
9272             rcu_unregister_thread();
9273             pthread_exit(NULL);
9274         }
9275 
9276         pthread_mutex_unlock(&clone_lock);
9277         preexit_cleanup(cpu_env, arg1);
9278         _exit(arg1);
9279         return 0; /* avoid warning */
9280     case TARGET_NR_read:
9281         if (arg2 == 0 && arg3 == 0) {
9282             return get_errno(safe_read(arg1, 0, 0));
9283         } else {
9284             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9285                 return -TARGET_EFAULT;
9286             ret = get_errno(safe_read(arg1, p, arg3));
9287             if (ret >= 0 &&
9288                 fd_trans_host_to_target_data(arg1)) {
9289                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9290             }
9291             unlock_user(p, arg2, ret);
9292         }
9293         return ret;
9294     case TARGET_NR_write:
9295         if (arg2 == 0 && arg3 == 0) {
9296             return get_errno(safe_write(arg1, 0, 0));
9297         }
9298         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9299             return -TARGET_EFAULT;
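        /*
         * If this fd has a target-to-host data translator, run it on a
         * private copy: the guest buffer was locked read-only, and the
         * translator may rewrite the data in place.
         */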
9300         if (fd_trans_target_to_host_data(arg1)) {
9301             void *copy = g_malloc(arg3);
9302             memcpy(copy, p, arg3);
9303             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9304             if (ret >= 0) {
9305                 ret = get_errno(safe_write(arg1, copy, ret));
9306             }
9307             g_free(copy);
9308         } else {
9309             ret = get_errno(safe_write(arg1, p, arg3));
9310         }
9311         unlock_user(p, arg2, 0);
9312         return ret;
9313 
9314 #ifdef TARGET_NR_open
9315     case TARGET_NR_open:
9316         if (!(p = lock_user_string(arg1)))
9317             return -TARGET_EFAULT;
9318         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9319                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9320                                   arg3, true));
9321         fd_trans_unregister(ret);
9322         unlock_user(p, arg1, 0);
9323         return ret;
9324 #endif
9325     case TARGET_NR_openat:
9326         if (!(p = lock_user_string(arg2)))
9327             return -TARGET_EFAULT;
9328         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9329                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9330                                   arg4, true));
9331         fd_trans_unregister(ret);
9332         unlock_user(p, arg2, 0);
9333         return ret;
9334     case TARGET_NR_openat2:
9335         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9336         return ret;
9337 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9338     case TARGET_NR_name_to_handle_at:
9339         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9340         return ret;
9341 #endif
9342 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9343     case TARGET_NR_open_by_handle_at:
9344         ret = do_open_by_handle_at(arg1, arg2, arg3);
9345         fd_trans_unregister(ret);
9346         return ret;
9347 #endif
9348 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9349     case TARGET_NR_pidfd_open:
9350         return get_errno(pidfd_open(arg1, arg2));
9351 #endif
9352 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9353     case TARGET_NR_pidfd_send_signal:
9354         {
9355             siginfo_t uinfo, *puinfo;
9356 
9357             if (arg3) {
9358                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9359                 if (!p) {
9360                     return -TARGET_EFAULT;
9361                  }
9362                  target_to_host_siginfo(&uinfo, p);
9363                  unlock_user(p, arg3, 0);
9364                  puinfo = &uinfo;
9365             } else {
9366                  puinfo = NULL;
9367             }
9368             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9369                                               puinfo, arg4));
9370         }
9371         return ret;
9372 #endif
9373 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9374     case TARGET_NR_pidfd_getfd:
9375         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9376 #endif
9377     case TARGET_NR_close:
9378         fd_trans_unregister(arg1);
9379         return get_errno(close(arg1));
9380 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9381     case TARGET_NR_close_range:
9382         ret = get_errno(sys_close_range(arg1, arg2, arg3));
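        /*
         * On success the host has closed the requested fds, so drop any fd
         * translators registered for them.  CLOSE_RANGE_CLOEXEC only marks
         * the fds close-on-exec instead of closing them, so in that case
         * the translators stay registered.
         */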
9383         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9384             abi_long fd, maxfd;
9385             maxfd = MIN(arg2, target_fd_max);
9386             for (fd = arg1; fd < maxfd; fd++) {
9387                 fd_trans_unregister(fd);
9388             }
9389         }
9390         return ret;
9391 #endif
9392 
9393     case TARGET_NR_brk:
9394         return do_brk(arg1);
9395 #ifdef TARGET_NR_fork
9396     case TARGET_NR_fork:
9397         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9398 #endif
9399 #ifdef TARGET_NR_waitpid
9400     case TARGET_NR_waitpid:
9401         {
9402             int status;
9403             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9404             if (!is_error(ret) && arg2 && ret
9405                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9406                 return -TARGET_EFAULT;
9407         }
9408         return ret;
9409 #endif
9410 #ifdef TARGET_NR_waitid
9411     case TARGET_NR_waitid:
9412         {
9413             struct rusage ru;
9414             siginfo_t info;
9415 
9416             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9417                                         arg4, (arg5 ? &ru : NULL)));
9418             if (!is_error(ret)) {
9419                 if (arg3) {
9420                     p = lock_user(VERIFY_WRITE, arg3,
9421                                   sizeof(target_siginfo_t), 0);
9422                     if (!p) {
9423                         return -TARGET_EFAULT;
9424                     }
9425                     host_to_target_siginfo(p, &info);
9426                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9427                 }
9428                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9429                     return -TARGET_EFAULT;
9430                 }
9431             }
9432         }
9433         return ret;
9434 #endif
9435 #ifdef TARGET_NR_creat /* not on alpha */
9436     case TARGET_NR_creat:
9437         if (!(p = lock_user_string(arg1)))
9438             return -TARGET_EFAULT;
9439         ret = get_errno(creat(p, arg2));
9440         fd_trans_unregister(ret);
9441         unlock_user(p, arg1, 0);
9442         return ret;
9443 #endif
9444 #ifdef TARGET_NR_link
9445     case TARGET_NR_link:
9446         {
9447             void * p2;
9448             p = lock_user_string(arg1);
9449             p2 = lock_user_string(arg2);
9450             if (!p || !p2)
9451                 ret = -TARGET_EFAULT;
9452             else
9453                 ret = get_errno(link(p, p2));
9454             unlock_user(p2, arg2, 0);
9455             unlock_user(p, arg1, 0);
9456         }
9457         return ret;
9458 #endif
9459 #if defined(TARGET_NR_linkat)
9460     case TARGET_NR_linkat:
9461         {
9462             void * p2 = NULL;
9463             if (!arg2 || !arg4)
9464                 return -TARGET_EFAULT;
9465             p  = lock_user_string(arg2);
9466             p2 = lock_user_string(arg4);
9467             if (!p || !p2)
9468                 ret = -TARGET_EFAULT;
9469             else
9470                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9471             unlock_user(p, arg2, 0);
9472             unlock_user(p2, arg4, 0);
9473         }
9474         return ret;
9475 #endif
9476 #ifdef TARGET_NR_unlink
9477     case TARGET_NR_unlink:
9478         if (!(p = lock_user_string(arg1)))
9479             return -TARGET_EFAULT;
9480         ret = get_errno(unlink(p));
9481         unlock_user(p, arg1, 0);
9482         return ret;
9483 #endif
9484 #if defined(TARGET_NR_unlinkat)
9485     case TARGET_NR_unlinkat:
9486         if (!(p = lock_user_string(arg2)))
9487             return -TARGET_EFAULT;
9488         ret = get_errno(unlinkat(arg1, p, arg3));
9489         unlock_user(p, arg2, 0);
9490         return ret;
9491 #endif
9492     case TARGET_NR_execveat:
9493         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9494     case TARGET_NR_execve:
9495         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9496     case TARGET_NR_chdir:
9497         if (!(p = lock_user_string(arg1)))
9498             return -TARGET_EFAULT;
9499         ret = get_errno(chdir(p));
9500         unlock_user(p, arg1, 0);
9501         return ret;
9502 #ifdef TARGET_NR_time
9503     case TARGET_NR_time:
9504         {
9505             time_t host_time;
9506             ret = get_errno(time(&host_time));
9507             if (!is_error(ret)
9508                 && arg1
9509                 && put_user_sal(host_time, arg1))
9510                 return -TARGET_EFAULT;
9511         }
9512         return ret;
9513 #endif
9514 #ifdef TARGET_NR_mknod
9515     case TARGET_NR_mknod:
9516         if (!(p = lock_user_string(arg1)))
9517             return -TARGET_EFAULT;
9518         ret = get_errno(mknod(p, arg2, arg3));
9519         unlock_user(p, arg1, 0);
9520         return ret;
9521 #endif
9522 #if defined(TARGET_NR_mknodat)
9523     case TARGET_NR_mknodat:
9524         if (!(p = lock_user_string(arg2)))
9525             return -TARGET_EFAULT;
9526         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9527         unlock_user(p, arg2, 0);
9528         return ret;
9529 #endif
9530 #ifdef TARGET_NR_chmod
9531     case TARGET_NR_chmod:
9532         if (!(p = lock_user_string(arg1)))
9533             return -TARGET_EFAULT;
9534         ret = get_errno(chmod(p, arg2));
9535         unlock_user(p, arg1, 0);
9536         return ret;
9537 #endif
9538 #ifdef TARGET_NR_lseek
9539     case TARGET_NR_lseek:
9540         return get_errno(lseek(arg1, arg2, arg3));
9541 #endif
9542 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9543     /* Alpha specific */
9544     case TARGET_NR_getxpid:
9545         cpu_env->ir[IR_A4] = getppid();
9546         return get_errno(getpid());
9547 #endif
9548 #ifdef TARGET_NR_getpid
9549     case TARGET_NR_getpid:
9550         return get_errno(getpid());
9551 #endif
9552     case TARGET_NR_mount:
9553         {
9554             /* need to look at the data field */
9555             void *p2, *p3;
9556 
9557             if (arg1) {
9558                 p = lock_user_string(arg1);
9559                 if (!p) {
9560                     return -TARGET_EFAULT;
9561                 }
9562             } else {
9563                 p = NULL;
9564             }
9565 
9566             p2 = lock_user_string(arg2);
9567             if (!p2) {
9568                 if (arg1) {
9569                     unlock_user(p, arg1, 0);
9570                 }
9571                 return -TARGET_EFAULT;
9572             }
9573 
9574             if (arg3) {
9575                 p3 = lock_user_string(arg3);
9576                 if (!p3) {
9577                     if (arg1) {
9578                         unlock_user(p, arg1, 0);
9579                     }
9580                     unlock_user(p2, arg2, 0);
9581                     return -TARGET_EFAULT;
9582                 }
9583             } else {
9584                 p3 = NULL;
9585             }
9586 
9587             /* FIXME - arg5 should be locked, but it isn't clear how to
9588              * do that since it's not guaranteed to be a NULL-terminated
9589              * string.
9590              */
9591             if (!arg5) {
9592                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9593             } else {
9594                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9595             }
9596             ret = get_errno(ret);
9597 
9598             if (arg1) {
9599                 unlock_user(p, arg1, 0);
9600             }
9601             unlock_user(p2, arg2, 0);
9602             if (arg3) {
9603                 unlock_user(p3, arg3, 0);
9604             }
9605         }
9606         return ret;
9607 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9608 #if defined(TARGET_NR_umount)
9609     case TARGET_NR_umount:
9610 #endif
9611 #if defined(TARGET_NR_oldumount)
9612     case TARGET_NR_oldumount:
9613 #endif
9614         if (!(p = lock_user_string(arg1)))
9615             return -TARGET_EFAULT;
9616         ret = get_errno(umount(p));
9617         unlock_user(p, arg1, 0);
9618         return ret;
9619 #endif
9620 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9621     case TARGET_NR_move_mount:
9622         {
9623             void *p2, *p4;
9624 
9625             if (!arg2 || !arg4) {
9626                 return -TARGET_EFAULT;
9627             }
9628 
9629             p2 = lock_user_string(arg2);
9630             if (!p2) {
9631                 return -TARGET_EFAULT;
9632             }
9633 
9634             p4 = lock_user_string(arg4);
9635             if (!p4) {
9636                 unlock_user(p2, arg2, 0);
9637                 return -TARGET_EFAULT;
9638             }
9639             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9640 
9641             unlock_user(p2, arg2, 0);
9642             unlock_user(p4, arg4, 0);
9643 
9644             return ret;
9645         }
9646 #endif
9647 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9648     case TARGET_NR_open_tree:
9649         {
9650             void *p2;
9651             int host_flags;
9652 
9653             if (!arg2) {
9654                 return -TARGET_EFAULT;
9655             }
9656 
9657             p2 = lock_user_string(arg2);
9658             if (!p2) {
9659                 return -TARGET_EFAULT;
9660             }
9661 
9662             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9663             if (arg3 & TARGET_O_CLOEXEC) {
9664                 host_flags |= O_CLOEXEC;
9665             }
9666 
9667             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9668 
9669             unlock_user(p2, arg2, 0);
9670 
9671             return ret;
9672         }
9673 #endif
9674 #ifdef TARGET_NR_stime /* not on alpha */
9675     case TARGET_NR_stime:
9676         {
9677             struct timespec ts;
9678             ts.tv_nsec = 0;
9679             if (get_user_sal(ts.tv_sec, arg1)) {
9680                 return -TARGET_EFAULT;
9681             }
9682             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9683         }
9684 #endif
9685 #ifdef TARGET_NR_alarm /* not on alpha */
9686     case TARGET_NR_alarm:
9687         return alarm(arg1);
9688 #endif
9689 #ifdef TARGET_NR_pause /* not on alpha */
9690     case TARGET_NR_pause:
9691         if (!block_signals()) {
9692             sigsuspend(&get_task_state(cpu)->signal_mask);
9693         }
9694         return -TARGET_EINTR;
9695 #endif
9696 #ifdef TARGET_NR_utime
9697     case TARGET_NR_utime:
9698         {
9699             struct utimbuf tbuf, *host_tbuf;
9700             struct target_utimbuf *target_tbuf;
9701             if (arg2) {
9702                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9703                     return -TARGET_EFAULT;
9704                 tbuf.actime = tswapal(target_tbuf->actime);
9705                 tbuf.modtime = tswapal(target_tbuf->modtime);
9706                 unlock_user_struct(target_tbuf, arg2, 0);
9707                 host_tbuf = &tbuf;
9708             } else {
9709                 host_tbuf = NULL;
9710             }
9711             if (!(p = lock_user_string(arg1)))
9712                 return -TARGET_EFAULT;
9713             ret = get_errno(utime(p, host_tbuf));
9714             unlock_user(p, arg1, 0);
9715         }
9716         return ret;
9717 #endif
9718 #ifdef TARGET_NR_utimes
9719     case TARGET_NR_utimes:
9720         {
9721             struct timeval *tvp, tv[2];
9722             if (arg2) {
9723                 if (copy_from_user_timeval(&tv[0], arg2)
9724                     || copy_from_user_timeval(&tv[1],
9725                                               arg2 + sizeof(struct target_timeval)))
9726                     return -TARGET_EFAULT;
9727                 tvp = tv;
9728             } else {
9729                 tvp = NULL;
9730             }
9731             if (!(p = lock_user_string(arg1)))
9732                 return -TARGET_EFAULT;
9733             ret = get_errno(utimes(p, tvp));
9734             unlock_user(p, arg1, 0);
9735         }
9736         return ret;
9737 #endif
9738 #if defined(TARGET_NR_futimesat)
9739     case TARGET_NR_futimesat:
9740         {
9741             struct timeval *tvp, tv[2];
9742             if (arg3) {
9743                 if (copy_from_user_timeval(&tv[0], arg3)
9744                     || copy_from_user_timeval(&tv[1],
9745                                               arg3 + sizeof(struct target_timeval)))
9746                     return -TARGET_EFAULT;
9747                 tvp = tv;
9748             } else {
9749                 tvp = NULL;
9750             }
9751             if (!(p = lock_user_string(arg2))) {
9752                 return -TARGET_EFAULT;
9753             }
9754             ret = get_errno(futimesat(arg1, path(p), tvp));
9755             unlock_user(p, arg2, 0);
9756         }
9757         return ret;
9758 #endif
9759 #ifdef TARGET_NR_access
9760     case TARGET_NR_access:
9761         if (!(p = lock_user_string(arg1))) {
9762             return -TARGET_EFAULT;
9763         }
9764         ret = get_errno(access(path(p), arg2));
9765         unlock_user(p, arg1, 0);
9766         return ret;
9767 #endif
9768 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9769     case TARGET_NR_faccessat:
9770         if (!(p = lock_user_string(arg2))) {
9771             return -TARGET_EFAULT;
9772         }
9773         ret = get_errno(faccessat(arg1, p, arg3, 0));
9774         unlock_user(p, arg2, 0);
9775         return ret;
9776 #endif
9777 #if defined(TARGET_NR_faccessat2)
9778     case TARGET_NR_faccessat2:
9779         if (!(p = lock_user_string(arg2))) {
9780             return -TARGET_EFAULT;
9781         }
9782         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9783         unlock_user(p, arg2, 0);
9784         return ret;
9785 #endif
9786 #ifdef TARGET_NR_nice /* not on alpha */
9787     case TARGET_NR_nice:
9788         return get_errno(nice(arg1));
9789 #endif
9790     case TARGET_NR_sync:
9791         sync();
9792         return 0;
9793 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9794     case TARGET_NR_syncfs:
9795         return get_errno(syncfs(arg1));
9796 #endif
9797     case TARGET_NR_kill:
9798         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9799 #ifdef TARGET_NR_rename
9800     case TARGET_NR_rename:
9801         {
9802             void *p2;
9803             p = lock_user_string(arg1);
9804             p2 = lock_user_string(arg2);
9805             if (!p || !p2)
9806                 ret = -TARGET_EFAULT;
9807             else
9808                 ret = get_errno(rename(p, p2));
9809             unlock_user(p2, arg2, 0);
9810             unlock_user(p, arg1, 0);
9811         }
9812         return ret;
9813 #endif
9814 #if defined(TARGET_NR_renameat)
9815     case TARGET_NR_renameat:
9816         {
9817             void *p2;
9818             p  = lock_user_string(arg2);
9819             p2 = lock_user_string(arg4);
9820             if (!p || !p2)
9821                 ret = -TARGET_EFAULT;
9822             else
9823                 ret = get_errno(renameat(arg1, p, arg3, p2));
9824             unlock_user(p2, arg4, 0);
9825             unlock_user(p, arg2, 0);
9826         }
9827         return ret;
9828 #endif
9829 #if defined(TARGET_NR_renameat2)
9830     case TARGET_NR_renameat2:
9831         {
9832             void *p2;
9833             p  = lock_user_string(arg2);
9834             p2 = lock_user_string(arg4);
9835             if (!p || !p2) {
9836                 ret = -TARGET_EFAULT;
9837             } else {
9838                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9839             }
9840             unlock_user(p2, arg4, 0);
9841             unlock_user(p, arg2, 0);
9842         }
9843         return ret;
9844 #endif
9845 #ifdef TARGET_NR_mkdir
9846     case TARGET_NR_mkdir:
9847         if (!(p = lock_user_string(arg1)))
9848             return -TARGET_EFAULT;
9849         ret = get_errno(mkdir(p, arg2));
9850         unlock_user(p, arg1, 0);
9851         return ret;
9852 #endif
9853 #if defined(TARGET_NR_mkdirat)
9854     case TARGET_NR_mkdirat:
9855         if (!(p = lock_user_string(arg2)))
9856             return -TARGET_EFAULT;
9857         ret = get_errno(mkdirat(arg1, p, arg3));
9858         unlock_user(p, arg2, 0);
9859         return ret;
9860 #endif
9861 #ifdef TARGET_NR_rmdir
9862     case TARGET_NR_rmdir:
9863         if (!(p = lock_user_string(arg1)))
9864             return -TARGET_EFAULT;
9865         ret = get_errno(rmdir(p));
9866         unlock_user(p, arg1, 0);
9867         return ret;
9868 #endif
9869     case TARGET_NR_dup:
9870         ret = get_errno(dup(arg1));
9871         if (ret >= 0) {
9872             fd_trans_dup(arg1, ret);
9873         }
9874         return ret;
9875 #ifdef TARGET_NR_pipe
9876     case TARGET_NR_pipe:
9877         return do_pipe(cpu_env, arg1, 0, 0);
9878 #endif
9879 #ifdef TARGET_NR_pipe2
9880     case TARGET_NR_pipe2:
9881         return do_pipe(cpu_env, arg1,
9882                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9883 #endif
9884     case TARGET_NR_times:
9885         {
9886             struct target_tms *tmsp;
9887             struct tms tms;
9888             ret = get_errno(times(&tms));
9889             if (arg1) {
9890                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9891                 if (!tmsp)
9892                     return -TARGET_EFAULT;
9893                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9894                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9895                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9896                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9897             }
9898             if (!is_error(ret))
9899                 ret = host_to_target_clock_t(ret);
9900         }
9901         return ret;
9902     case TARGET_NR_acct:
9903         if (arg1 == 0) {
9904             ret = get_errno(acct(NULL));
9905         } else {
9906             if (!(p = lock_user_string(arg1))) {
9907                 return -TARGET_EFAULT;
9908             }
9909             ret = get_errno(acct(path(p)));
9910             unlock_user(p, arg1, 0);
9911         }
9912         return ret;
9913 #ifdef TARGET_NR_umount2
9914     case TARGET_NR_umount2:
9915         if (!(p = lock_user_string(arg1)))
9916             return -TARGET_EFAULT;
9917         ret = get_errno(umount2(p, arg2));
9918         unlock_user(p, arg1, 0);
9919         return ret;
9920 #endif
9921     case TARGET_NR_ioctl:
9922         return do_ioctl(arg1, arg2, arg3);
9923 #ifdef TARGET_NR_fcntl
9924     case TARGET_NR_fcntl:
9925         return do_fcntl(arg1, arg2, arg3);
9926 #endif
9927     case TARGET_NR_setpgid:
9928         return get_errno(setpgid(arg1, arg2));
9929     case TARGET_NR_umask:
9930         return get_errno(umask(arg1));
9931     case TARGET_NR_chroot:
9932         if (!(p = lock_user_string(arg1)))
9933             return -TARGET_EFAULT;
9934         ret = get_errno(chroot(p));
9935         unlock_user(p, arg1, 0);
9936         return ret;
9937 #ifdef TARGET_NR_dup2
9938     case TARGET_NR_dup2:
9939         ret = get_errno(dup2(arg1, arg2));
9940         if (ret >= 0) {
9941             fd_trans_dup(arg1, arg2);
9942         }
9943         return ret;
9944 #endif
9945 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9946     case TARGET_NR_dup3:
9947     {
9948         int host_flags;
9949 
9950         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9951             return -TARGET_EINVAL;
9952         }
9953         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9954         ret = get_errno(dup3(arg1, arg2, host_flags));
9955         if (ret >= 0) {
9956             fd_trans_dup(arg1, arg2);
9957         }
9958         return ret;
9959     }
9960 #endif
9961 #ifdef TARGET_NR_getppid /* not on alpha */
9962     case TARGET_NR_getppid:
9963         return get_errno(getppid());
9964 #endif
9965 #ifdef TARGET_NR_getpgrp
9966     case TARGET_NR_getpgrp:
9967         return get_errno(getpgrp());
9968 #endif
9969     case TARGET_NR_setsid:
9970         return get_errno(setsid());
9971 #ifdef TARGET_NR_sigaction
9972     case TARGET_NR_sigaction:
9973         {
9974 #if defined(TARGET_MIPS)
9975 	    struct target_sigaction act, oact, *pact, *old_act;
9976 
9977 	    if (arg2) {
9978                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9979                     return -TARGET_EFAULT;
9980 		act._sa_handler = old_act->_sa_handler;
9981 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9982 		act.sa_flags = old_act->sa_flags;
9983 		unlock_user_struct(old_act, arg2, 0);
9984 		pact = &act;
9985 	    } else {
9986 		pact = NULL;
9987 	    }
9988 
9989         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9990 
9991 	    if (!is_error(ret) && arg3) {
9992                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9993                     return -TARGET_EFAULT;
9994 		old_act->_sa_handler = oact._sa_handler;
9995 		old_act->sa_flags = oact.sa_flags;
9996 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9997 		old_act->sa_mask.sig[1] = 0;
9998 		old_act->sa_mask.sig[2] = 0;
9999 		old_act->sa_mask.sig[3] = 0;
10000 		unlock_user_struct(old_act, arg3, 1);
10001 	    }
10002 #else
10003             struct target_old_sigaction *old_act;
10004             struct target_sigaction act, oact, *pact;
10005             if (arg2) {
10006                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10007                     return -TARGET_EFAULT;
10008                 act._sa_handler = old_act->_sa_handler;
10009                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10010                 act.sa_flags = old_act->sa_flags;
10011 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10012                 act.sa_restorer = old_act->sa_restorer;
10013 #endif
10014                 unlock_user_struct(old_act, arg2, 0);
10015                 pact = &act;
10016             } else {
10017                 pact = NULL;
10018             }
10019             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10020             if (!is_error(ret) && arg3) {
10021                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10022                     return -TARGET_EFAULT;
10023                 old_act->_sa_handler = oact._sa_handler;
10024                 old_act->sa_mask = oact.sa_mask.sig[0];
10025                 old_act->sa_flags = oact.sa_flags;
10026 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10027                 old_act->sa_restorer = oact.sa_restorer;
10028 #endif
10029                 unlock_user_struct(old_act, arg3, 1);
10030             }
10031 #endif
10032         }
10033         return ret;
10034 #endif
10035     case TARGET_NR_rt_sigaction:
10036         {
10037             /*
10038              * For Alpha and SPARC this is a 5 argument syscall, with
10039              * a 'restorer' parameter which must be copied into the
10040              * sa_restorer field of the sigaction struct.
10041              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10042              * and arg5 is the sigsetsize.
10043              */
10044 #if defined(TARGET_ALPHA)
10045             target_ulong sigsetsize = arg4;
10046             target_ulong restorer = arg5;
10047 #elif defined(TARGET_SPARC)
10048             target_ulong restorer = arg4;
10049             target_ulong sigsetsize = arg5;
10050 #else
10051             target_ulong sigsetsize = arg4;
10052             target_ulong restorer = 0;
10053 #endif
10054             struct target_sigaction *act = NULL;
10055             struct target_sigaction *oact = NULL;
10056 
10057             if (sigsetsize != sizeof(target_sigset_t)) {
10058                 return -TARGET_EINVAL;
10059             }
10060             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10061                 return -TARGET_EFAULT;
10062             }
10063             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10064                 ret = -TARGET_EFAULT;
10065             } else {
10066                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10067                 if (oact) {
10068                     unlock_user_struct(oact, arg3, 1);
10069                 }
10070             }
10071             if (act) {
10072                 unlock_user_struct(act, arg2, 0);
10073             }
10074         }
10075         return ret;
10076 #ifdef TARGET_NR_sgetmask /* not on alpha */
10077     case TARGET_NR_sgetmask:
10078         {
10079             sigset_t cur_set;
10080             abi_ulong target_set;
10081             ret = do_sigprocmask(0, NULL, &cur_set);
10082             if (!ret) {
10083                 host_to_target_old_sigset(&target_set, &cur_set);
10084                 ret = target_set;
10085             }
10086         }
10087         return ret;
10088 #endif
10089 #ifdef TARGET_NR_ssetmask /* not on alpha */
10090     case TARGET_NR_ssetmask:
10091         {
10092             sigset_t set, oset;
10093             abi_ulong target_set = arg1;
10094             target_to_host_old_sigset(&set, &target_set);
10095             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10096             if (!ret) {
10097                 host_to_target_old_sigset(&target_set, &oset);
10098                 ret = target_set;
10099             }
10100         }
10101         return ret;
10102 #endif
10103 #ifdef TARGET_NR_sigprocmask
10104     case TARGET_NR_sigprocmask:
10105         {
10106 #if defined(TARGET_ALPHA)
10107             sigset_t set, oldset;
10108             abi_ulong mask;
10109             int how;
10110 
10111             switch (arg1) {
10112             case TARGET_SIG_BLOCK:
10113                 how = SIG_BLOCK;
10114                 break;
10115             case TARGET_SIG_UNBLOCK:
10116                 how = SIG_UNBLOCK;
10117                 break;
10118             case TARGET_SIG_SETMASK:
10119                 how = SIG_SETMASK;
10120                 break;
10121             default:
10122                 return -TARGET_EINVAL;
10123             }
10124             mask = arg2;
10125             target_to_host_old_sigset(&set, &mask);
10126 
10127             ret = do_sigprocmask(how, &set, &oldset);
10128             if (!is_error(ret)) {
10129                 host_to_target_old_sigset(&mask, &oldset);
10130                 ret = mask;
10131                 cpu_env->ir[IR_V0] = 0; /* force no error */
10132             }
10133 #else
10134             sigset_t set, oldset, *set_ptr;
10135             int how;
10136 
10137             if (arg2) {
10138                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10139                 if (!p) {
10140                     return -TARGET_EFAULT;
10141                 }
10142                 target_to_host_old_sigset(&set, p);
10143                 unlock_user(p, arg2, 0);
10144                 set_ptr = &set;
10145                 switch (arg1) {
10146                 case TARGET_SIG_BLOCK:
10147                     how = SIG_BLOCK;
10148                     break;
10149                 case TARGET_SIG_UNBLOCK:
10150                     how = SIG_UNBLOCK;
10151                     break;
10152                 case TARGET_SIG_SETMASK:
10153                     how = SIG_SETMASK;
10154                     break;
10155                 default:
10156                     return -TARGET_EINVAL;
10157                 }
10158             } else {
10159                 how = 0;
10160                 set_ptr = NULL;
10161             }
10162             ret = do_sigprocmask(how, set_ptr, &oldset);
10163             if (!is_error(ret) && arg3) {
10164                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10165                     return -TARGET_EFAULT;
10166                 host_to_target_old_sigset(p, &oldset);
10167                 unlock_user(p, arg3, sizeof(target_sigset_t));
10168             }
10169 #endif
10170         }
10171         return ret;
10172 #endif
10173     case TARGET_NR_rt_sigprocmask:
10174         {
10175             int how = arg1;
10176             sigset_t set, oldset, *set_ptr;
10177 
10178             if (arg4 != sizeof(target_sigset_t)) {
10179                 return -TARGET_EINVAL;
10180             }
10181 
10182             if (arg2) {
10183                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10184                 if (!p) {
10185                     return -TARGET_EFAULT;
10186                 }
10187                 target_to_host_sigset(&set, p);
10188                 unlock_user(p, arg2, 0);
10189                 set_ptr = &set;
10190                 switch(how) {
10191                 case TARGET_SIG_BLOCK:
10192                     how = SIG_BLOCK;
10193                     break;
10194                 case TARGET_SIG_UNBLOCK:
10195                     how = SIG_UNBLOCK;
10196                     break;
10197                 case TARGET_SIG_SETMASK:
10198                     how = SIG_SETMASK;
10199                     break;
10200                 default:
10201                     return -TARGET_EINVAL;
10202                 }
10203             } else {
10204                 how = 0;
10205                 set_ptr = NULL;
10206             }
10207             ret = do_sigprocmask(how, set_ptr, &oldset);
10208             if (!is_error(ret) && arg3) {
10209                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10210                     return -TARGET_EFAULT;
10211                 host_to_target_sigset(p, &oldset);
10212                 unlock_user(p, arg3, sizeof(target_sigset_t));
10213             }
10214         }
10215         return ret;
10216 #ifdef TARGET_NR_sigpending
10217     case TARGET_NR_sigpending:
10218         {
10219             sigset_t set;
10220             ret = get_errno(sigpending(&set));
10221             if (!is_error(ret)) {
10222                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10223                     return -TARGET_EFAULT;
10224                 host_to_target_old_sigset(p, &set);
10225                 unlock_user(p, arg1, sizeof(target_sigset_t));
10226             }
10227         }
10228         return ret;
10229 #endif
10230     case TARGET_NR_rt_sigpending:
10231         {
10232             sigset_t set;
10233 
10234             /* Yes, this check is >, not != like most. We follow the kernel's
10235              * logic, which does it like this because it implements
10236              * NR_sigpending through the same code path, and in that case
10237              * the old_sigset_t is smaller in size.
10238              */
10239             if (arg2 > sizeof(target_sigset_t)) {
10240                 return -TARGET_EINVAL;
10241             }
10242 
10243             ret = get_errno(sigpending(&set));
10244             if (!is_error(ret)) {
10245                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10246                     return -TARGET_EFAULT;
10247                 host_to_target_sigset(p, &set);
10248                 unlock_user(p, arg1, sizeof(target_sigset_t));
10249             }
10250         }
10251         return ret;
10252 #ifdef TARGET_NR_sigsuspend
10253     case TARGET_NR_sigsuspend:
10254         {
10255             sigset_t *set;
10256 
10257 #if defined(TARGET_ALPHA)
10258             TaskState *ts = get_task_state(cpu);
10259             /* target_to_host_old_sigset will bswap back */
10260             abi_ulong mask = tswapal(arg1);
10261             set = &ts->sigsuspend_mask;
10262             target_to_host_old_sigset(set, &mask);
10263 #else
10264             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10265             if (ret != 0) {
10266                 return ret;
10267             }
10268 #endif
10269             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10270             finish_sigsuspend_mask(ret);
10271         }
10272         return ret;
10273 #endif
10274     case TARGET_NR_rt_sigsuspend:
10275         {
10276             sigset_t *set;
10277 
10278             ret = process_sigsuspend_mask(&set, arg1, arg2);
10279             if (ret != 0) {
10280                 return ret;
10281             }
10282             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10283             finish_sigsuspend_mask(ret);
10284         }
10285         return ret;
10286 #ifdef TARGET_NR_rt_sigtimedwait
10287     case TARGET_NR_rt_sigtimedwait:
10288         {
10289             sigset_t set;
10290             struct timespec uts, *puts;
10291             siginfo_t uinfo;
10292 
10293             if (arg4 != sizeof(target_sigset_t)) {
10294                 return -TARGET_EINVAL;
10295             }
10296 
10297             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10298                 return -TARGET_EFAULT;
10299             target_to_host_sigset(&set, p);
10300             unlock_user(p, arg1, 0);
10301             if (arg3) {
10302                 puts = &uts;
10303                 if (target_to_host_timespec(puts, arg3)) {
10304                     return -TARGET_EFAULT;
10305                 }
10306             } else {
10307                 puts = NULL;
10308             }
10309             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10310                                                  SIGSET_T_SIZE));
10311             if (!is_error(ret)) {
10312                 if (arg2) {
10313                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10314                                   0);
10315                     if (!p) {
10316                         return -TARGET_EFAULT;
10317                     }
10318                     host_to_target_siginfo(p, &uinfo);
10319                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10320                 }
10321                 ret = host_to_target_signal(ret);
10322             }
10323         }
10324         return ret;
10325 #endif
10326 #ifdef TARGET_NR_rt_sigtimedwait_time64
10327     case TARGET_NR_rt_sigtimedwait_time64:
10328         {
10329             sigset_t set;
10330             struct timespec uts, *puts;
10331             siginfo_t uinfo;
10332 
10333             if (arg4 != sizeof(target_sigset_t)) {
10334                 return -TARGET_EINVAL;
10335             }
10336 
10337             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10338             if (!p) {
10339                 return -TARGET_EFAULT;
10340             }
10341             target_to_host_sigset(&set, p);
10342             unlock_user(p, arg1, 0);
10343             if (arg3) {
10344                 puts = &uts;
10345                 if (target_to_host_timespec64(puts, arg3)) {
10346                     return -TARGET_EFAULT;
10347                 }
10348             } else {
10349                 puts = NULL;
10350             }
10351             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10352                                                  SIGSET_T_SIZE));
10353             if (!is_error(ret)) {
10354                 if (arg2) {
10355                     p = lock_user(VERIFY_WRITE, arg2,
10356                                   sizeof(target_siginfo_t), 0);
10357                     if (!p) {
10358                         return -TARGET_EFAULT;
10359                     }
10360                     host_to_target_siginfo(p, &uinfo);
10361                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10362                 }
10363                 ret = host_to_target_signal(ret);
10364             }
10365         }
10366         return ret;
10367 #endif
10368     case TARGET_NR_rt_sigqueueinfo:
10369         {
10370             siginfo_t uinfo;
10371 
10372             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10373             if (!p) {
10374                 return -TARGET_EFAULT;
10375             }
10376             target_to_host_siginfo(&uinfo, p);
10377             unlock_user(p, arg3, 0);
10378             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10379         }
10380         return ret;
10381     case TARGET_NR_rt_tgsigqueueinfo:
10382         {
10383             siginfo_t uinfo;
10384 
10385             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10386             if (!p) {
10387                 return -TARGET_EFAULT;
10388             }
10389             target_to_host_siginfo(&uinfo, p);
10390             unlock_user(p, arg4, 0);
10391             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10392         }
10393         return ret;
10394 #ifdef TARGET_NR_sigreturn
10395     case TARGET_NR_sigreturn:
10396         if (block_signals()) {
10397             return -QEMU_ERESTARTSYS;
10398         }
10399         return do_sigreturn(cpu_env);
10400 #endif
10401     case TARGET_NR_rt_sigreturn:
10402         if (block_signals()) {
10403             return -QEMU_ERESTARTSYS;
10404         }
10405         return do_rt_sigreturn(cpu_env);
10406     case TARGET_NR_sethostname:
10407         if (!(p = lock_user_string(arg1)))
10408             return -TARGET_EFAULT;
10409         ret = get_errno(sethostname(p, arg2));
10410         unlock_user(p, arg1, 0);
10411         return ret;
10412 #ifdef TARGET_NR_setrlimit
10413     case TARGET_NR_setrlimit:
10414         {
10415             int resource = target_to_host_resource(arg1);
10416             struct target_rlimit *target_rlim;
10417             struct rlimit rlim;
10418             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10419                 return -TARGET_EFAULT;
10420             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10421             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10422             unlock_user_struct(target_rlim, arg2, 0);
10423             /*
10424              * If we just passed through resource limit settings for memory then
10425              * they would also apply to QEMU's own allocations, and QEMU will
10426              * crash or hang or die if its allocations fail. Ideally we would
10427              * track the guest allocations in QEMU and apply the limits ourselves.
10428              * For now, just tell the guest the call succeeded but don't actually
10429              * limit anything.
10430              */
10431             if (resource != RLIMIT_AS &&
10432                 resource != RLIMIT_DATA &&
10433                 resource != RLIMIT_STACK) {
10434                 return get_errno(setrlimit(resource, &rlim));
10435             } else {
10436                 return 0;
10437             }
10438         }
10439 #endif
10440 #ifdef TARGET_NR_getrlimit
10441     case TARGET_NR_getrlimit:
10442         {
10443             int resource = target_to_host_resource(arg1);
10444             struct target_rlimit *target_rlim;
10445             struct rlimit rlim;
10446 
10447             ret = get_errno(getrlimit(resource, &rlim));
10448             if (!is_error(ret)) {
10449                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10450                     return -TARGET_EFAULT;
10451                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10452                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10453                 unlock_user_struct(target_rlim, arg2, 1);
10454             }
10455         }
10456         return ret;
10457 #endif
10458     case TARGET_NR_getrusage:
10459         {
10460             struct rusage rusage;
10461             ret = get_errno(getrusage(arg1, &rusage));
10462             if (!is_error(ret)) {
10463                 ret = host_to_target_rusage(arg2, &rusage);
10464             }
10465         }
10466         return ret;
10467 #if defined(TARGET_NR_gettimeofday)
10468     case TARGET_NR_gettimeofday:
10469         {
10470             struct timeval tv;
10471             struct timezone tz;
10472 
10473             ret = get_errno(gettimeofday(&tv, &tz));
10474             if (!is_error(ret)) {
10475                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10476                     return -TARGET_EFAULT;
10477                 }
10478                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10479                     return -TARGET_EFAULT;
10480                 }
10481             }
10482         }
10483         return ret;
10484 #endif
10485 #if defined(TARGET_NR_settimeofday)
10486     case TARGET_NR_settimeofday:
10487         {
10488             struct timeval tv, *ptv = NULL;
10489             struct timezone tz, *ptz = NULL;
10490 
10491             if (arg1) {
10492                 if (copy_from_user_timeval(&tv, arg1)) {
10493                     return -TARGET_EFAULT;
10494                 }
10495                 ptv = &tv;
10496             }
10497 
10498             if (arg2) {
10499                 if (copy_from_user_timezone(&tz, arg2)) {
10500                     return -TARGET_EFAULT;
10501                 }
10502                 ptz = &tz;
10503             }
10504 
10505             return get_errno(settimeofday(ptv, ptz));
10506         }
10507 #endif
10508 #if defined(TARGET_NR_select)
10509     case TARGET_NR_select:
10510 #if defined(TARGET_WANT_NI_OLD_SELECT)
10511         /* Some architectures used to have old_select here,
10512          * but they now return ENOSYS for it.
10513          */
10514         ret = -TARGET_ENOSYS;
10515 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10516         ret = do_old_select(arg1);
10517 #else
10518         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10519 #endif
10520         return ret;
10521 #endif
10522 #ifdef TARGET_NR_pselect6
10523     case TARGET_NR_pselect6:
10524         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10525 #endif
10526 #ifdef TARGET_NR_pselect6_time64
10527     case TARGET_NR_pselect6_time64:
10528         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10529 #endif
10530 #ifdef TARGET_NR_symlink
10531     case TARGET_NR_symlink:
10532         {
10533             void *p2;
10534             p = lock_user_string(arg1);
10535             p2 = lock_user_string(arg2);
10536             if (!p || !p2)
10537                 ret = -TARGET_EFAULT;
10538             else
10539                 ret = get_errno(symlink(p, p2));
10540             unlock_user(p2, arg2, 0);
10541             unlock_user(p, arg1, 0);
10542         }
10543         return ret;
10544 #endif
10545 #if defined(TARGET_NR_symlinkat)
10546     case TARGET_NR_symlinkat:
10547         {
10548             void *p2;
10549             p  = lock_user_string(arg1);
10550             p2 = lock_user_string(arg3);
10551             if (!p || !p2)
10552                 ret = -TARGET_EFAULT;
10553             else
10554                 ret = get_errno(symlinkat(p, arg2, p2));
10555             unlock_user(p2, arg3, 0);
10556             unlock_user(p, arg1, 0);
10557         }
10558         return ret;
10559 #endif
10560 #ifdef TARGET_NR_readlink
10561     case TARGET_NR_readlink:
10562         {
10563             void *p2;
10564             p = lock_user_string(arg1);
10565             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10566             if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_guest_readlink(p, p2, arg3));
            }
10567             unlock_user(p2, arg2, ret);
10568             unlock_user(p, arg1, 0);
10569         }
10570         return ret;
10571 #endif
10572 #if defined(TARGET_NR_readlinkat)
10573     case TARGET_NR_readlinkat:
10574         {
10575             void *p2;
10576             p  = lock_user_string(arg2);
10577             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10578             if (!p || !p2) {
10579                 ret = -TARGET_EFAULT;
10580             } else if (!arg4) {
10581                 /* Short circuit this for the magic exe check. */
10582                 ret = -TARGET_EINVAL;
10583             } else if (is_proc_myself((const char *)p, "exe")) {
10584                 /*
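                /*
                 * readlink of /proc/self/exe is intercepted so the guest
                 * sees the path of the emulated program (exec_path), not
                 * the path of the QEMU binary itself.
                 */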
10585                  * Don't worry about sign mismatch as earlier mapping
10586                  * logic would have thrown a bad address error.
10587                  */
10588                 ret = MIN(strlen(exec_path), arg4);
10589                 /* We cannot NUL terminate the string. */
10590                 memcpy(p2, exec_path, ret);
10591             } else {
10592                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10593             }
10594             unlock_user(p2, arg3, ret);
10595             unlock_user(p, arg2, 0);
10596         }
10597         return ret;
10598 #endif
10599 #ifdef TARGET_NR_swapon
10600     case TARGET_NR_swapon:
10601         if (!(p = lock_user_string(arg1)))
10602             return -TARGET_EFAULT;
10603         ret = get_errno(swapon(p, arg2));
10604         unlock_user(p, arg1, 0);
10605         return ret;
10606 #endif
10607     case TARGET_NR_reboot:
10608         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10609            /* arg4 (the command string) is only used with LINUX_REBOOT_CMD_RESTART2 */
10610            p = lock_user_string(arg4);
10611            if (!p) {
10612                return -TARGET_EFAULT;
10613            }
10614            ret = get_errno(reboot(arg1, arg2, arg3, p));
10615            unlock_user(p, arg4, 0);
10616         } else {
10617            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10618         }
10619         return ret;
10620 #ifdef TARGET_NR_mmap
10621     case TARGET_NR_mmap:
10622 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
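        /*
         * The old-style mmap takes a single guest pointer to a block of
         * six arguments; unpack them here before calling do_mmap().
         */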
10623         {
10624             abi_ulong *v;
10625             abi_ulong v1, v2, v3, v4, v5, v6;
10626             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10627                 return -TARGET_EFAULT;
10628             v1 = tswapal(v[0]);
10629             v2 = tswapal(v[1]);
10630             v3 = tswapal(v[2]);
10631             v4 = tswapal(v[3]);
10632             v5 = tswapal(v[4]);
10633             v6 = tswapal(v[5]);
10634             unlock_user(v, arg1, 0);
10635             return do_mmap(v1, v2, v3, v4, v5, v6);
10636         }
10637 #else
10638         /* mmap pointers are always untagged */
10639         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10640 #endif
10641 #endif
10642 #ifdef TARGET_NR_mmap2
10643     case TARGET_NR_mmap2:
10644 #ifndef MMAP_SHIFT
10645 #define MMAP_SHIFT 12
10646 #endif
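        /*
         * mmap2 passes its file offset in units of 1 << MMAP_SHIFT
         * (4096-byte pages unless the target overrides MMAP_SHIFT),
         * so convert it to a byte offset for do_mmap().
         */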
10647         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10648                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10649 #endif
10650     case TARGET_NR_munmap:
10651         arg1 = cpu_untagged_addr(cpu, arg1);
10652         return get_errno(target_munmap(arg1, arg2));
10653     case TARGET_NR_mprotect:
10654         arg1 = cpu_untagged_addr(cpu, arg1);
10655         {
10656             TaskState *ts = get_task_state(cpu);
10657             /* Special hack to detect libc making the stack executable.  */
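            /*
             * When libc uses PROT_GROWSDOWN to change the protection of
             * the stack, drop the flag and apply the change to the whole
             * guest stack region from stack_limit up to the end of the
             * requested range.
             */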
10658             if ((arg3 & PROT_GROWSDOWN)
10659                 && arg1 >= ts->info->stack_limit
10660                 && arg1 <= ts->info->start_stack) {
10661                 arg3 &= ~PROT_GROWSDOWN;
10662                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10663                 arg1 = ts->info->stack_limit;
10664             }
10665         }
10666         return get_errno(target_mprotect(arg1, arg2, arg3));
10667 #ifdef TARGET_NR_mremap
10668     case TARGET_NR_mremap:
10669         arg1 = cpu_untagged_addr(cpu, arg1);
10670         /* mremap new_addr (arg5) is always untagged */
10671         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10672 #endif
10673         /* ??? msync/mlock/munlock are broken for softmmu.  */
10674 #ifdef TARGET_NR_msync
10675     case TARGET_NR_msync:
10676         return get_errno(msync(g2h(cpu, arg1), arg2,
10677                                target_to_host_msync_arg(arg3)));
10678 #endif
10679 #ifdef TARGET_NR_mlock
10680     case TARGET_NR_mlock:
10681         return get_errno(mlock(g2h(cpu, arg1), arg2));
10682 #endif
10683 #ifdef TARGET_NR_munlock
10684     case TARGET_NR_munlock:
10685         return get_errno(munlock(g2h(cpu, arg1), arg2));
10686 #endif
10687 #ifdef TARGET_NR_mlockall
10688     case TARGET_NR_mlockall:
10689         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10690 #endif
10691 #ifdef TARGET_NR_munlockall
10692     case TARGET_NR_munlockall:
10693         return get_errno(munlockall());
10694 #endif
10695 #ifdef TARGET_NR_truncate
10696     case TARGET_NR_truncate:
10697         if (!(p = lock_user_string(arg1)))
10698             return -TARGET_EFAULT;
10699         ret = get_errno(truncate(p, arg2));
10700         unlock_user(p, arg1, 0);
10701         return ret;
10702 #endif
10703 #ifdef TARGET_NR_ftruncate
10704     case TARGET_NR_ftruncate:
10705         return get_errno(ftruncate(arg1, arg2));
10706 #endif
10707     case TARGET_NR_fchmod:
10708         return get_errno(fchmod(arg1, arg2));
10709 #if defined(TARGET_NR_fchmodat)
10710     case TARGET_NR_fchmodat:
10711         if (!(p = lock_user_string(arg2)))
10712             return -TARGET_EFAULT;
10713         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10714         unlock_user(p, arg2, 0);
10715         return ret;
10716 #endif
10717     case TARGET_NR_getpriority:
10718         /* Note that negative values are valid for getpriority, so we must
10719            differentiate based on errno settings.  */
10720         errno = 0;
10721         ret = getpriority(arg1, arg2);
10722         if (ret == -1 && errno != 0) {
10723             return -host_to_target_errno(errno);
10724         }
10725 #ifdef TARGET_ALPHA
10726         /* Return value is the unbiased priority.  Signal no error.  */
10727         cpu_env->ir[IR_V0] = 0;
10728 #else
10729         /* Return value is a biased priority to avoid negative numbers.  */
10730         ret = 20 - ret;
10731 #endif
10732         return ret;
10733     case TARGET_NR_setpriority:
10734         return get_errno(setpriority(arg1, arg2, arg3));
10735 #ifdef TARGET_NR_statfs
10736     case TARGET_NR_statfs:
10737         if (!(p = lock_user_string(arg1))) {
10738             return -TARGET_EFAULT;
10739         }
10740         ret = get_errno(statfs(path(p), &stfs));
10741         unlock_user(p, arg1, 0);
10742     convert_statfs:
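        /* TARGET_NR_fstatfs branches here to reuse the conversion below. */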
10743         if (!is_error(ret)) {
10744             struct target_statfs *target_stfs;
10745 
10746             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10747                 return -TARGET_EFAULT;
10748             __put_user(stfs.f_type, &target_stfs->f_type);
10749             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10750             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10751             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10752             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10753             __put_user(stfs.f_files, &target_stfs->f_files);
10754             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10755             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10756             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10757             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10758             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10759 #ifdef _STATFS_F_FLAGS
10760             __put_user(stfs.f_flags, &target_stfs->f_flags);
10761 #else
10762             __put_user(0, &target_stfs->f_flags);
10763 #endif
10764             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10765             unlock_user_struct(target_stfs, arg2, 1);
10766         }
10767         return ret;
10768 #endif
10769 #ifdef TARGET_NR_fstatfs
10770     case TARGET_NR_fstatfs:
10771         ret = get_errno(fstatfs(arg1, &stfs));
10772         goto convert_statfs;
10773 #endif
10774 #ifdef TARGET_NR_statfs64
10775     case TARGET_NR_statfs64:
10776         if (!(p = lock_user_string(arg1))) {
10777             return -TARGET_EFAULT;
10778         }
10779         ret = get_errno(statfs(path(p), &stfs));
10780         unlock_user(p, arg1, 0);
10781     convert_statfs64:
10782         if (!is_error(ret)) {
10783             struct target_statfs64 *target_stfs;
10784 
10785             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10786                 return -TARGET_EFAULT;
10787             __put_user(stfs.f_type, &target_stfs->f_type);
10788             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10789             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10790             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10791             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10792             __put_user(stfs.f_files, &target_stfs->f_files);
10793             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10794             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10795             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10796             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10797             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10798 #ifdef _STATFS_F_FLAGS
10799             __put_user(stfs.f_flags, &target_stfs->f_flags);
10800 #else
10801             __put_user(0, &target_stfs->f_flags);
10802 #endif
10803             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10804             unlock_user_struct(target_stfs, arg3, 1);
10805         }
10806         return ret;
10807     case TARGET_NR_fstatfs64:
10808         ret = get_errno(fstatfs(arg1, &stfs));
10809         goto convert_statfs64;
10810 #endif
10811 #ifdef TARGET_NR_socketcall
10812     case TARGET_NR_socketcall:
10813         return do_socketcall(arg1, arg2);
10814 #endif
10815 #ifdef TARGET_NR_accept
10816     case TARGET_NR_accept:
10817         return do_accept4(arg1, arg2, arg3, 0);
10818 #endif
10819 #ifdef TARGET_NR_accept4
10820     case TARGET_NR_accept4:
10821         return do_accept4(arg1, arg2, arg3, arg4);
10822 #endif
10823 #ifdef TARGET_NR_bind
10824     case TARGET_NR_bind:
10825         return do_bind(arg1, arg2, arg3);
10826 #endif
10827 #ifdef TARGET_NR_connect
10828     case TARGET_NR_connect:
10829         return do_connect(arg1, arg2, arg3);
10830 #endif
10831 #ifdef TARGET_NR_getpeername
10832     case TARGET_NR_getpeername:
10833         return do_getpeername(arg1, arg2, arg3);
10834 #endif
10835 #ifdef TARGET_NR_getsockname
10836     case TARGET_NR_getsockname:
10837         return do_getsockname(arg1, arg2, arg3);
10838 #endif
10839 #ifdef TARGET_NR_getsockopt
10840     case TARGET_NR_getsockopt:
10841         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10842 #endif
10843 #ifdef TARGET_NR_listen
10844     case TARGET_NR_listen:
10845         return get_errno(listen(arg1, arg2));
10846 #endif
10847 #ifdef TARGET_NR_recv
10848     case TARGET_NR_recv:
10849         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10850 #endif
10851 #ifdef TARGET_NR_recvfrom
10852     case TARGET_NR_recvfrom:
10853         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10854 #endif
10855 #ifdef TARGET_NR_recvmsg
10856     case TARGET_NR_recvmsg:
10857         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10858 #endif
10859 #ifdef TARGET_NR_send
10860     case TARGET_NR_send:
10861         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10862 #endif
10863 #ifdef TARGET_NR_sendmsg
10864     case TARGET_NR_sendmsg:
10865         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10866 #endif
10867 #ifdef TARGET_NR_sendmmsg
10868     case TARGET_NR_sendmmsg:
10869         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10870 #endif
10871 #ifdef TARGET_NR_recvmmsg
10872     case TARGET_NR_recvmmsg:
10873         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10874 #endif
10875 #ifdef TARGET_NR_sendto
10876     case TARGET_NR_sendto:
10877         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10878 #endif
10879 #ifdef TARGET_NR_shutdown
10880     case TARGET_NR_shutdown:
10881         return get_errno(shutdown(arg1, arg2));
10882 #endif
10883 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10884     case TARGET_NR_getrandom:
10885         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10886         if (!p) {
10887             return -TARGET_EFAULT;
10888         }
10889         ret = get_errno(getrandom(p, arg2, arg3));
10890         unlock_user(p, arg1, ret);
10891         return ret;
10892 #endif
10893 #ifdef TARGET_NR_socket
10894     case TARGET_NR_socket:
10895         return do_socket(arg1, arg2, arg3);
10896 #endif
10897 #ifdef TARGET_NR_socketpair
10898     case TARGET_NR_socketpair:
10899         return do_socketpair(arg1, arg2, arg3, arg4);
10900 #endif
10901 #ifdef TARGET_NR_setsockopt
10902     case TARGET_NR_setsockopt:
10903         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10904 #endif
10905 #if defined(TARGET_NR_syslog)
10906     case TARGET_NR_syslog:
10907         {
10908             int len = arg3;
10909 
10910             switch (arg1) {
10911             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10912             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10913             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10914             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10915             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10916             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10917             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10918             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10919                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10920             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10921             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10922             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10923                 {
10924                     if (len < 0) {
10925                         return -TARGET_EINVAL;
10926                     }
10927                     if (len == 0) {
10928                         return 0;
10929                     }
10930                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10931                     if (!p) {
10932                         return -TARGET_EFAULT;
10933                     }
10934                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10935                     unlock_user(p, arg2, arg3);
10936                 }
10937                 return ret;
10938             default:
10939                 return -TARGET_EINVAL;
10940             }
10941         }
10942         break;
10943 #endif
10944     case TARGET_NR_setitimer:
10945         {
10946             struct itimerval value, ovalue, *pvalue;
10947 
10948             if (arg2) {
10949                 pvalue = &value;
10950                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10951                     || copy_from_user_timeval(&pvalue->it_value,
10952                                               arg2 + sizeof(struct target_timeval)))
10953                     return -TARGET_EFAULT;
10954             } else {
10955                 pvalue = NULL;
10956             }
10957             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10958             if (!is_error(ret) && arg3) {
10959                 if (copy_to_user_timeval(arg3,
10960                                          &ovalue.it_interval)
10961                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10962                                             &ovalue.it_value))
10963                     return -TARGET_EFAULT;
10964             }
10965         }
10966         return ret;
10967     case TARGET_NR_getitimer:
10968         {
10969             struct itimerval value;
10970 
10971             ret = get_errno(getitimer(arg1, &value));
10972             if (!is_error(ret) && arg2) {
10973                 if (copy_to_user_timeval(arg2,
10974                                          &value.it_interval)
10975                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10976                                             &value.it_value))
10977                     return -TARGET_EFAULT;
10978             }
10979         }
10980         return ret;
10981 #ifdef TARGET_NR_stat
10982     case TARGET_NR_stat:
10983         if (!(p = lock_user_string(arg1))) {
10984             return -TARGET_EFAULT;
10985         }
10986         ret = get_errno(stat(path(p), &st));
10987         unlock_user(p, arg1, 0);
10988         goto do_stat;
10989 #endif
10990 #ifdef TARGET_NR_lstat
10991     case TARGET_NR_lstat:
10992         if (!(p = lock_user_string(arg1))) {
10993             return -TARGET_EFAULT;
10994         }
10995         ret = get_errno(lstat(path(p), &st));
10996         unlock_user(p, arg1, 0);
10997         goto do_stat;
10998 #endif
10999 #ifdef TARGET_NR_fstat
11000     case TARGET_NR_fstat:
11001         {
11002             ret = get_errno(fstat(arg1, &st));
11003 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
11004         do_stat:
11005 #endif
11006             if (!is_error(ret)) {
11007                 struct target_stat *target_st;
11008 
11009                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11010                     return -TARGET_EFAULT;
11011                 memset(target_st, 0, sizeof(*target_st));
11012                 __put_user(st.st_dev, &target_st->st_dev);
11013                 __put_user(st.st_ino, &target_st->st_ino);
11014                 __put_user(st.st_mode, &target_st->st_mode);
11015                 __put_user(st.st_uid, &target_st->st_uid);
11016                 __put_user(st.st_gid, &target_st->st_gid);
11017                 __put_user(st.st_nlink, &target_st->st_nlink);
11018                 __put_user(st.st_rdev, &target_st->st_rdev);
11019                 __put_user(st.st_size, &target_st->st_size);
11020                 __put_user(st.st_blksize, &target_st->st_blksize);
11021                 __put_user(st.st_blocks, &target_st->st_blocks);
11022                 __put_user(st.st_atime, &target_st->target_st_atime);
11023                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11024                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11025 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11026                 __put_user(st.st_atim.tv_nsec,
11027                            &target_st->target_st_atime_nsec);
11028                 __put_user(st.st_mtim.tv_nsec,
11029                            &target_st->target_st_mtime_nsec);
11030                 __put_user(st.st_ctim.tv_nsec,
11031                            &target_st->target_st_ctime_nsec);
11032 #endif
11033                 unlock_user_struct(target_st, arg2, 1);
11034             }
11035         }
11036         return ret;
11037 #endif
11038     case TARGET_NR_vhangup:
11039         return get_errno(vhangup());
11040 #ifdef TARGET_NR_syscall
11041     case TARGET_NR_syscall:
11042         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11043                           arg6, arg7, arg8, 0);
11044 #endif
11045 #if defined(TARGET_NR_wait4)
11046     case TARGET_NR_wait4:
11047         {
11048             int status;
11049             abi_long status_ptr = arg2;
11050             struct rusage rusage, *rusage_ptr;
11051             abi_ulong target_rusage = arg4;
11052             abi_long rusage_err;
11053             if (target_rusage)
11054                 rusage_ptr = &rusage;
11055             else
11056                 rusage_ptr = NULL;
11057             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11058             if (!is_error(ret)) {
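                /*
                 * Only copy the status back when a child was actually
                 * reaped; with WNOHANG, ret == 0 means nothing changed.
                 */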
11059                 if (status_ptr && ret) {
11060                     status = host_to_target_waitstatus(status);
11061                     if (put_user_s32(status, status_ptr))
11062                         return -TARGET_EFAULT;
11063                 }
11064                 if (target_rusage) {
11065                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11066                     if (rusage_err) {
11067                         ret = rusage_err;
11068                     }
11069                 }
11070             }
11071         }
11072         return ret;
11073 #endif
11074 #ifdef TARGET_NR_swapoff
11075     case TARGET_NR_swapoff:
11076         if (!(p = lock_user_string(arg1)))
11077             return -TARGET_EFAULT;
11078         ret = get_errno(swapoff(p));
11079         unlock_user(p, arg1, 0);
11080         return ret;
11081 #endif
11082     case TARGET_NR_sysinfo:
11083         {
11084             struct target_sysinfo *target_value;
11085             struct sysinfo value;
11086             ret = get_errno(sysinfo(&value));
11087             if (!is_error(ret) && arg1)
11088             {
11089                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11090                     return -TARGET_EFAULT;
11091                 __put_user(value.uptime, &target_value->uptime);
11092                 __put_user(value.loads[0], &target_value->loads[0]);
11093                 __put_user(value.loads[1], &target_value->loads[1]);
11094                 __put_user(value.loads[2], &target_value->loads[2]);
11095                 __put_user(value.totalram, &target_value->totalram);
11096                 __put_user(value.freeram, &target_value->freeram);
11097                 __put_user(value.sharedram, &target_value->sharedram);
11098                 __put_user(value.bufferram, &target_value->bufferram);
11099                 __put_user(value.totalswap, &target_value->totalswap);
11100                 __put_user(value.freeswap, &target_value->freeswap);
11101                 __put_user(value.procs, &target_value->procs);
11102                 __put_user(value.totalhigh, &target_value->totalhigh);
11103                 __put_user(value.freehigh, &target_value->freehigh);
11104                 __put_user(value.mem_unit, &target_value->mem_unit);
11105                 unlock_user_struct(target_value, arg1, 1);
11106             }
11107         }
11108         return ret;
11109 #ifdef TARGET_NR_ipc
11110     case TARGET_NR_ipc:
11111         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11112 #endif
11113 #ifdef TARGET_NR_semget
11114     case TARGET_NR_semget:
11115         return get_errno(semget(arg1, arg2, arg3));
11116 #endif
11117 #ifdef TARGET_NR_semop
11118     case TARGET_NR_semop:
11119         return do_semtimedop(arg1, arg2, arg3, 0, false);
11120 #endif
11121 #ifdef TARGET_NR_semtimedop
11122     case TARGET_NR_semtimedop:
11123         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11124 #endif
11125 #ifdef TARGET_NR_semtimedop_time64
11126     case TARGET_NR_semtimedop_time64:
11127         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11128 #endif
11129 #ifdef TARGET_NR_semctl
11130     case TARGET_NR_semctl:
11131         return do_semctl(arg1, arg2, arg3, arg4);
11132 #endif
11133 #ifdef TARGET_NR_msgctl
11134     case TARGET_NR_msgctl:
11135         return do_msgctl(arg1, arg2, arg3);
11136 #endif
11137 #ifdef TARGET_NR_msgget
11138     case TARGET_NR_msgget:
11139         return get_errno(msgget(arg1, arg2));
11140 #endif
11141 #ifdef TARGET_NR_msgrcv
11142     case TARGET_NR_msgrcv:
11143         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11144 #endif
11145 #ifdef TARGET_NR_msgsnd
11146     case TARGET_NR_msgsnd:
11147         return do_msgsnd(arg1, arg2, arg3, arg4);
11148 #endif
11149 #ifdef TARGET_NR_shmget
11150     case TARGET_NR_shmget:
11151         return get_errno(shmget(arg1, arg2, arg3));
11152 #endif
11153 #ifdef TARGET_NR_shmctl
11154     case TARGET_NR_shmctl:
11155         return do_shmctl(arg1, arg2, arg3);
11156 #endif
11157 #ifdef TARGET_NR_shmat
11158     case TARGET_NR_shmat:
11159         return target_shmat(cpu_env, arg1, arg2, arg3);
11160 #endif
11161 #ifdef TARGET_NR_shmdt
11162     case TARGET_NR_shmdt:
11163         return target_shmdt(arg1);
11164 #endif
11165     case TARGET_NR_fsync:
11166         return get_errno(fsync(arg1));
11167     case TARGET_NR_clone:
11168         /* Linux manages to have three different orderings for its
11169          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11170          * match the kernel's CONFIG_CLONE_* settings.
11171          * Microblaze is further special in that it uses a sixth
11172          * implicit argument to clone for the TLS pointer.
11173          */
11174 #if defined(TARGET_MICROBLAZE)
11175         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11176 #elif defined(TARGET_CLONE_BACKWARDS)
11177         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11178 #elif defined(TARGET_CLONE_BACKWARDS2)
11179         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11180 #else
11181         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11182 #endif
11183         return ret;
11184 #ifdef __NR_exit_group
11185         /* new thread calls */
11186     case TARGET_NR_exit_group:
11187         preexit_cleanup(cpu_env, arg1);
11188         return get_errno(exit_group(arg1));
11189 #endif
11190     case TARGET_NR_setdomainname:
11191         if (!(p = lock_user_string(arg1)))
11192             return -TARGET_EFAULT;
11193         ret = get_errno(setdomainname(p, arg2));
11194         unlock_user(p, arg1, 0);
11195         return ret;
11196     case TARGET_NR_uname:
11197         /* no need to transcode because we use the linux syscall */
11198         {
11199             struct new_utsname * buf;
11200 
11201             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11202                 return -TARGET_EFAULT;
11203             ret = get_errno(sys_uname(buf));
11204             if (!is_error(ret)) {
11205                 /* Overwrite the native machine name with whatever is being
11206                    emulated. */
11207                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11208                           sizeof(buf->machine));
11209                 /* Allow the user to override the reported release.  */
11210                 if (qemu_uname_release && *qemu_uname_release) {
11211                     g_strlcpy(buf->release, qemu_uname_release,
11212                               sizeof(buf->release));
11213                 }
11214             }
11215             unlock_user_struct(buf, arg1, 1);
11216         }
11217         return ret;
11218 #ifdef TARGET_I386
11219     case TARGET_NR_modify_ldt:
11220         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11221 #if !defined(TARGET_X86_64)
11222     case TARGET_NR_vm86:
11223         return do_vm86(cpu_env, arg1, arg2);
11224 #endif
11225 #endif
11226 #if defined(TARGET_NR_adjtimex)
11227     case TARGET_NR_adjtimex:
11228         {
11229             struct timex host_buf;
11230 
11231             if (target_to_host_timex(&host_buf, arg1) != 0) {
11232                 return -TARGET_EFAULT;
11233             }
11234             ret = get_errno(adjtimex(&host_buf));
11235             if (!is_error(ret)) {
11236                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11237                     return -TARGET_EFAULT;
11238                 }
11239             }
11240         }
11241         return ret;
11242 #endif
11243 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11244     case TARGET_NR_clock_adjtime:
11245         {
11246             struct timex htx;
11247 
11248             if (target_to_host_timex(&htx, arg2) != 0) {
11249                 return -TARGET_EFAULT;
11250             }
11251             ret = get_errno(clock_adjtime(arg1, &htx));
11252             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11253                 return -TARGET_EFAULT;
11254             }
11255         }
11256         return ret;
11257 #endif
11258 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11259     case TARGET_NR_clock_adjtime64:
11260         {
11261             struct timex htx;
11262 
11263             if (target_to_host_timex64(&htx, arg2) != 0) {
11264                 return -TARGET_EFAULT;
11265             }
11266             ret = get_errno(clock_adjtime(arg1, &htx));
11267             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11268                 return -TARGET_EFAULT;
11269             }
11270         }
11271         return ret;
11272 #endif
11273     case TARGET_NR_getpgid:
11274         return get_errno(getpgid(arg1));
11275     case TARGET_NR_fchdir:
11276         return get_errno(fchdir(arg1));
11277     case TARGET_NR_personality:
11278         return get_errno(personality(arg1));
11279 #ifdef TARGET_NR__llseek /* Not on alpha */
11280     case TARGET_NR__llseek:
11281         {
11282             int64_t res;
11283 #if !defined(__NR_llseek)
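            /*
             * No _llseek on the host (typically a 64-bit host): emulate
             * it with a plain lseek() on the 64-bit offset assembled
             * from the two 32-bit halves supplied by the guest.
             */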
11284             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11285             if (res == -1) {
11286                 ret = get_errno(res);
11287             } else {
11288                 ret = 0;
11289             }
11290 #else
11291             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11292 #endif
11293             if ((ret == 0) && put_user_s64(res, arg4)) {
11294                 return -TARGET_EFAULT;
11295             }
11296         }
11297         return ret;
11298 #endif
11299 #ifdef TARGET_NR_getdents
11300     case TARGET_NR_getdents:
11301         return do_getdents(arg1, arg2, arg3);
11302 #endif /* TARGET_NR_getdents */
11303 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11304     case TARGET_NR_getdents64:
11305         return do_getdents64(arg1, arg2, arg3);
11306 #endif /* TARGET_NR_getdents64 */
11307 #if defined(TARGET_NR__newselect)
11308     case TARGET_NR__newselect:
11309         return do_select(arg1, arg2, arg3, arg4, arg5);
11310 #endif
11311 #ifdef TARGET_NR_poll
11312     case TARGET_NR_poll:
11313         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11314 #endif
11315 #ifdef TARGET_NR_ppoll
11316     case TARGET_NR_ppoll:
11317         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11318 #endif
11319 #ifdef TARGET_NR_ppoll_time64
11320     case TARGET_NR_ppoll_time64:
11321         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11322 #endif
11323     case TARGET_NR_flock:
11324         /* NOTE: the flock constant seems to be the same for every
11325            Linux platform */
11326         return get_errno(safe_flock(arg1, arg2));
11327     case TARGET_NR_readv:
11328         {
11329             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11330             if (vec != NULL) {
11331                 ret = get_errno(safe_readv(arg1, vec, arg3));
11332                 unlock_iovec(vec, arg2, arg3, 1);
11333             } else {
11334                 ret = -host_to_target_errno(errno);
11335             }
11336         }
11337         return ret;
11338     case TARGET_NR_writev:
11339         {
11340             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11341             if (vec != NULL) {
11342                 ret = get_errno(safe_writev(arg1, vec, arg3));
11343                 unlock_iovec(vec, arg2, arg3, 0);
11344             } else {
11345                 ret = -host_to_target_errno(errno);
11346             }
11347         }
11348         return ret;
11349 #if defined(TARGET_NR_preadv)
11350     case TARGET_NR_preadv:
11351         {
11352             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11353             if (vec != NULL) {
11354                 unsigned long low, high;
11355 
11356                 target_to_host_low_high(arg4, arg5, &low, &high);
11357                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11358                 unlock_iovec(vec, arg2, arg3, 1);
11359             } else {
11360                 ret = -host_to_target_errno(errno);
11361             }
11362         }
11363         return ret;
11364 #endif
11365 #if defined(TARGET_NR_pwritev)
11366     case TARGET_NR_pwritev:
11367         {
11368             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11369             if (vec != NULL) {
11370                 unsigned long low, high;
11371 
11372                 target_to_host_low_high(arg4, arg5, &low, &high);
11373                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11374                 unlock_iovec(vec, arg2, arg3, 0);
11375             } else {
11376                 ret = -host_to_target_errno(errno);
11377             }
11378         }
11379         return ret;
11380 #endif
11381     case TARGET_NR_getsid:
11382         return get_errno(getsid(arg1));
11383 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11384     case TARGET_NR_fdatasync:
11385         return get_errno(fdatasync(arg1));
11386 #endif
11387     case TARGET_NR_sched_getaffinity:
11388         {
11389             unsigned int mask_size;
11390             unsigned long *mask;
11391 
11392             /*
11393              * sched_getaffinity needs multiples of ulong, so need to take
11394              * care of mismatches between target ulong and host ulong sizes.
11395              */
11396             if (arg2 & (sizeof(abi_ulong) - 1)) {
11397                 return -TARGET_EINVAL;
11398             }
11399             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11400 
11401             mask = alloca(mask_size);
11402             memset(mask, 0, mask_size);
11403             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11404 
11405             if (!is_error(ret)) {
11406                 if (ret > arg2) {
11407                     /* More data returned than the caller's buffer will fit.
11408                      * This only happens if sizeof(abi_long) < sizeof(long)
11409                      * and the caller passed us a buffer holding an odd number
11410                      * of abi_longs. If the host kernel is actually using the
11411                      * extra 4 bytes then fail EINVAL; otherwise we can just
11412                      * ignore them and only copy the interesting part.
11413                      */
11414                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11415                     if (numcpus > arg2 * 8) {
11416                         return -TARGET_EINVAL;
11417                     }
11418                     ret = arg2;
11419                 }
11420 
11421                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11422                     return -TARGET_EFAULT;
11423                 }
11424             }
11425         }
11426         return ret;
11427     case TARGET_NR_sched_setaffinity:
11428         {
11429             unsigned int mask_size;
11430             unsigned long *mask;
11431 
11432             /*
11433              * sched_setaffinity needs multiples of ulong, so need to take
11434              * care of mismatches between target ulong and host ulong sizes.
11435              */
11436             if (arg2 & (sizeof(abi_ulong) - 1)) {
11437                 return -TARGET_EINVAL;
11438             }
11439             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11440             mask = alloca(mask_size);
11441 
11442             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11443             if (ret) {
11444                 return ret;
11445             }
11446 
11447             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11448         }
11449     case TARGET_NR_getcpu:
11450         {
11451             unsigned cpuid, node;
11452             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11453                                        arg2 ? &node : NULL,
11454                                        NULL));
11455             if (is_error(ret)) {
11456                 return ret;
11457             }
11458             if (arg1 && put_user_u32(cpuid, arg1)) {
11459                 return -TARGET_EFAULT;
11460             }
11461             if (arg2 && put_user_u32(node, arg2)) {
11462                 return -TARGET_EFAULT;
11463             }
11464         }
11465         return ret;
11466     case TARGET_NR_sched_setparam:
11467         {
11468             struct target_sched_param *target_schp;
11469             struct sched_param schp;
11470 
11471             if (arg2 == 0) {
11472                 return -TARGET_EINVAL;
11473             }
11474             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11475                 return -TARGET_EFAULT;
11476             }
11477             schp.sched_priority = tswap32(target_schp->sched_priority);
11478             unlock_user_struct(target_schp, arg2, 0);
11479             return get_errno(sys_sched_setparam(arg1, &schp));
11480         }
11481     case TARGET_NR_sched_getparam:
11482         {
11483             struct target_sched_param *target_schp;
11484             struct sched_param schp;
11485 
11486             if (arg2 == 0) {
11487                 return -TARGET_EINVAL;
11488             }
11489             ret = get_errno(sys_sched_getparam(arg1, &schp));
11490             if (!is_error(ret)) {
11491                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11492                     return -TARGET_EFAULT;
11493                 }
11494                 target_schp->sched_priority = tswap32(schp.sched_priority);
11495                 unlock_user_struct(target_schp, arg2, 1);
11496             }
11497         }
11498         return ret;
11499     case TARGET_NR_sched_setscheduler:
11500         {
11501             struct target_sched_param *target_schp;
11502             struct sched_param schp;
11503             if (arg3 == 0) {
11504                 return -TARGET_EINVAL;
11505             }
11506             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11507                 return -TARGET_EFAULT;
11508             }
11509             schp.sched_priority = tswap32(target_schp->sched_priority);
11510             unlock_user_struct(target_schp, arg3, 0);
11511             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11512         }
11513     case TARGET_NR_sched_getscheduler:
11514         return get_errno(sys_sched_getscheduler(arg1));
11515     case TARGET_NR_sched_getattr:
11516         {
11517             struct target_sched_attr *target_scha;
11518             struct sched_attr scha;
11519             if (arg2 == 0) {
11520                 return -TARGET_EINVAL;
11521             }
11522             if (arg3 > sizeof(scha)) {
11523                 arg3 = sizeof(scha);
11524             }
11525             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11526             if (!is_error(ret)) {
11527                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11528                 if (!target_scha) {
11529                     return -TARGET_EFAULT;
11530                 }
11531                 target_scha->size = tswap32(scha.size);
11532                 target_scha->sched_policy = tswap32(scha.sched_policy);
11533                 target_scha->sched_flags = tswap64(scha.sched_flags);
11534                 target_scha->sched_nice = tswap32(scha.sched_nice);
11535                 target_scha->sched_priority = tswap32(scha.sched_priority);
11536                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11537                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11538                 target_scha->sched_period = tswap64(scha.sched_period);
11539                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11540                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11541                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11542                 }
11543                 unlock_user(target_scha, arg2, arg3);
11544             }
11545             return ret;
11546         }
11547     case TARGET_NR_sched_setattr:
11548         {
11549             struct target_sched_attr *target_scha;
11550             struct sched_attr scha;
11551             uint32_t size;
11552             int zeroed;
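            /*
             * Mirror the kernel's sched_attr size handling: size 0 means
             * the original layout, sizes smaller than that are rejected
             * with E2BIG (writing the expected size back), and if the
             * guest's structure is larger than ours all the extra bytes
             * must be zero.
             */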
11553             if (arg2 == 0) {
11554                 return -TARGET_EINVAL;
11555             }
11556             if (get_user_u32(size, arg2)) {
11557                 return -TARGET_EFAULT;
11558             }
11559             if (!size) {
11560                 size = offsetof(struct target_sched_attr, sched_util_min);
11561             }
11562             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11563                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11564                     return -TARGET_EFAULT;
11565                 }
11566                 return -TARGET_E2BIG;
11567             }
11568 
11569             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11570             if (zeroed < 0) {
11571                 return zeroed;
11572             } else if (zeroed == 0) {
11573                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11574                     return -TARGET_EFAULT;
11575                 }
11576                 return -TARGET_E2BIG;
11577             }
11578             if (size > sizeof(struct target_sched_attr)) {
11579                 size = sizeof(struct target_sched_attr);
11580             }
11581 
11582             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11583             if (!target_scha) {
11584                 return -TARGET_EFAULT;
11585             }
11586             scha.size = size;
11587             scha.sched_policy = tswap32(target_scha->sched_policy);
11588             scha.sched_flags = tswap64(target_scha->sched_flags);
11589             scha.sched_nice = tswap32(target_scha->sched_nice);
11590             scha.sched_priority = tswap32(target_scha->sched_priority);
11591             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11592             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11593             scha.sched_period = tswap64(target_scha->sched_period);
11594             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11595                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11596                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11597             }
11598             unlock_user(target_scha, arg2, 0);
11599             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11600         }
11601     case TARGET_NR_sched_yield:
11602         return get_errno(sched_yield());
11603     case TARGET_NR_sched_get_priority_max:
11604         return get_errno(sched_get_priority_max(arg1));
11605     case TARGET_NR_sched_get_priority_min:
11606         return get_errno(sched_get_priority_min(arg1));
11607 #ifdef TARGET_NR_sched_rr_get_interval
11608     case TARGET_NR_sched_rr_get_interval:
11609         {
11610             struct timespec ts;
11611             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11612             if (!is_error(ret)) {
11613                 ret = host_to_target_timespec(arg2, &ts);
11614             }
11615         }
11616         return ret;
11617 #endif
11618 #ifdef TARGET_NR_sched_rr_get_interval_time64
11619     case TARGET_NR_sched_rr_get_interval_time64:
11620         {
11621             struct timespec ts;
11622             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11623             if (!is_error(ret)) {
11624                 ret = host_to_target_timespec64(arg2, &ts);
11625             }
11626         }
11627         return ret;
11628 #endif
11629 #if defined(TARGET_NR_nanosleep)
11630     case TARGET_NR_nanosleep:
11631         {
11632             struct timespec req, rem;
11633             target_to_host_timespec(&req, arg1);
11634             ret = get_errno(safe_nanosleep(&req, &rem));
11635             if (is_error(ret) && arg2) {
11636                 host_to_target_timespec(arg2, &rem);
11637             }
11638         }
11639         return ret;
11640 #endif
11641     case TARGET_NR_prctl:
11642         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11644 #ifdef TARGET_NR_arch_prctl
11645     case TARGET_NR_arch_prctl:
11646         return do_arch_prctl(cpu_env, arg1, arg2);
11647 #endif
11648 #ifdef TARGET_NR_pread64
11649     case TARGET_NR_pread64:
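        /*
         * Targets that pass 64-bit syscall arguments in aligned register
         * pairs insert a padding argument before the offset; shift the
         * arguments down so arg4/arg5 hold the offset halves.
         */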
11650         if (regpairs_aligned(cpu_env, num)) {
11651             arg4 = arg5;
11652             arg5 = arg6;
11653         }
11654         if (arg2 == 0 && arg3 == 0) {
11655             /* Special-case NULL buffer and zero length, which should succeed */
11656             p = 0;
11657         } else {
11658             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11659             if (!p) {
11660                 return -TARGET_EFAULT;
11661             }
11662         }
11663         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11664         unlock_user(p, arg2, ret);
11665         return ret;
11666     case TARGET_NR_pwrite64:
11667         if (regpairs_aligned(cpu_env, num)) {
11668             arg4 = arg5;
11669             arg5 = arg6;
11670         }
11671         if (arg2 == 0 && arg3 == 0) {
11672             /* Special-case NULL buffer and zero length, which should succeed */
11673             p = 0;
11674         } else {
11675             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11676             if (!p) {
11677                 return -TARGET_EFAULT;
11678             }
11679         }
11680         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11681         unlock_user(p, arg2, 0);
11682         return ret;
11683 #endif
11684     case TARGET_NR_getcwd:
11685         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11686             return -TARGET_EFAULT;
11687         ret = get_errno(sys_getcwd1(p, arg2));
11688         unlock_user(p, arg1, ret);
11689         return ret;
11690     case TARGET_NR_capget:
11691     case TARGET_NR_capset:
11692     {
11693         struct target_user_cap_header *target_header;
11694         struct target_user_cap_data *target_data = NULL;
11695         struct __user_cap_header_struct header;
11696         struct __user_cap_data_struct data[2];
11697         struct __user_cap_data_struct *dataptr = NULL;
11698         int i, target_datalen;
11699         int data_items = 1;
11700 
11701         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11702             return -TARGET_EFAULT;
11703         }
11704         header.version = tswap32(target_header->version);
11705         header.pid = tswap32(target_header->pid);
11706 
11707         if (header.version != _LINUX_CAPABILITY_VERSION) {
11708             /* Version 2 and up takes pointer to two user_data structs */
11709             data_items = 2;
11710         }
11711 
11712         target_datalen = sizeof(*target_data) * data_items;
11713 
11714         if (arg2) {
11715             if (num == TARGET_NR_capget) {
11716                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11717             } else {
11718                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11719             }
11720             if (!target_data) {
11721                 unlock_user_struct(target_header, arg1, 0);
11722                 return -TARGET_EFAULT;
11723             }
11724 
11725             if (num == TARGET_NR_capset) {
11726                 for (i = 0; i < data_items; i++) {
11727                     data[i].effective = tswap32(target_data[i].effective);
11728                     data[i].permitted = tswap32(target_data[i].permitted);
11729                     data[i].inheritable = tswap32(target_data[i].inheritable);
11730                 }
11731             }
11732 
11733             dataptr = data;
11734         }
11735 
11736         if (num == TARGET_NR_capget) {
11737             ret = get_errno(capget(&header, dataptr));
11738         } else {
11739             ret = get_errno(capset(&header, dataptr));
11740         }
11741 
11742         /* The kernel always updates version for both capget and capset */
11743         target_header->version = tswap32(header.version);
11744         unlock_user_struct(target_header, arg1, 1);
11745 
11746         if (arg2) {
11747             if (num == TARGET_NR_capget) {
11748                 for (i = 0; i < data_items; i++) {
11749                     target_data[i].effective = tswap32(data[i].effective);
11750                     target_data[i].permitted = tswap32(data[i].permitted);
11751                     target_data[i].inheritable = tswap32(data[i].inheritable);
11752                 }
11753                 unlock_user(target_data, arg2, target_datalen);
11754             } else {
11755                 unlock_user(target_data, arg2, 0);
11756             }
11757         }
11758         return ret;
11759     }
11760     case TARGET_NR_sigaltstack:
11761         return do_sigaltstack(arg1, arg2, cpu_env);
11762 
11763 #ifdef CONFIG_SENDFILE
11764 #ifdef TARGET_NR_sendfile
11765     case TARGET_NR_sendfile:
11766     {
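        /*
         * sendfile takes the offset as a target 'long' (get_user_sal /
         * put_user_sal), while sendfile64 below always uses a 64-bit
         * offset; both are forwarded to the host sendfile().
         */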
11767         off_t *offp = NULL;
11768         off_t off;
11769         if (arg3) {
11770             ret = get_user_sal(off, arg3);
11771             if (is_error(ret)) {
11772                 return ret;
11773             }
11774             offp = &off;
11775         }
11776         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11777         if (!is_error(ret) && arg3) {
11778             abi_long ret2 = put_user_sal(off, arg3);
11779             if (is_error(ret2)) {
11780                 ret = ret2;
11781             }
11782         }
11783         return ret;
11784     }
11785 #endif
11786 #ifdef TARGET_NR_sendfile64
11787     case TARGET_NR_sendfile64:
11788     {
11789         off_t *offp = NULL;
11790         off_t off;
11791         if (arg3) {
11792             ret = get_user_s64(off, arg3);
11793             if (is_error(ret)) {
11794                 return ret;
11795             }
11796             offp = &off;
11797         }
11798         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11799         if (!is_error(ret) && arg3) {
11800             abi_long ret2 = put_user_s64(off, arg3);
11801             if (is_error(ret2)) {
11802                 ret = ret2;
11803             }
11804         }
11805         return ret;
11806     }
11807 #endif
11808 #endif
11809 #ifdef TARGET_NR_vfork
11810     case TARGET_NR_vfork:
11811         return get_errno(do_fork(cpu_env,
11812                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11813                          0, 0, 0, 0));
11814 #endif
11815 #ifdef TARGET_NR_ugetrlimit
11816     case TARGET_NR_ugetrlimit:
11817     {
11818         struct rlimit rlim;
11819         int resource = target_to_host_resource(arg1);
11820         ret = get_errno(getrlimit(resource, &rlim));
11821         if (!is_error(ret)) {
11822             struct target_rlimit *target_rlim;
11823             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11824                 return -TARGET_EFAULT;
11825             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11826             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11827             unlock_user_struct(target_rlim, arg2, 1);
11828         }
11829         return ret;
11830     }
11831 #endif
11832 #ifdef TARGET_NR_truncate64
11833     case TARGET_NR_truncate64:
11834         if (!(p = lock_user_string(arg1)))
11835             return -TARGET_EFAULT;
11836         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11837         unlock_user(p, arg1, 0);
11838         return ret;
11839 #endif
11840 #ifdef TARGET_NR_ftruncate64
11841     case TARGET_NR_ftruncate64:
11842         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11843 #endif
11844 #ifdef TARGET_NR_stat64
11845     case TARGET_NR_stat64:
11846         if (!(p = lock_user_string(arg1))) {
11847             return -TARGET_EFAULT;
11848         }
11849         ret = get_errno(stat(path(p), &st));
11850         unlock_user(p, arg1, 0);
11851         if (!is_error(ret))
11852             ret = host_to_target_stat64(cpu_env, arg2, &st);
11853         return ret;
11854 #endif
11855 #ifdef TARGET_NR_lstat64
11856     case TARGET_NR_lstat64:
11857         if (!(p = lock_user_string(arg1))) {
11858             return -TARGET_EFAULT;
11859         }
11860         ret = get_errno(lstat(path(p), &st));
11861         unlock_user(p, arg1, 0);
11862         if (!is_error(ret))
11863             ret = host_to_target_stat64(cpu_env, arg2, &st);
11864         return ret;
11865 #endif
11866 #ifdef TARGET_NR_fstat64
11867     case TARGET_NR_fstat64:
11868         ret = get_errno(fstat(arg1, &st));
11869         if (!is_error(ret))
11870             ret = host_to_target_stat64(cpu_env, arg2, &st);
11871         return ret;
11872 #endif
11873 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11874 #ifdef TARGET_NR_fstatat64
11875     case TARGET_NR_fstatat64:
11876 #endif
11877 #ifdef TARGET_NR_newfstatat
11878     case TARGET_NR_newfstatat:
11879 #endif
11880         if (!(p = lock_user_string(arg2))) {
11881             return -TARGET_EFAULT;
11882         }
11883         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11884         unlock_user(p, arg2, 0);
11885         if (!is_error(ret))
11886             ret = host_to_target_stat64(cpu_env, arg3, &st);
11887         return ret;
11888 #endif
11889 #if defined(TARGET_NR_statx)
11890     case TARGET_NR_statx:
11891         {
11892             struct target_statx *target_stx;
11893             int dirfd = arg1;
11894             int flags = arg3;
11895 
11896             p = lock_user_string(arg2);
11897             if (p == NULL) {
11898                 return -TARGET_EFAULT;
11899             }
11900 #if defined(__NR_statx)
11901             {
11902                 /*
11903                  * It is assumed that struct statx is architecture independent.
11904                  */
11905                 struct target_statx host_stx;
11906                 int mask = arg4;
11907 
11908                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11909                 if (!is_error(ret)) {
11910                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11911                         unlock_user(p, arg2, 0);
11912                         return -TARGET_EFAULT;
11913                     }
11914                 }
11915 
11916                 if (ret != -TARGET_ENOSYS) {
11917                     unlock_user(p, arg2, 0);
11918                     return ret;
11919                 }
11920             }
11921 #endif
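                  /*
                   * Either the host has no statx() or it returned ENOSYS: fall back
                   * to fstatat() and fill in the statx fields that can be recovered
                   * from struct stat, leaving the rest zeroed.
                   */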
11922             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11923             unlock_user(p, arg2, 0);
11924 
11925             if (!is_error(ret)) {
11926                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11927                     return -TARGET_EFAULT;
11928                 }
11929                 memset(target_stx, 0, sizeof(*target_stx));
11930                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11931                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11932                 __put_user(st.st_ino, &target_stx->stx_ino);
11933                 __put_user(st.st_mode, &target_stx->stx_mode);
11934                 __put_user(st.st_uid, &target_stx->stx_uid);
11935                 __put_user(st.st_gid, &target_stx->stx_gid);
11936                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11937                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11938                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11939                 __put_user(st.st_size, &target_stx->stx_size);
11940                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11941                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11942                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11943                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11944                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11945                 unlock_user_struct(target_stx, arg5, 1);
11946             }
11947         }
11948         return ret;
11949 #endif
11950 #ifdef TARGET_NR_lchown
11951     case TARGET_NR_lchown:
11952         if (!(p = lock_user_string(arg1)))
11953             return -TARGET_EFAULT;
11954         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11955         unlock_user(p, arg1, 0);
11956         return ret;
11957 #endif
11958 #ifdef TARGET_NR_getuid
11959     case TARGET_NR_getuid:
11960         return get_errno(high2lowuid(getuid()));
11961 #endif
11962 #ifdef TARGET_NR_getgid
11963     case TARGET_NR_getgid:
11964         return get_errno(high2lowgid(getgid()));
11965 #endif
11966 #ifdef TARGET_NR_geteuid
11967     case TARGET_NR_geteuid:
11968         return get_errno(high2lowuid(geteuid()));
11969 #endif
11970 #ifdef TARGET_NR_getegid
11971     case TARGET_NR_getegid:
11972         return get_errno(high2lowgid(getegid()));
11973 #endif
11974     case TARGET_NR_setreuid:
11975         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11976     case TARGET_NR_setregid:
11977         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11978     case TARGET_NR_getgroups:
11979         { /* the same code as for TARGET_NR_getgroups32 */
11980             int gidsetsize = arg1;
11981             target_id *target_grouplist;
11982             g_autofree gid_t *grouplist = NULL;
11983             int i;
11984 
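                  /*
                   * A gidsetsize of 0 only asks for the number of supplementary
                   * groups, so the NULL grouplist is passed straight to the host.
                   */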
11985             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11986                 return -TARGET_EINVAL;
11987             }
11988             if (gidsetsize > 0) {
11989                 grouplist = g_try_new(gid_t, gidsetsize);
11990                 if (!grouplist) {
11991                     return -TARGET_ENOMEM;
11992                 }
11993             }
11994             ret = get_errno(getgroups(gidsetsize, grouplist));
11995             if (!is_error(ret) && gidsetsize > 0) {
11996                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11997                                              gidsetsize * sizeof(target_id), 0);
11998                 if (!target_grouplist) {
11999                     return -TARGET_EFAULT;
12000                 }
12001                 for (i = 0; i < ret; i++) {
12002                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
12003                 }
12004                 unlock_user(target_grouplist, arg2,
12005                             gidsetsize * sizeof(target_id));
12006             }
12007             return ret;
12008         }
12009     case TARGET_NR_setgroups:
12010         { /* the same code as for TARGET_NR_setgroups32 */
12011             int gidsetsize = arg1;
12012             target_id *target_grouplist;
12013             g_autofree gid_t *grouplist = NULL;
12014             int i;
12015 
12016             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12017                 return -TARGET_EINVAL;
12018             }
12019             if (gidsetsize > 0) {
12020                 grouplist = g_try_new(gid_t, gidsetsize);
12021                 if (!grouplist) {
12022                     return -TARGET_ENOMEM;
12023                 }
12024                 target_grouplist = lock_user(VERIFY_READ, arg2,
12025                                              gidsetsize * sizeof(target_id), 1);
12026                 if (!target_grouplist) {
12027                     return -TARGET_EFAULT;
12028                 }
12029                 for (i = 0; i < gidsetsize; i++) {
12030                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12031                 }
12032                 unlock_user(target_grouplist, arg2,
12033                             gidsetsize * sizeof(target_id));
12034             }
12035             return get_errno(sys_setgroups(gidsetsize, grouplist));
12036         }
12037     case TARGET_NR_fchown:
12038         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12039 #if defined(TARGET_NR_fchownat)
12040     case TARGET_NR_fchownat:
12041         if (!(p = lock_user_string(arg2)))
12042             return -TARGET_EFAULT;
12043         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12044                                  low2highgid(arg4), arg5));
12045         unlock_user(p, arg2, 0);
12046         return ret;
12047 #endif
12048 #ifdef TARGET_NR_setresuid
12049     case TARGET_NR_setresuid:
12050         return get_errno(sys_setresuid(low2highuid(arg1),
12051                                        low2highuid(arg2),
12052                                        low2highuid(arg3)));
12053 #endif
12054 #ifdef TARGET_NR_getresuid
12055     case TARGET_NR_getresuid:
12056         {
12057             uid_t ruid, euid, suid;
12058             ret = get_errno(getresuid(&ruid, &euid, &suid));
12059             if (!is_error(ret)) {
12060                 if (put_user_id(high2lowuid(ruid), arg1)
12061                     || put_user_id(high2lowuid(euid), arg2)
12062                     || put_user_id(high2lowuid(suid), arg3))
12063                     return -TARGET_EFAULT;
12064             }
12065         }
12066         return ret;
12067 #endif
12068 #ifdef TARGET_NR_setresgid
12069     case TARGET_NR_setresgid:
12070         return get_errno(sys_setresgid(low2highgid(arg1),
12071                                        low2highgid(arg2),
12072                                        low2highgid(arg3)));
12073 #endif
12074 #ifdef TARGET_NR_getresgid
12075     case TARGET_NR_getresgid:
12076         {
12077             gid_t rgid, egid, sgid;
12078             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12079             if (!is_error(ret)) {
12080                 if (put_user_id(high2lowgid(rgid), arg1)
12081                     || put_user_id(high2lowgid(egid), arg2)
12082                     || put_user_id(high2lowgid(sgid), arg3))
12083                     return -TARGET_EFAULT;
12084             }
12085         }
12086         return ret;
12087 #endif
12088 #ifdef TARGET_NR_chown
12089     case TARGET_NR_chown:
12090         if (!(p = lock_user_string(arg1)))
12091             return -TARGET_EFAULT;
12092         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12093         unlock_user(p, arg1, 0);
12094         return ret;
12095 #endif
12096     case TARGET_NR_setuid:
12097         return get_errno(sys_setuid(low2highuid(arg1)));
12098     case TARGET_NR_setgid:
12099         return get_errno(sys_setgid(low2highgid(arg1)));
12100     case TARGET_NR_setfsuid:
12101         return get_errno(setfsuid(arg1));
12102     case TARGET_NR_setfsgid:
12103         return get_errno(setfsgid(arg1));
12104 
12105 #ifdef TARGET_NR_lchown32
12106     case TARGET_NR_lchown32:
12107         if (!(p = lock_user_string(arg1)))
12108             return -TARGET_EFAULT;
12109         ret = get_errno(lchown(p, arg2, arg3));
12110         unlock_user(p, arg1, 0);
12111         return ret;
12112 #endif
12113 #ifdef TARGET_NR_getuid32
12114     case TARGET_NR_getuid32:
12115         return get_errno(getuid());
12116 #endif
12117 
12118 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12119     /* Alpha specific */
12120     case TARGET_NR_getxuid:
12121         {
12122             uid_t euid;
12123             euid = geteuid();
12124             cpu_env->ir[IR_A4] = euid;
12125         }
12126         return get_errno(getuid());
12127 #endif
12128 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12129     /* Alpha specific */
12130     case TARGET_NR_getxgid:
12131         {
12132             gid_t egid;
12133             egid = getegid();
12134             cpu_env->ir[IR_A4] = egid;
12135         }
12136         return get_errno(getgid());
12137 #endif
12138 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12139     /* Alpha specific */
12140     case TARGET_NR_osf_getsysinfo:
12141         ret = -TARGET_EOPNOTSUPP;
12142         switch (arg1) {
12143           case TARGET_GSI_IEEE_FP_CONTROL:
12144             {
12145                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12146                 uint64_t swcr = cpu_env->swcr;
12147 
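                      /*
                       * Fold the live exception status bits from the hardware FPCR
                       * into the status field of the software control word returned
                       * to the guest.
                       */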
12148                 swcr &= ~SWCR_STATUS_MASK;
12149                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12150 
12151                 if (put_user_u64(swcr, arg2))
12152                     return -TARGET_EFAULT;
12153                 ret = 0;
12154             }
12155             break;
12156 
12157           /* case GSI_IEEE_STATE_AT_SIGNAL:
12158              -- Not implemented in linux kernel.
12159              case GSI_UACPROC:
12160              -- Retrieves current unaligned access state; not much used.
12161              case GSI_PROC_TYPE:
12162              -- Retrieves implver information; surely not used.
12163              case GSI_GET_HWRPB:
12164              -- Grabs a copy of the HWRPB; surely not used.
12165           */
12166         }
12167         return ret;
12168 #endif
12169 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12170     /* Alpha specific */
12171     case TARGET_NR_osf_setsysinfo:
12172         ret = -TARGET_EOPNOTSUPP;
12173         switch (arg1) {
12174           case TARGET_SSI_IEEE_FP_CONTROL:
12175             {
12176                 uint64_t swcr, fpcr;
12177 
12178                 if (get_user_u64(swcr, arg2)) {
12179                     return -TARGET_EFAULT;
12180                 }
12181 
12182                 /*
12183                  * The kernel calls swcr_update_status to update the
12184                  * status bits from the fpcr at every point that it
12185                  * could be queried.  Therefore, we store the status
12186                  * bits only in FPCR.
12187                  */
12188                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12189 
12190                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12191                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12192                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12193                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12194                 ret = 0;
12195             }
12196             break;
12197 
12198           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12199             {
12200                 uint64_t exc, fpcr, fex;
12201 
12202                 if (get_user_u64(exc, arg2)) {
12203                     return -TARGET_EFAULT;
12204                 }
12205                 exc &= SWCR_STATUS_MASK;
12206                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12207 
12208                 /* Old exceptions are not signaled.  */
12209                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12210                 fex = exc & ~fex;
12211                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12212                 fex &= (cpu_env)->swcr;
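                      /*
                       * fex now holds the newly raised exceptions that the guest has
                       * trap-enabled; if any are set, a SIGFPE with a matching
                       * si_code is queued below.
                       */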
12213 
12214                 /* Update the hardware fpcr.  */
12215                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12216                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12217 
12218                 if (fex) {
12219                     int si_code = TARGET_FPE_FLTUNK;
12220                     target_siginfo_t info;
12221 
12222                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12223                         si_code = TARGET_FPE_FLTUND;
12224                     }
12225                     if (fex & SWCR_TRAP_ENABLE_INE) {
12226                         si_code = TARGET_FPE_FLTRES;
12227                     }
12228                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12229                         si_code = TARGET_FPE_FLTUND;
12230                     }
12231                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12232                         si_code = TARGET_FPE_FLTOVF;
12233                     }
12234                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12235                         si_code = TARGET_FPE_FLTDIV;
12236                     }
12237                     if (fex & SWCR_TRAP_ENABLE_INV) {
12238                         si_code = TARGET_FPE_FLTINV;
12239                     }
12240 
12241                     info.si_signo = SIGFPE;
12242                     info.si_errno = 0;
12243                     info.si_code = si_code;
12244                     info._sifields._sigfault._addr = (cpu_env)->pc;
12245                     queue_signal(cpu_env, info.si_signo,
12246                                  QEMU_SI_FAULT, &info);
12247                 }
12248                 ret = 0;
12249             }
12250             break;
12251 
12252           /* case SSI_NVPAIRS:
12253              -- Used with SSIN_UACPROC to enable unaligned accesses.
12254              case SSI_IEEE_STATE_AT_SIGNAL:
12255              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12256              -- Not implemented in linux kernel
12257           */
12258         }
12259         return ret;
12260 #endif
12261 #ifdef TARGET_NR_osf_sigprocmask
12262     /* Alpha specific.  */
12263     case TARGET_NR_osf_sigprocmask:
12264         {
12265             abi_ulong mask;
12266             int how;
12267             sigset_t set, oldset;
12268 
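                  /*
                   * Translate the OSF how value and 64-bit mask to host form, apply
                   * the change, and hand the previous mask back as the syscall
                   * return value rather than through a pointer.
                   */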
12269             switch(arg1) {
12270             case TARGET_SIG_BLOCK:
12271                 how = SIG_BLOCK;
12272                 break;
12273             case TARGET_SIG_UNBLOCK:
12274                 how = SIG_UNBLOCK;
12275                 break;
12276             case TARGET_SIG_SETMASK:
12277                 how = SIG_SETMASK;
12278                 break;
12279             default:
12280                 return -TARGET_EINVAL;
12281             }
12282             mask = arg2;
12283             target_to_host_old_sigset(&set, &mask);
12284             ret = do_sigprocmask(how, &set, &oldset);
12285             if (!ret) {
12286                 host_to_target_old_sigset(&mask, &oldset);
12287                 ret = mask;
12288             }
12289         }
12290         return ret;
12291 #endif
12292 
12293 #ifdef TARGET_NR_getgid32
12294     case TARGET_NR_getgid32:
12295         return get_errno(getgid());
12296 #endif
12297 #ifdef TARGET_NR_geteuid32
12298     case TARGET_NR_geteuid32:
12299         return get_errno(geteuid());
12300 #endif
12301 #ifdef TARGET_NR_getegid32
12302     case TARGET_NR_getegid32:
12303         return get_errno(getegid());
12304 #endif
12305 #ifdef TARGET_NR_setreuid32
12306     case TARGET_NR_setreuid32:
12307         return get_errno(sys_setreuid(arg1, arg2));
12308 #endif
12309 #ifdef TARGET_NR_setregid32
12310     case TARGET_NR_setregid32:
12311         return get_errno(sys_setregid(arg1, arg2));
12312 #endif
12313 #ifdef TARGET_NR_getgroups32
12314     case TARGET_NR_getgroups32:
12315         { /* the same code as for TARGET_NR_getgroups */
12316             int gidsetsize = arg1;
12317             uint32_t *target_grouplist;
12318             g_autofree gid_t *grouplist = NULL;
12319             int i;
12320 
12321             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12322                 return -TARGET_EINVAL;
12323             }
12324             if (gidsetsize > 0) {
12325                 grouplist = g_try_new(gid_t, gidsetsize);
12326                 if (!grouplist) {
12327                     return -TARGET_ENOMEM;
12328                 }
12329             }
12330             ret = get_errno(getgroups(gidsetsize, grouplist));
12331             if (!is_error(ret) && gidsetsize > 0) {
12332                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12333                                              gidsetsize * 4, 0);
12334                 if (!target_grouplist) {
12335                     return -TARGET_EFAULT;
12336                 }
12337                 for (i = 0; i < ret; i++) {
12338                     target_grouplist[i] = tswap32(grouplist[i]);
12339                 }
12340                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12341             }
12342             return ret;
12343         }
12344 #endif
12345 #ifdef TARGET_NR_setgroups32
12346     case TARGET_NR_setgroups32:
12347         { /* the same code as for TARGET_NR_setgroups */
12348             int gidsetsize = arg1;
12349             uint32_t *target_grouplist;
12350             g_autofree gid_t *grouplist = NULL;
12351             int i;
12352 
12353             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12354                 return -TARGET_EINVAL;
12355             }
12356             if (gidsetsize > 0) {
12357                 grouplist = g_try_new(gid_t, gidsetsize);
12358                 if (!grouplist) {
12359                     return -TARGET_ENOMEM;
12360                 }
12361                 target_grouplist = lock_user(VERIFY_READ, arg2,
12362                                              gidsetsize * 4, 1);
12363                 if (!target_grouplist) {
12364                     return -TARGET_EFAULT;
12365                 }
12366                 for (i = 0; i < gidsetsize; i++) {
12367                     grouplist[i] = tswap32(target_grouplist[i]);
12368                 }
12369                 unlock_user(target_grouplist, arg2, 0);
12370             }
12371             return get_errno(sys_setgroups(gidsetsize, grouplist));
12372         }
12373 #endif
12374 #ifdef TARGET_NR_fchown32
12375     case TARGET_NR_fchown32:
12376         return get_errno(fchown(arg1, arg2, arg3));
12377 #endif
12378 #ifdef TARGET_NR_setresuid32
12379     case TARGET_NR_setresuid32:
12380         return get_errno(sys_setresuid(arg1, arg2, arg3));
12381 #endif
12382 #ifdef TARGET_NR_getresuid32
12383     case TARGET_NR_getresuid32:
12384         {
12385             uid_t ruid, euid, suid;
12386             ret = get_errno(getresuid(&ruid, &euid, &suid));
12387             if (!is_error(ret)) {
12388                 if (put_user_u32(ruid, arg1)
12389                     || put_user_u32(euid, arg2)
12390                     || put_user_u32(suid, arg3))
12391                     return -TARGET_EFAULT;
12392             }
12393         }
12394         return ret;
12395 #endif
12396 #ifdef TARGET_NR_setresgid32
12397     case TARGET_NR_setresgid32:
12398         return get_errno(sys_setresgid(arg1, arg2, arg3));
12399 #endif
12400 #ifdef TARGET_NR_getresgid32
12401     case TARGET_NR_getresgid32:
12402         {
12403             gid_t rgid, egid, sgid;
12404             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12405             if (!is_error(ret)) {
12406                 if (put_user_u32(rgid, arg1)
12407                     || put_user_u32(egid, arg2)
12408                     || put_user_u32(sgid, arg3))
12409                     return -TARGET_EFAULT;
12410             }
12411         }
12412         return ret;
12413 #endif
12414 #ifdef TARGET_NR_chown32
12415     case TARGET_NR_chown32:
12416         if (!(p = lock_user_string(arg1)))
12417             return -TARGET_EFAULT;
12418         ret = get_errno(chown(p, arg2, arg3));
12419         unlock_user(p, arg1, 0);
12420         return ret;
12421 #endif
12422 #ifdef TARGET_NR_setuid32
12423     case TARGET_NR_setuid32:
12424         return get_errno(sys_setuid(arg1));
12425 #endif
12426 #ifdef TARGET_NR_setgid32
12427     case TARGET_NR_setgid32:
12428         return get_errno(sys_setgid(arg1));
12429 #endif
12430 #ifdef TARGET_NR_setfsuid32
12431     case TARGET_NR_setfsuid32:
12432         return get_errno(setfsuid(arg1));
12433 #endif
12434 #ifdef TARGET_NR_setfsgid32
12435     case TARGET_NR_setfsgid32:
12436         return get_errno(setfsgid(arg1));
12437 #endif
12438 #ifdef TARGET_NR_mincore
12439     case TARGET_NR_mincore:
12440         {
12441             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12442             if (!a) {
12443                 return -TARGET_ENOMEM;
12444             }
12445             p = lock_user_string(arg3);
12446             if (!p) {
12447                 ret = -TARGET_EFAULT;
12448             } else {
12449                 ret = get_errno(mincore(a, arg2, p));
12450                 unlock_user(p, arg3, ret);
12451             }
12452             unlock_user(a, arg1, 0);
12453         }
12454         return ret;
12455 #endif
12456 #ifdef TARGET_NR_arm_fadvise64_64
12457     case TARGET_NR_arm_fadvise64_64:
12458         /* arm_fadvise64_64 looks like fadvise64_64 but
12459          * with different argument order: fd, advice, offset, len
12460          * rather than the usual fd, offset, len, advice.
12461          * Note that offset and len are both 64-bit so appear as
12462          * pairs of 32-bit registers.
12463          */
12464         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12465                             target_offset64(arg5, arg6), arg2);
12466         return -host_to_target_errno(ret);
12467 #endif
12468 
12469 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12470 
12471 #ifdef TARGET_NR_fadvise64_64
12472     case TARGET_NR_fadvise64_64:
12473 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12474         /* 6 args: fd, advice, offset (high, low), len (high, low) */
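              /*
               * Rotate advice from arg2 to arg6 so the common call below sees the
               * usual fd, offset, len, advice order.
               */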
12475         ret = arg2;
12476         arg2 = arg3;
12477         arg3 = arg4;
12478         arg4 = arg5;
12479         arg5 = arg6;
12480         arg6 = ret;
12481 #else
12482         /* 6 args: fd, offset (high, low), len (high, low), advice */
12483         if (regpairs_aligned(cpu_env, num)) {
12484             /* offset is in (3,4), len in (5,6) and advice in 7 */
12485             arg2 = arg3;
12486             arg3 = arg4;
12487             arg4 = arg5;
12488             arg5 = arg6;
12489             arg6 = arg7;
12490         }
12491 #endif
12492         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12493                             target_offset64(arg4, arg5), arg6);
12494         return -host_to_target_errno(ret);
12495 #endif
12496 
12497 #ifdef TARGET_NR_fadvise64
12498     case TARGET_NR_fadvise64:
12499         /* 5 args: fd, offset (high, low), len, advice */
12500         if (regpairs_aligned(cpu_env, num)) {
12501             /* offset is in (3,4), len in 5 and advice in 6 */
12502             arg2 = arg3;
12503             arg3 = arg4;
12504             arg4 = arg5;
12505             arg5 = arg6;
12506         }
12507         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12508         return -host_to_target_errno(ret);
12509 #endif
12510 
12511 #else /* not a 32-bit ABI */
12512 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12513 #ifdef TARGET_NR_fadvise64_64
12514     case TARGET_NR_fadvise64_64:
12515 #endif
12516 #ifdef TARGET_NR_fadvise64
12517     case TARGET_NR_fadvise64:
12518 #endif
12519 #ifdef TARGET_S390X
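              /*
               * s390x defines POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE as 6 and 7
               * rather than the generic 4 and 5, so remap those and turn the
               * generic values into deliberately invalid ones.
               */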
12520         switch (arg4) {
12521         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12522         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12523         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12524         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12525         default: break;
12526         }
12527 #endif
12528         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12529 #endif
12530 #endif /* end of 64-bit ABI fadvise handling */
12531 
12532 #ifdef TARGET_NR_madvise
12533     case TARGET_NR_madvise:
12534         return target_madvise(arg1, arg2, arg3);
12535 #endif
12536 #ifdef TARGET_NR_fcntl64
12537     case TARGET_NR_fcntl64:
12538     {
12539         int cmd;
12540         struct flock fl;
12541         from_flock64_fn *copyfrom = copy_from_user_flock64;
12542         to_flock64_fn *copyto = copy_to_user_flock64;
12543 
12544 #ifdef TARGET_ARM
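              /*
               * The old ARM ABI lays out struct flock64 differently (its 64-bit
               * fields are not 8-byte aligned), so it needs its own copy helpers.
               */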
12545         if (!cpu_env->eabi) {
12546             copyfrom = copy_from_user_oabi_flock64;
12547             copyto = copy_to_user_oabi_flock64;
12548         }
12549 #endif
12550 
12551         cmd = target_to_host_fcntl_cmd(arg2);
12552         if (cmd == -TARGET_EINVAL) {
12553             return cmd;
12554         }
12555 
12556         switch(arg2) {
12557         case TARGET_F_GETLK64:
12558             ret = copyfrom(&fl, arg3);
12559             if (ret) {
12560                 break;
12561             }
12562             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12563             if (ret == 0) {
12564                 ret = copyto(arg3, &fl);
12565             }
12566             break;
12567 
12568         case TARGET_F_SETLK64:
12569         case TARGET_F_SETLKW64:
12570             ret = copyfrom(&fl, arg3);
12571             if (ret) {
12572                 break;
12573             }
12574             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12575             break;
12576         default:
12577             ret = do_fcntl(arg1, arg2, arg3);
12578             break;
12579         }
12580         return ret;
12581     }
12582 #endif
12583 #ifdef TARGET_NR_cacheflush
12584     case TARGET_NR_cacheflush:
12585         /* self-modifying code is handled automatically, so nothing needed */
12586         return 0;
12587 #endif
12588 #ifdef TARGET_NR_getpagesize
12589     case TARGET_NR_getpagesize:
12590         return TARGET_PAGE_SIZE;
12591 #endif
12592     case TARGET_NR_gettid:
12593         return get_errno(sys_gettid());
12594 #ifdef TARGET_NR_readahead
12595     case TARGET_NR_readahead:
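              /*
               * On 32-bit ABIs the 64-bit offset arrives as a register pair,
               * possibly shifted up by one slot for register-pair alignment.
               */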
12596 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12597         if (regpairs_aligned(cpu_env, num)) {
12598             arg2 = arg3;
12599             arg3 = arg4;
12600             arg4 = arg5;
12601         }
12602         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12603 #else
12604         ret = get_errno(readahead(arg1, arg2, arg3));
12605 #endif
12606         return ret;
12607 #endif
12608 #ifdef CONFIG_ATTR
12609 #ifdef TARGET_NR_setxattr
12610     case TARGET_NR_listxattr:
12611     case TARGET_NR_llistxattr:
12612     {
12613         void *b = 0;
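              /*
               * arg2 may be NULL when the guest only wants the size of the
               * attribute list, so lock the buffer only when one was supplied.
               */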
12614         if (arg2) {
12615             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12616             if (!b) {
12617                 return -TARGET_EFAULT;
12618             }
12619         }
12620         p = lock_user_string(arg1);
12621         if (p) {
12622             if (num == TARGET_NR_listxattr) {
12623                 ret = get_errno(listxattr(p, b, arg3));
12624             } else {
12625                 ret = get_errno(llistxattr(p, b, arg3));
12626             }
12627         } else {
12628             ret = -TARGET_EFAULT;
12629         }
12630         unlock_user(p, arg1, 0);
12631         unlock_user(b, arg2, arg3);
12632         return ret;
12633     }
12634     case TARGET_NR_flistxattr:
12635     {
12636         void *b = 0;
12637         if (arg2) {
12638             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12639             if (!b) {
12640                 return -TARGET_EFAULT;
12641             }
12642         }
12643         ret = get_errno(flistxattr(arg1, b, arg3));
12644         unlock_user(b, arg2, arg3);
12645         return ret;
12646     }
12647     case TARGET_NR_setxattr:
12648     case TARGET_NR_lsetxattr:
12649         {
12650             void *n, *v = 0;
12651             if (arg3) {
12652                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12653                 if (!v) {
12654                     return -TARGET_EFAULT;
12655                 }
12656             }
12657             p = lock_user_string(arg1);
12658             n = lock_user_string(arg2);
12659             if (p && n) {
12660                 if (num == TARGET_NR_setxattr) {
12661                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12662                 } else {
12663                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12664                 }
12665             } else {
12666                 ret = -TARGET_EFAULT;
12667             }
12668             unlock_user(p, arg1, 0);
12669             unlock_user(n, arg2, 0);
12670             unlock_user(v, arg3, 0);
12671         }
12672         return ret;
12673     case TARGET_NR_fsetxattr:
12674         {
12675             void *n, *v = 0;
12676             if (arg3) {
12677                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12678                 if (!v) {
12679                     return -TARGET_EFAULT;
12680                 }
12681             }
12682             n = lock_user_string(arg2);
12683             if (n) {
12684                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12685             } else {
12686                 ret = -TARGET_EFAULT;
12687             }
12688             unlock_user(n, arg2, 0);
12689             unlock_user(v, arg3, 0);
12690         }
12691         return ret;
12692     case TARGET_NR_getxattr:
12693     case TARGET_NR_lgetxattr:
12694         {
12695             void *n, *v = 0;
12696             if (arg3) {
12697                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12698                 if (!v) {
12699                     return -TARGET_EFAULT;
12700                 }
12701             }
12702             p = lock_user_string(arg1);
12703             n = lock_user_string(arg2);
12704             if (p && n) {
12705                 if (num == TARGET_NR_getxattr) {
12706                     ret = get_errno(getxattr(p, n, v, arg4));
12707                 } else {
12708                     ret = get_errno(lgetxattr(p, n, v, arg4));
12709                 }
12710             } else {
12711                 ret = -TARGET_EFAULT;
12712             }
12713             unlock_user(p, arg1, 0);
12714             unlock_user(n, arg2, 0);
12715             unlock_user(v, arg3, arg4);
12716         }
12717         return ret;
12718     case TARGET_NR_fgetxattr:
12719         {
12720             void *n, *v = 0;
12721             if (arg3) {
12722                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12723                 if (!v) {
12724                     return -TARGET_EFAULT;
12725                 }
12726             }
12727             n = lock_user_string(arg2);
12728             if (n) {
12729                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12730             } else {
12731                 ret = -TARGET_EFAULT;
12732             }
12733             unlock_user(n, arg2, 0);
12734             unlock_user(v, arg3, arg4);
12735         }
12736         return ret;
12737     case TARGET_NR_removexattr:
12738     case TARGET_NR_lremovexattr:
12739         {
12740             void *n;
12741             p = lock_user_string(arg1);
12742             n = lock_user_string(arg2);
12743             if (p && n) {
12744                 if (num == TARGET_NR_removexattr) {
12745                     ret = get_errno(removexattr(p, n));
12746                 } else {
12747                     ret = get_errno(lremovexattr(p, n));
12748                 }
12749             } else {
12750                 ret = -TARGET_EFAULT;
12751             }
12752             unlock_user(p, arg1, 0);
12753             unlock_user(n, arg2, 0);
12754         }
12755         return ret;
12756     case TARGET_NR_fremovexattr:
12757         {
12758             void *n;
12759             n = lock_user_string(arg2);
12760             if (n) {
12761                 ret = get_errno(fremovexattr(arg1, n));
12762             } else {
12763                 ret = -TARGET_EFAULT;
12764             }
12765             unlock_user(n, arg2, 0);
12766         }
12767         return ret;
12768 #endif
12769 #endif /* CONFIG_ATTR */
12770 #ifdef TARGET_NR_set_thread_area
12771     case TARGET_NR_set_thread_area:
12772 #if defined(TARGET_MIPS)
12773       cpu_env->active_tc.CP0_UserLocal = arg1;
12774       return 0;
12775 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12776       return do_set_thread_area(cpu_env, arg1);
12777 #elif defined(TARGET_M68K)
12778       {
12779           TaskState *ts = get_task_state(cpu);
12780           ts->tp_value = arg1;
12781           return 0;
12782       }
12783 #else
12784       return -TARGET_ENOSYS;
12785 #endif
12786 #endif
12787 #ifdef TARGET_NR_get_thread_area
12788     case TARGET_NR_get_thread_area:
12789 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12790         return do_get_thread_area(cpu_env, arg1);
12791 #elif defined(TARGET_M68K)
12792         {
12793             TaskState *ts = get_task_state(cpu);
12794             return ts->tp_value;
12795         }
12796 #else
12797         return -TARGET_ENOSYS;
12798 #endif
12799 #endif
12800 #ifdef TARGET_NR_getdomainname
12801     case TARGET_NR_getdomainname:
12802         return -TARGET_ENOSYS;
12803 #endif
12804 
12805 #ifdef TARGET_NR_clock_settime
12806     case TARGET_NR_clock_settime:
12807     {
12808         struct timespec ts;
12809 
12810         ret = target_to_host_timespec(&ts, arg2);
12811         if (!is_error(ret)) {
12812             ret = get_errno(clock_settime(arg1, &ts));
12813         }
12814         return ret;
12815     }
12816 #endif
12817 #ifdef TARGET_NR_clock_settime64
12818     case TARGET_NR_clock_settime64:
12819     {
12820         struct timespec ts;
12821 
12822         ret = target_to_host_timespec64(&ts, arg2);
12823         if (!is_error(ret)) {
12824             ret = get_errno(clock_settime(arg1, &ts));
12825         }
12826         return ret;
12827     }
12828 #endif
12829 #ifdef TARGET_NR_clock_gettime
12830     case TARGET_NR_clock_gettime:
12831     {
12832         struct timespec ts;
12833         ret = get_errno(clock_gettime(arg1, &ts));
12834         if (!is_error(ret)) {
12835             ret = host_to_target_timespec(arg2, &ts);
12836         }
12837         return ret;
12838     }
12839 #endif
12840 #ifdef TARGET_NR_clock_gettime64
12841     case TARGET_NR_clock_gettime64:
12842     {
12843         struct timespec ts;
12844         ret = get_errno(clock_gettime(arg1, &ts));
12845         if (!is_error(ret)) {
12846             ret = host_to_target_timespec64(arg2, &ts);
12847         }
12848         return ret;
12849     }
12850 #endif
12851 #ifdef TARGET_NR_clock_getres
12852     case TARGET_NR_clock_getres:
12853     {
12854         struct timespec ts;
12855         ret = get_errno(clock_getres(arg1, &ts));
12856         if (!is_error(ret)) {
12857             host_to_target_timespec(arg2, &ts);
12858         }
12859         return ret;
12860     }
12861 #endif
12862 #ifdef TARGET_NR_clock_getres_time64
12863     case TARGET_NR_clock_getres_time64:
12864     {
12865         struct timespec ts;
12866         ret = get_errno(clock_getres(arg1, &ts));
12867         if (!is_error(ret)) {
12868             host_to_target_timespec64(arg2, &ts);
12869         }
12870         return ret;
12871     }
12872 #endif
12873 #ifdef TARGET_NR_clock_nanosleep
12874     case TARGET_NR_clock_nanosleep:
12875     {
12876         struct timespec ts;
12877         if (target_to_host_timespec(&ts, arg3)) {
12878             return -TARGET_EFAULT;
12879         }
12880         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12881                                              &ts, arg4 ? &ts : NULL));
12882         /*
12883          * If the call is interrupted by a signal handler, it fails with
12884          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12885          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12886          */
12887         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12888             host_to_target_timespec(arg4, &ts)) {
12889               return -TARGET_EFAULT;
12890         }
12891 
12892         return ret;
12893     }
12894 #endif
12895 #ifdef TARGET_NR_clock_nanosleep_time64
12896     case TARGET_NR_clock_nanosleep_time64:
12897     {
12898         struct timespec ts;
12899 
12900         if (target_to_host_timespec64(&ts, arg3)) {
12901             return -TARGET_EFAULT;
12902         }
12903 
12904         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12905                                              &ts, arg4 ? &ts : NULL));
12906 
12907         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12908             host_to_target_timespec64(arg4, &ts)) {
12909             return -TARGET_EFAULT;
12910         }
12911         return ret;
12912     }
12913 #endif
12914 
12915 #if defined(TARGET_NR_set_tid_address)
12916     case TARGET_NR_set_tid_address:
12917     {
12918         TaskState *ts = get_task_state(cpu);
12919         ts->child_tidptr = arg1;
12920         /* do not call host set_tid_address() syscall, instead return tid() */
12921         return get_errno(sys_gettid());
12922     }
12923 #endif
12924 
12925     case TARGET_NR_tkill:
12926         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12927 
12928     case TARGET_NR_tgkill:
12929         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12930                          target_to_host_signal(arg3)));
12931 
12932 #ifdef TARGET_NR_set_robust_list
12933     case TARGET_NR_set_robust_list:
12934     case TARGET_NR_get_robust_list:
12935         /* The ABI for supporting robust futexes has userspace pass
12936          * the kernel a pointer to a linked list which is updated by
12937          * userspace after the syscall; the list is walked by the kernel
12938          * when the thread exits. Since the linked list in QEMU guest
12939          * memory isn't a valid linked list for the host and we have
12940          * no way to reliably intercept the thread-death event, we can't
12941          * support these. Silently return ENOSYS so that guest userspace
12942          * falls back to a non-robust futex implementation (which should
12943          * be OK except in the corner case of the guest crashing while
12944          * holding a mutex that is shared with another process via
12945          * shared memory).
12946          */
12947         return -TARGET_ENOSYS;
12948 #endif
12949 
12950 #if defined(TARGET_NR_utimensat)
12951     case TARGET_NR_utimensat:
12952         {
12953             struct timespec *tsp, ts[2];
12954             if (!arg3) {
12955                 tsp = NULL;
12956             } else {
12957                 if (target_to_host_timespec(ts, arg3)) {
12958                     return -TARGET_EFAULT;
12959                 }
12960                 if (target_to_host_timespec(ts + 1, arg3 +
12961                                             sizeof(struct target_timespec))) {
12962                     return -TARGET_EFAULT;
12963                 }
12964                 tsp = ts;
12965             }
12966             if (!arg2)
12967                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12968             else {
12969                 if (!(p = lock_user_string(arg2))) {
12970                     return -TARGET_EFAULT;
12971                 }
12972                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12973                 unlock_user(p, arg2, 0);
12974             }
12975         }
12976         return ret;
12977 #endif
12978 #ifdef TARGET_NR_utimensat_time64
12979     case TARGET_NR_utimensat_time64:
12980         {
12981             struct timespec *tsp, ts[2];
12982             if (!arg3) {
12983                 tsp = NULL;
12984             } else {
12985                 if (target_to_host_timespec64(ts, arg3)) {
12986                     return -TARGET_EFAULT;
12987                 }
12988                 if (target_to_host_timespec64(ts + 1, arg3 +
12989                                      sizeof(struct target__kernel_timespec))) {
12990                     return -TARGET_EFAULT;
12991                 }
12992                 tsp = ts;
12993             }
12994             if (!arg2)
12995                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12996             else {
12997                 p = lock_user_string(arg2);
12998                 if (!p) {
12999                     return -TARGET_EFAULT;
13000                 }
13001                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13002                 unlock_user(p, arg2, 0);
13003             }
13004         }
13005         return ret;
13006 #endif
13007 #ifdef TARGET_NR_futex
13008     case TARGET_NR_futex:
13009         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13010 #endif
13011 #ifdef TARGET_NR_futex_time64
13012     case TARGET_NR_futex_time64:
13013         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13014 #endif
13015 #ifdef CONFIG_INOTIFY
13016 #if defined(TARGET_NR_inotify_init)
13017     case TARGET_NR_inotify_init:
13018         ret = get_errno(inotify_init());
13019         if (ret >= 0) {
13020             fd_trans_register(ret, &target_inotify_trans);
13021         }
13022         return ret;
13023 #endif
13024 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13025     case TARGET_NR_inotify_init1:
13026         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13027                                           fcntl_flags_tbl)));
13028         if (ret >= 0) {
13029             fd_trans_register(ret, &target_inotify_trans);
13030         }
13031         return ret;
13032 #endif
13033 #if defined(TARGET_NR_inotify_add_watch)
13034     case TARGET_NR_inotify_add_watch:
13035         p = lock_user_string(arg2);
13036         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13037         unlock_user(p, arg2, 0);
13038         return ret;
13039 #endif
13040 #if defined(TARGET_NR_inotify_rm_watch)
13041     case TARGET_NR_inotify_rm_watch:
13042         return get_errno(inotify_rm_watch(arg1, arg2));
13043 #endif
13044 #endif
13045 
13046 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13047     case TARGET_NR_mq_open:
13048         {
13049             struct mq_attr posix_mq_attr;
13050             struct mq_attr *pposix_mq_attr;
13051             int host_flags;
13052 
13053             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13054             pposix_mq_attr = NULL;
13055             if (arg4) {
13056                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13057                     return -TARGET_EFAULT;
13058                 }
13059                 pposix_mq_attr = &posix_mq_attr;
13060             }
13061             p = lock_user_string(arg1 - 1);
13062             if (!p) {
13063                 return -TARGET_EFAULT;
13064             }
13065             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13066             unlock_user(p, arg1, 0);
13067         }
13068         return ret;
13069 
13070     case TARGET_NR_mq_unlink:
13071         p = lock_user_string(arg1 - 1);
13072         if (!p) {
13073             return -TARGET_EFAULT;
13074         }
13075         ret = get_errno(mq_unlink(p));
13076         unlock_user(p, arg1, 0);
13077         return ret;
13078 
13079 #ifdef TARGET_NR_mq_timedsend
13080     case TARGET_NR_mq_timedsend:
13081         {
13082             struct timespec ts;
13083 
13084             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13085             if (arg5 != 0) {
13086                 if (target_to_host_timespec(&ts, arg5)) {
13087                     return -TARGET_EFAULT;
13088                 }
13089                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13090                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13091                     return -TARGET_EFAULT;
13092                 }
13093             } else {
13094                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13095             }
13096             unlock_user(p, arg2, arg3);
13097         }
13098         return ret;
13099 #endif
13100 #ifdef TARGET_NR_mq_timedsend_time64
13101     case TARGET_NR_mq_timedsend_time64:
13102         {
13103             struct timespec ts;
13104 
13105             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13106             if (arg5 != 0) {
13107                 if (target_to_host_timespec64(&ts, arg5)) {
13108                     return -TARGET_EFAULT;
13109                 }
13110                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13111                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13112                     return -TARGET_EFAULT;
13113                 }
13114             } else {
13115                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13116             }
13117             unlock_user(p, arg2, arg3);
13118         }
13119         return ret;
13120 #endif
13121 
13122 #ifdef TARGET_NR_mq_timedreceive
13123     case TARGET_NR_mq_timedreceive:
13124         {
13125             struct timespec ts;
13126             unsigned int prio;
13127 
13128             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13129             if (arg5 != 0) {
13130                 if (target_to_host_timespec(&ts, arg5)) {
13131                     return -TARGET_EFAULT;
13132                 }
13133                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13134                                                      &prio, &ts));
13135                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13136                     return -TARGET_EFAULT;
13137                 }
13138             } else {
13139                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13140                                                      &prio, NULL));
13141             }
13142             unlock_user(p, arg2, arg3);
13143             if (arg4 != 0)
13144                 put_user_u32(prio, arg4);
13145         }
13146         return ret;
13147 #endif
13148 #ifdef TARGET_NR_mq_timedreceive_time64
13149     case TARGET_NR_mq_timedreceive_time64:
13150         {
13151             struct timespec ts;
13152             unsigned int prio;
13153 
13154             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13155             if (arg5 != 0) {
13156                 if (target_to_host_timespec64(&ts, arg5)) {
13157                     return -TARGET_EFAULT;
13158                 }
13159                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13160                                                      &prio, &ts));
13161                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13162                     return -TARGET_EFAULT;
13163                 }
13164             } else {
13165                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13166                                                      &prio, NULL));
13167             }
13168             unlock_user(p, arg2, arg3);
13169             if (arg4 != 0) {
13170                 put_user_u32(prio, arg4);
13171             }
13172         }
13173         return ret;
13174 #endif
13175 
13176     /* Not implemented for now... */
13177 /*     case TARGET_NR_mq_notify: */
13178 /*         break; */
13179 
13180     case TARGET_NR_mq_getsetattr:
13181         {
13182             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
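                  /*
                   * arg2 (new attributes) and arg3 (old attributes) are both
                   * optional; mq_setattr() also reports the previous attributes,
                   * which are copied out when the guest asked for them.
                   */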
13183             ret = 0;
13184             if (arg2 != 0) {
13185                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13186                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13187                                            &posix_mq_attr_out));
13188             } else if (arg3 != 0) {
13189                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13190             }
13191             if (ret == 0 && arg3 != 0) {
13192                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13193             }
13194         }
13195         return ret;
13196 #endif
13197 
13198 #ifdef CONFIG_SPLICE
13199 #ifdef TARGET_NR_tee
13200     case TARGET_NR_tee:
13201         {
13202             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13203         }
13204         return ret;
13205 #endif
13206 #ifdef TARGET_NR_splice
13207     case TARGET_NR_splice:
13208         {
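                  /*
                   * The input and output offsets are optional; copy each from guest
                   * memory when supplied and write the host-updated values back
                   * afterwards.
                   */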
13209             loff_t loff_in, loff_out;
13210             loff_t *ploff_in = NULL, *ploff_out = NULL;
13211             if (arg2) {
13212                 if (get_user_u64(loff_in, arg2)) {
13213                     return -TARGET_EFAULT;
13214                 }
13215                 ploff_in = &loff_in;
13216             }
13217             if (arg4) {
13218                 if (get_user_u64(loff_out, arg4)) {
13219                     return -TARGET_EFAULT;
13220                 }
13221                 ploff_out = &loff_out;
13222             }
13223             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13224             if (arg2) {
13225                 if (put_user_u64(loff_in, arg2)) {
13226                     return -TARGET_EFAULT;
13227                 }
13228             }
13229             if (arg4) {
13230                 if (put_user_u64(loff_out, arg4)) {
13231                     return -TARGET_EFAULT;
13232                 }
13233             }
13234         }
13235         return ret;
13236 #endif
13237 #ifdef TARGET_NR_vmsplice
13238     case TARGET_NR_vmsplice:
13239         {
13240             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13241             if (vec != NULL) {
13242                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13243                 unlock_iovec(vec, arg2, arg3, 0);
13244             } else {
13245                 ret = -host_to_target_errno(errno);
13246             }
13247         }
13248         return ret;
13249 #endif
13250 #endif /* CONFIG_SPLICE */
13251 #ifdef CONFIG_EVENTFD
13252 #if defined(TARGET_NR_eventfd)
13253     case TARGET_NR_eventfd:
13254         ret = get_errno(eventfd(arg1, 0));
13255         if (ret >= 0) {
13256             fd_trans_register(ret, &target_eventfd_trans);
13257         }
13258         return ret;
13259 #endif
13260 #if defined(TARGET_NR_eventfd2)
13261     case TARGET_NR_eventfd2:
13262     {
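              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK and
               * O_CLOEXEC, so the guest flags are translated via the target O_*
               * definitions.
               */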
13263         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13264         if (arg2 & TARGET_O_NONBLOCK) {
13265             host_flags |= O_NONBLOCK;
13266         }
13267         if (arg2 & TARGET_O_CLOEXEC) {
13268             host_flags |= O_CLOEXEC;
13269         }
13270         ret = get_errno(eventfd(arg1, host_flags));
13271         if (ret >= 0) {
13272             fd_trans_register(ret, &target_eventfd_trans);
13273         }
13274         return ret;
13275     }
13276 #endif
13277 #endif /* CONFIG_EVENTFD  */
13278 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13279     case TARGET_NR_fallocate:
13280 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13281         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13282                                   target_offset64(arg5, arg6)));
13283 #else
13284         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13285 #endif
13286         return ret;
13287 #endif
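
/*
 * On 32-bit ABIs a 64-bit file offset is split across two argument
 * registers and recombined with target_offset64() (defined earlier in
 * this file).  Rough example for a little-endian 32-bit guest calling
 * fallocate(fd, 0, 0x123456789, 4096), assuming the usual low-word-first
 * ordering:
 *
 *     arg3 = 0x23456789 (low word), arg4 = 0x00000001 (high word)
 *     target_offset64(arg3, arg4) == 0x123456789
 *
 * Big-endian guests pass the words in the opposite order, which is why
 * the helper rather than the caller decides how to combine them.
 */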
13288 #if defined(CONFIG_SYNC_FILE_RANGE)
13289 #if defined(TARGET_NR_sync_file_range)
13290     case TARGET_NR_sync_file_range:
13291 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13292 #if defined(TARGET_MIPS)
13293         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13294                                         target_offset64(arg5, arg6), arg7));
13295 #else
13296         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13297                                         target_offset64(arg4, arg5), arg6));
13298 #endif /* !TARGET_MIPS */
13299 #else
13300         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13301 #endif
13302         return ret;
13303 #endif
13304 #if defined(TARGET_NR_sync_file_range2) || \
13305     defined(TARGET_NR_arm_sync_file_range)
13306 #if defined(TARGET_NR_sync_file_range2)
13307     case TARGET_NR_sync_file_range2:
13308 #endif
13309 #if defined(TARGET_NR_arm_sync_file_range)
13310     case TARGET_NR_arm_sync_file_range:
13311 #endif
13312         /* This is like sync_file_range but the arguments are reordered */
13313 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13314         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13315                                         target_offset64(arg5, arg6), arg2));
13316 #else
13317         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13318 #endif
13319         return ret;
13320 #endif
13321 #endif
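
/*
 * Two spellings of the same syscall exist because 32-bit ABIs that demand
 * 64-bit values in aligned register pairs cannot fit the natural
 * sync_file_range(fd, offset, nbytes, flags) layout into their argument
 * registers (MIPS pads, as handled above; Arm instead defines
 * sync_file_range2() with flags moved to the second slot).  Roughly, as
 * seen from the guest:
 *
 *     sync_file_range (fd, off_pair, nbytes_pair, flags)
 *     sync_file_range2(fd, flags, off_pair, nbytes_pair)
 *
 * with the word order inside each pair depending on guest endianness.
 */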
13322 #if defined(TARGET_NR_signalfd4)
13323     case TARGET_NR_signalfd4:
13324         return do_signalfd4(arg1, arg2, arg4);
13325 #endif
13326 #if defined(TARGET_NR_signalfd)
13327     case TARGET_NR_signalfd:
13328         return do_signalfd4(arg1, arg2, 0);
13329 #endif
13330 #if defined(CONFIG_EPOLL)
13331 #if defined(TARGET_NR_epoll_create)
13332     case TARGET_NR_epoll_create:
13333         return get_errno(epoll_create(arg1));
13334 #endif
13335 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13336     case TARGET_NR_epoll_create1:
13337         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13338 #endif
13339 #if defined(TARGET_NR_epoll_ctl)
13340     case TARGET_NR_epoll_ctl:
13341     {
13342         struct epoll_event ep;
13343         struct epoll_event *epp = NULL;
13344         if (arg4) {
13345             if (arg2 != EPOLL_CTL_DEL) {
13346                 struct target_epoll_event *target_ep;
13347                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13348                     return -TARGET_EFAULT;
13349                 }
13350                 ep.events = tswap32(target_ep->events);
13351                 /*
13352                  * The epoll_data_t union is just opaque data to the kernel,
13353                  * so we transfer all 64 bits across and need not worry what
13354                  * actual data type it is.
13355                  */
13356                 ep.data.u64 = tswap64(target_ep->data.u64);
13357                 unlock_user_struct(target_ep, arg4, 0);
13358             }
13359             /*
13360              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
13361              * a non-null pointer, even though this argument is ignored.
13362              */
13364             epp = &ep;
13365         }
13366         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13367     }
13368 #endif
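
/*
 * The conversion above is a plain byte-order fixup:
 *
 *     target_ep->events   --tswap32-->  ep.events    (bitmask, e.g. EPOLLIN)
 *     target_ep->data.u64 --tswap64-->  ep.data.u64  (opaque to the kernel)
 *
 * tswap32()/tswap64() are no-ops when guest and host endianness agree.
 * data may hold a guest pointer, fd or integer, but since only the guest
 * ever interprets it, carrying all 64 bits across unchanged is sufficient.
 */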
13369 
13370 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13371 #if defined(TARGET_NR_epoll_wait)
13372     case TARGET_NR_epoll_wait:
13373 #endif
13374 #if defined(TARGET_NR_epoll_pwait)
13375     case TARGET_NR_epoll_pwait:
13376 #endif
13377     {
13378         struct target_epoll_event *target_ep;
13379         struct epoll_event *ep;
13380         int epfd = arg1;
13381         int maxevents = arg3;
13382         int timeout = arg4;
13383 
13384         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13385             return -TARGET_EINVAL;
13386         }
13387 
13388         target_ep = lock_user(VERIFY_WRITE, arg2,
13389                               maxevents * sizeof(struct target_epoll_event), 1);
13390         if (!target_ep) {
13391             return -TARGET_EFAULT;
13392         }
13393 
13394         ep = g_try_new(struct epoll_event, maxevents);
13395         if (!ep) {
13396             unlock_user(target_ep, arg2, 0);
13397             return -TARGET_ENOMEM;
13398         }
13399 
13400         switch (num) {
13401 #if defined(TARGET_NR_epoll_pwait)
13402         case TARGET_NR_epoll_pwait:
13403         {
13404             sigset_t *set = NULL;
13405 
13406             if (arg5) {
13407                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13408                 if (ret != 0) {
13409                     break;
13410                 }
13411             }
13412 
13413             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13414                                              set, SIGSET_T_SIZE));
13415 
13416             if (set) {
13417                 finish_sigsuspend_mask(ret);
13418             }
13419             break;
13420         }
13421 #endif
13422 #if defined(TARGET_NR_epoll_wait)
13423         case TARGET_NR_epoll_wait:
13424             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13425                                              NULL, 0));
13426             break;
13427 #endif
13428         default:
13429             ret = -TARGET_ENOSYS;
13430         }
13431         if (!is_error(ret)) {
13432             int i;
13433             for (i = 0; i < ret; i++) {
13434                 target_ep[i].events = tswap32(ep[i].events);
13435                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13436             }
13437             unlock_user(target_ep, arg2,
13438                         ret * sizeof(struct target_epoll_event));
13439         } else {
13440             unlock_user(target_ep, arg2, 0);
13441         }
13442         g_free(ep);
13443         return ret;
13444     }
13445 #endif
13446 #endif
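
/*
 * Notes on the epoll_wait/epoll_pwait path above: maxevents is checked
 * against TARGET_EP_MAX_EVENTS before g_try_new(), so a guest cannot force
 * an arbitrarily large host allocation, and on success only the first
 * `ret` events are byte-swapped and copied back (unlock_user() is passed
 * exactly that many bytes), so untouched guest memory is left alone.
 */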
13447 #ifdef TARGET_NR_prlimit64
13448     case TARGET_NR_prlimit64:
13449     {
13450         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13451         struct target_rlimit64 *target_rnew, *target_rold;
13452         struct host_rlimit64 rnew, rold, *rnewp = NULL;
13453         int resource = target_to_host_resource(arg2);
13454 
13455         if (arg3 && (resource != RLIMIT_AS &&
13456                      resource != RLIMIT_DATA &&
13457                      resource != RLIMIT_STACK)) {
13458             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13459                 return -TARGET_EFAULT;
13460             }
13461             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13462             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13463             unlock_user_struct(target_rnew, arg3, 0);
13464             rnewp = &rnew;
13465         }
13466 
13467         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : NULL));
13468         if (!is_error(ret) && arg4) {
13469             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13470                 return -TARGET_EFAULT;
13471             }
13472             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13473             __put_user(rold.rlim_max, &target_rold->rlim_max);
13474             unlock_user_struct(target_rold, arg4, 1);
13475         }
13476         return ret;
13477     }
13478 #endif
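
/*
 * The RLIMIT_AS/RLIMIT_DATA/RLIMIT_STACK exclusion above silently drops a
 * requested new limit for those resources: rnewp stays NULL, so the host
 * call only reports the old value back.  Under emulation the guest's
 * notion of its memory budget does not match the host process, which also
 * has to hold QEMU itself and the translated code, so applying such limits
 * for real could break the emulator rather than the guest.
 */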
13479 #ifdef TARGET_NR_gethostname
13480     case TARGET_NR_gethostname:
13481     {
13482         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13483         if (name) {
13484             ret = get_errno(gethostname(name, arg2));
13485             unlock_user(name, arg1, arg2);
13486         } else {
13487             ret = -TARGET_EFAULT;
13488         }
13489         return ret;
13490     }
13491 #endif
13492 #ifdef TARGET_NR_atomic_cmpxchg_32
13493     case TARGET_NR_atomic_cmpxchg_32:
13494     {
13495         /* should use start_exclusive from main.c */
13496         abi_ulong mem_value;
13497         if (get_user_u32(mem_value, arg6)) {
13498             target_siginfo_t info;
13499             info.si_signo = SIGSEGV;
13500             info.si_errno = 0;
13501             info.si_code = TARGET_SEGV_MAPERR;
13502             info._sifields._sigfault._addr = arg6;
13503             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13504             return 0xdeadbeef;
13505         }
13506         if (mem_value == arg2) {
13507             put_user_u32(arg1, arg6);
13508         }
13509         return mem_value;
13510     }
13511 #endif
13512 #ifdef TARGET_NR_atomic_barrier
13513     case TARGET_NR_atomic_barrier:
13514         /* Like the kernel implementation and the QEMU Arm
13515            barrier, treat this as a no-op. */
13516         return 0;
13517 #endif
13518 
13519 #ifdef TARGET_NR_timer_create
13520     case TARGET_NR_timer_create:
13521     {
13522         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13523 
13524         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13525 
13526         int clkid = arg1;
13527         int timer_index = next_free_host_timer();
13528 
13529         if (timer_index < 0) {
13530             ret = -TARGET_EAGAIN;
13531         } else {
13532             timer_t *phtimer = g_posix_timers + timer_index;
13533 
13534             if (arg2) {
13535                 phost_sevp = &host_sevp;
13536                 ret = target_to_host_sigevent(phost_sevp, arg2);
13537                 if (ret != 0) {
13538                     free_host_timer_slot(timer_index);
13539                     return ret;
13540                 }
13541             }
13542 
13543             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13544             if (ret) {
13545                 free_host_timer_slot(timer_index);
13546             } else {
13547                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13548                     timer_delete(*phtimer);
13549                     free_host_timer_slot(timer_index);
13550                     return -TARGET_EFAULT;
13551                 }
13552             }
13553         }
13554         return ret;
13555     }
13556 #endif
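
/*
 * The timer id returned to the guest is not the host timer_t but the index
 * into g_posix_timers tagged with TIMER_MAGIC (see the put_user() above).
 * The decoding side, get_timer_id() earlier in this file, is roughly:
 *
 *     if ((arg & TIMER_MAGIC_MASK) != TIMER_MAGIC) return -TARGET_EINVAL;
 *     timerid = arg & 0xffff;
 *     if (timerid >= ARRAY_SIZE(g_posix_timers)) return -TARGET_EINVAL;
 *
 * so a guest-supplied value can never be dereferenced as a raw host handle.
 */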
13557 
13558 #ifdef TARGET_NR_timer_settime
13559     case TARGET_NR_timer_settime:
13560     {
13561         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13562          * struct itimerspec * old_value */
13563         target_timer_t timerid = get_timer_id(arg1);
13564 
13565         if (timerid < 0) {
13566             ret = timerid;
13567         } else if (arg3 == 0) {
13568             ret = -TARGET_EINVAL;
13569         } else {
13570             timer_t htimer = g_posix_timers[timerid];
13571             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13572 
13573             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13574                 return -TARGET_EFAULT;
13575             }
13576             ret = get_errno(
13577                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13578             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13579                 return -TARGET_EFAULT;
13580             }
13581         }
13582         return ret;
13583     }
13584 #endif
13585 
13586 #ifdef TARGET_NR_timer_settime64
13587     case TARGET_NR_timer_settime64:
13588     {
13589         target_timer_t timerid = get_timer_id(arg1);
13590 
13591         if (timerid < 0) {
13592             ret = timerid;
13593         } else if (arg3 == 0) {
13594             ret = -TARGET_EINVAL;
13595         } else {
13596             timer_t htimer = g_posix_timers[timerid];
13597             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13598 
13599             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13600                 return -TARGET_EFAULT;
13601             }
13602             ret = get_errno(
13603                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13604             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13605                 return -TARGET_EFAULT;
13606             }
13607         }
13608         return ret;
13609     }
13610 #endif
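
/*
 * The timer_settime64 and timer_gettime64 cases differ from their plain
 * counterparts only in the guest-side converter: 32-bit guests built with
 * the y2038-safe 64-bit time_t ABI lay out struct itimerspec with 64-bit
 * seconds, so the itimerspec64 helpers are used, while the host
 * timer_settime() and timer_gettime() calls are identical.
 */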
13611 
13612 #ifdef TARGET_NR_timer_gettime
13613     case TARGET_NR_timer_gettime:
13614     {
13615         /* args: timer_t timerid, struct itimerspec *curr_value */
13616         target_timer_t timerid = get_timer_id(arg1);
13617 
13618         if (timerid < 0) {
13619             ret = timerid;
13620         } else if (!arg2) {
13621             ret = -TARGET_EFAULT;
13622         } else {
13623             timer_t htimer = g_posix_timers[timerid];
13624             struct itimerspec hspec;
13625             ret = get_errno(timer_gettime(htimer, &hspec));
13626 
13627             if (host_to_target_itimerspec(arg2, &hspec)) {
13628                 ret = -TARGET_EFAULT;
13629             }
13630         }
13631         return ret;
13632     }
13633 #endif
13634 
13635 #ifdef TARGET_NR_timer_gettime64
13636     case TARGET_NR_timer_gettime64:
13637     {
13638         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13639         target_timer_t timerid = get_timer_id(arg1);
13640 
13641         if (timerid < 0) {
13642             ret = timerid;
13643         } else if (!arg2) {
13644             ret = -TARGET_EFAULT;
13645         } else {
13646             timer_t htimer = g_posix_timers[timerid];
13647             struct itimerspec hspec;
13648             ret = get_errno(timer_gettime(htimer, &hspec));
13649 
13650             if (host_to_target_itimerspec64(arg2, &hspec)) {
13651                 ret = -TARGET_EFAULT;
13652             }
13653         }
13654         return ret;
13655     }
13656 #endif
13657 
13658 #ifdef TARGET_NR_timer_getoverrun
13659     case TARGET_NR_timer_getoverrun:
13660     {
13661         /* args: timer_t timerid */
13662         target_timer_t timerid = get_timer_id(arg1);
13663 
13664         if (timerid < 0) {
13665             ret = timerid;
13666         } else {
13667             timer_t htimer = g_posix_timers[timerid];
13668             ret = get_errno(timer_getoverrun(htimer));
13669         }
13670         return ret;
13671     }
13672 #endif
13673 
13674 #ifdef TARGET_NR_timer_delete
13675     case TARGET_NR_timer_delete:
13676     {
13677         /* args: timer_t timerid */
13678         target_timer_t timerid = get_timer_id(arg1);
13679 
13680         if (timerid < 0) {
13681             ret = timerid;
13682         } else {
13683             timer_t htimer = g_posix_timers[timerid];
13684             ret = get_errno(timer_delete(htimer));
13685             free_host_timer_slot(timerid);
13686         }
13687         return ret;
13688     }
13689 #endif
13690 
13691 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13692     case TARGET_NR_timerfd_create:
13693         ret = get_errno(timerfd_create(arg1,
13694                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13695         if (ret >= 0) {
13696             fd_trans_register(ret, &target_timerfd_trans);
13697         }
13698         return ret;
13699 #endif
13700 
13701 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13702     case TARGET_NR_timerfd_gettime:
13703         {
13704             struct itimerspec its_curr;
13705 
13706             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13707 
13708             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13709                 return -TARGET_EFAULT;
13710             }
13711         }
13712         return ret;
13713 #endif
13714 
13715 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13716     case TARGET_NR_timerfd_gettime64:
13717         {
13718             struct itimerspec its_curr;
13719 
13720             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13721 
13722             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13723                 return -TARGET_EFAULT;
13724             }
13725         }
13726         return ret;
13727 #endif
13728 
13729 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13730     case TARGET_NR_timerfd_settime:
13731         {
13732             struct itimerspec its_new, its_old, *p_new;
13733 
13734             if (arg3) {
13735                 if (target_to_host_itimerspec(&its_new, arg3)) {
13736                     return -TARGET_EFAULT;
13737                 }
13738                 p_new = &its_new;
13739             } else {
13740                 p_new = NULL;
13741             }
13742 
13743             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13744 
13745             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13746                 return -TARGET_EFAULT;
13747             }
13748         }
13749         return ret;
13750 #endif
13751 
13752 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13753     case TARGET_NR_timerfd_settime64:
13754         {
13755             struct itimerspec its_new, its_old, *p_new;
13756 
13757             if (arg3) {
13758                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13759                     return -TARGET_EFAULT;
13760                 }
13761                 p_new = &its_new;
13762             } else {
13763                 p_new = NULL;
13764             }
13765 
13766             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13767 
13768             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13769                 return -TARGET_EFAULT;
13770             }
13771         }
13772         return ret;
13773 #endif
13774 
13775 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13776     case TARGET_NR_ioprio_get:
13777         return get_errno(ioprio_get(arg1, arg2));
13778 #endif
13779 
13780 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13781     case TARGET_NR_ioprio_set:
13782         return get_errno(ioprio_set(arg1, arg2, arg3));
13783 #endif
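
/*
 * ioprio_get/ioprio_set need no argument translation: the priority value
 * is a plain integer whose encoding (scheduling class in the upper bits,
 * class data in the lower bits, built by the kernel's IOPRIO_PRIO_VALUE()
 * macro) is the same on every architecture.
 */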
13784 
13785 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13786     case TARGET_NR_setns:
13787         return get_errno(setns(arg1, arg2));
13788 #endif
13789 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13790     case TARGET_NR_unshare:
13791         return get_errno(unshare(arg1));
13792 #endif
13793 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13794     case TARGET_NR_kcmp:
13795         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13796 #endif
13797 #ifdef TARGET_NR_swapcontext
13798     case TARGET_NR_swapcontext:
13799         /* PowerPC specific.  */
13800         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13801 #endif
13802 #ifdef TARGET_NR_memfd_create
13803     case TARGET_NR_memfd_create:
13804         p = lock_user_string(arg1);
13805         if (!p) {
13806             return -TARGET_EFAULT;
13807         }
13808         ret = get_errno(memfd_create(p, arg2));
13809         fd_trans_unregister(ret);
13810         unlock_user(p, arg1, 0);
13811         return ret;
13812 #endif
13813 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13814     case TARGET_NR_membarrier:
13815         return get_errno(membarrier(arg1, arg2));
13816 #endif
13817 
13818 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13819     case TARGET_NR_copy_file_range:
13820         {
13821             loff_t inoff, outoff;
13822             loff_t *pinoff = NULL, *poutoff = NULL;
13823 
13824             if (arg2) {
13825                 if (get_user_u64(inoff, arg2)) {
13826                     return -TARGET_EFAULT;
13827                 }
13828                 pinoff = &inoff;
13829             }
13830             if (arg4) {
13831                 if (get_user_u64(outoff, arg4)) {
13832                     return -TARGET_EFAULT;
13833                 }
13834                 poutoff = &outoff;
13835             }
13836             /* Do not sign-extend the count parameter. */
13837             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13838                                                  (abi_ulong)arg5, arg6));
13839             if (!is_error(ret) && ret > 0) {
13840                 if (arg2) {
13841                     if (put_user_u64(inoff, arg2)) {
13842                         return -TARGET_EFAULT;
13843                     }
13844                 }
13845                 if (arg4) {
13846                     if (put_user_u64(outoff, arg4)) {
13847                         return -TARGET_EFAULT;
13848                     }
13849                 }
13850             }
13851         }
13852         return ret;
13853 #endif
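
/*
 * Concrete illustration of the sign-extension note above: on a 32-bit
 * guest abi_long is a signed 32-bit type, so a length of 0x80000000 in
 * arg5 is negative and would widen to 0xffffffff80000000 when passed as
 * the host's 64-bit size_t, asking the host to copy an absurd range.  The
 * (abi_ulong) cast zero-extends it back to the intended 0x80000000.
 */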
13854 
13855 #if defined(TARGET_NR_pivot_root)
13856     case TARGET_NR_pivot_root:
13857         {
13858             void *p2;
13859             p = lock_user_string(arg1); /* new_root */
13860             p2 = lock_user_string(arg2); /* put_old */
13861             if (!p || !p2) {
13862                 ret = -TARGET_EFAULT;
13863             } else {
13864                 ret = get_errno(pivot_root(p, p2));
13865             }
13866             unlock_user(p2, arg2, 0);
13867             unlock_user(p, arg1, 0);
13868         }
13869         return ret;
13870 #endif
13871 
13872 #if defined(TARGET_NR_riscv_hwprobe)
13873     case TARGET_NR_riscv_hwprobe:
13874         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13875 #endif
13876 
13877     default:
13878         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13879         return -TARGET_ENOSYS;
13880     }
13881     return ret;
13882 }
13883 
13884 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13885                     abi_long arg2, abi_long arg3, abi_long arg4,
13886                     abi_long arg5, abi_long arg6, abi_long arg7,
13887                     abi_long arg8)
13888 {
13889     CPUState *cpu = env_cpu(cpu_env);
13890     abi_long ret;
13891 
13892 #ifdef DEBUG_ERESTARTSYS
13893     /* Debug-only code for exercising the syscall-restart code paths
13894      * in the per-architecture cpu main loops: restart every syscall
13895      * the guest makes once before letting it through.
13896      */
13897     {
13898         static bool flag;
13899         flag = !flag;
13900         if (flag) {
13901             return -QEMU_ERESTARTSYS;
13902         }
13903     }
13904 #endif
13905 
13906     record_syscall_start(cpu, num, arg1,
13907                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13908 
13909     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13910         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13911     }
13912 
13913     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13914                       arg5, arg6, arg7, arg8);
13915 
13916     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13917         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13918                           arg3, arg4, arg5, arg6);
13919     }
13920 
13921     record_syscall_return(cpu, num, ret);
13922     return ret;
13923 }
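
/*
 * Rough, architecture-neutral sketch of how the per-target cpu loop
 * consumes this return value (the real code lives in each
 * linux-user/<arch>/cpu_loop.c and differs in register names and
 * instruction sizes):
 *
 *     ret = do_syscall(env, nr, a1, a2, a3, a4, a5, a6, 0, 0);
 *     if (ret == -QEMU_ERESTARTSYS) {
 *         env->pc -= 4;            // re-execute the syscall instruction
 *     } else if (ret != -QEMU_ESIGRETURN) {
 *         env->regs[0] = ret;      // result or -TARGET_Exxx errno
 *     }
 */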
13924