/*
 *  memory management system call shims and definitions
 *
 *  Copyright (c) 2013-15 Stacey D. Son
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Copyright (c) 1982, 1986, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef BSD_USER_BSD_MEM_H
#define BSD_USER_BSD_MEM_H

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <fcntl.h>

#include "qemu-bsd.h"
#include "exec/mmap-lock.h"
#include "exec/page-protection.h"
#include "user/page-protection.h"

extern struct bsd_shm_regions bsd_shm_regions[];
extern abi_ulong target_brk;
extern abi_ulong initial_target_brk;

/* mmap(2) */
static inline abi_long do_bsd_mmap(void *cpu_env, abi_long arg1, abi_long arg2,
    abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7,
    abi_long arg8)
{
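    /*
     * On ABIs that pass 64-bit arguments in aligned register pairs, the
     * offset arrives preceded by a pad register, so its two halves live
     * in arg7/arg8; shift them down before use.
     */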
    if (regpairs_aligned(cpu_env) != 0) {
        arg6 = arg7;
        arg7 = arg8;
    }
    return get_errno(target_mmap(arg1, arg2, arg3,
                                 target_to_host_bitmask(arg4, mmap_flags_tbl),
                                 arg5, target_arg64(arg6, arg7)));
}

/* munmap(2) */
static inline abi_long do_bsd_munmap(abi_long arg1, abi_long arg2)
{
    return get_errno(target_munmap(arg1, arg2));
}

/* mprotect(2) */
static inline abi_long do_bsd_mprotect(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    return get_errno(target_mprotect(arg1, arg2, arg3));
}

/* msync(2) */
static inline abi_long do_bsd_msync(abi_long addr, abi_long len, abi_long flags)
{
    if (!guest_range_valid_untagged(addr, len)) {
        /* It seems odd, but POSIX wants this to be ENOMEM */
        return -TARGET_ENOMEM;
    }

    return get_errno(msync(g2h_untagged(addr), len, flags));
}

/* mlock(2) */
static inline abi_long do_bsd_mlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(mlock(g2h_untagged(arg1), arg2));
}

/* munlock(2) */
static inline abi_long do_bsd_munlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(munlock(g2h_untagged(arg1), arg2));
}

/* mlockall(2) */
static inline abi_long do_bsd_mlockall(abi_long arg1)
{
    return get_errno(mlockall(arg1));
}

/* munlockall(2) */
static inline abi_long do_bsd_munlockall(void)
{
    return get_errno(munlockall());
}

/* madvise(2) */
static inline abi_long do_bsd_madvise(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    abi_ulong len;
    int ret = 0;
    abi_long start = arg1;
    abi_long len_in = arg2;
    abi_long advice = arg3;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /*
     * Most advice values are hints, so ignoring them and returning success
     * is OK.
     *
     * However, some advice values, such as MADV_DONTNEED, are not hints and
     * need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * MADV_DONTNEED is passed through, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success: that is broken, but some userspace programs fail to work
     * otherwise, and implementing the emulation completely would be quite
     * complicated.
     */
    mmap_lock();
    switch (advice) {
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if (ret == 0) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
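    /* Any other advice value is a hint; leave ret == 0 (success). */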
    mmap_unlock();

    return ret;
}

/* minherit(2) */
static inline abi_long do_bsd_minherit(abi_long addr, abi_long len,
        abi_long inherit)
{
    return get_errno(minherit(g2h_untagged(addr), len, inherit));
}

/* mincore(2) */
static inline abi_long do_bsd_mincore(abi_ulong target_addr, abi_ulong len,
        abi_ulong target_vec)
{
    abi_long ret;
    void *p;
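    /* mincore(2) reports one status byte per guest page of the range. */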
    abi_ulong vec_len = DIV_ROUND_UP(len, TARGET_PAGE_SIZE);

    if (!guest_range_valid_untagged(target_addr, len)
        || !page_check_range(target_addr, len, PAGE_VALID)) {
        return -TARGET_EFAULT;
    }

    p = lock_user(VERIFY_WRITE, target_vec, vec_len, 0);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(mincore(g2h_untagged(target_addr), len, p));
    unlock_user(p, target_vec, vec_len);

    return ret;
}

/* do_obreak() must return target values and target errnos. */
static inline abi_long do_obreak(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow the brk to shrink below its initial value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

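    /*
     * Grow the heap in place: with MAP_FIXED, MAP_EXCL makes the mapping
     * fail rather than clobber anything already mapped in the new range.
     */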
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED | MAP_EXCL | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* For everything else, return the previous break. */
    return target_brk;
}

/* shm_open(2) */
static inline abi_long do_bsd_shm_open(abi_ulong arg1, abi_long arg2,
        abi_long arg3)
{
    int ret;
    void *p;

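    /* SHM_ANON is a magic pointer value, not a pathname in guest memory. */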
    if (arg1 == (uintptr_t)SHM_ANON) {
        p = SHM_ANON;
    } else {
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
    }
    ret = get_errno(shm_open(p, target_to_host_bitmask(arg2, fcntl_flags_tbl),
                             arg3));

    if (p != SHM_ANON) {
        unlock_user(p, arg1, 0);
    }

    return ret;
}

/* shm_unlink(2) */
static inline abi_long do_bsd_shm_unlink(abi_ulong arg1)
{
    int ret;
    void *p;

    p = lock_user_string(arg1);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(shm_unlink(p)); /* XXX path(p)? */
    unlock_user(p, arg1, 0);

    return ret;
}

/* shmget(2) */
static inline abi_long do_bsd_shmget(abi_long arg1, abi_ulong arg2,
        abi_long arg3)
{
    return get_errno(shmget(arg1, arg2, arg3));
}

/* shmctl(2) */
static inline abi_long do_bsd_shmctl(abi_long shmid, abi_long cmd,
        abi_ulong buff)
{
    struct shmid_ds dsarg;
    abi_long ret = -TARGET_EINVAL;

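    /* Reduce the command to its low byte; only base IPC commands are handled. */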
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buff, &dsarg)) {
            return -TARGET_EFAULT;
        }
        break;

    case IPC_SET:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        break;

    case IPC_RMID:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;

    default:
        ret = -TARGET_EINVAL;
        break;
    }

    return ret;
}

/* shmat(2) */
static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_ulong raddr;
    abi_long ret;
    struct shmid_ds shm_info;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* Can't get the length */
        return ret;
    }

    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        void *host_raddr;

        if (shmaddr) {
            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
        } else {
            abi_ulong alignment;
            abi_ulong mmap_start;

            alignment = 0; /* alignment above page size not required */
            mmap_start = mmap_find_vma(0, shm_info.shm_segsz, alignment);

            if (mmap_start == -1) {
                return -TARGET_ENOMEM;
            }
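            /*
             * The range found by mmap_find_vma() may already be backed by
             * a placeholder mapping, so SHM_REMAP is used to replace it.
             */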
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }

        if (host_raddr == (void *)-1) {
            return get_errno(-1);
        }
        raddr = h2g(host_raddr);

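        /* Mirror the new mapping's protections into the guest page flags. */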
        page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

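        /* Record the attach so that do_bsd_shmdt() can find its size later. */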
        for (int i = 0; i < N_BSD_SHM_REGIONS; i++) {
            if (bsd_shm_regions[i].start == 0) {
                bsd_shm_regions[i].start = raddr;
                bsd_shm_regions[i].size = shm_info.shm_segsz;
                break;
            }
        }
    }

    return raddr;
}

/* shmdt(2) */
static inline abi_long do_bsd_shmdt(abi_ulong shmaddr)
{
    abi_long ret;

    WITH_MMAP_LOCK_GUARD() {
        int i;

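        /* Find the attach previously recorded by do_bsd_shmat(). */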
        for (i = 0; i < N_BSD_SHM_REGIONS; ++i) {
            if (bsd_shm_regions[i].start == shmaddr) {
                break;
            }
        }

        if (i == N_BSD_SHM_REGIONS) {
            return -TARGET_EINVAL;
        }

        ret = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (ret == 0) {
            abi_ulong size = bsd_shm_regions[i].size;

            bsd_shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + size - 1, 0);
            mmap_reserve(shmaddr, size);
        }
    }

    return ret;
}

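/* vadvise() */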
static inline abi_long do_bsd_vadvise(void)
{
    /* See sys_ovadvise() in vm_unix.c */
    return -TARGET_EINVAL;
}

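/* sbrk(2) */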
static inline abi_long do_bsd_sbrk(void)
{
    /* see sys_sbrk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

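/* sstk(2) */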
static inline abi_long do_bsd_sstk(void)
{
    /* see sys_sstk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

#endif /* BSD_USER_BSD_MEM_H */