/* qemu/bsd-user/mmap.c (xref revision cc944932ecef3b7a56ae62d89dd92fb9e56c5cc8) */
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/mmap-lock.h"
#include "exec/page-protection.h"
#include "user/page-protection.h"

#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

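/*
 * mmap_lock() can be taken recursively by one thread; the per-thread
 * mmap_lock_count records the nesting depth so that only the outermost
 * lock/unlock pair touches the process-wide mutex.
 */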
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
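    /*
     * When the host page size is larger than the target page size, the
     * first and last host pages of the range can also hold target pages
     * outside [start, end); fold their existing protections into the new
     * one so they are preserved.
     */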
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

/*
 * Map an incomplete host page.
 *
 * mmap_frag is called with a valid fd only when flags contains none of
 * MAP_ANON, MAP_STACK or MAP_GUARD; in those cases the caller passes
 * fd == -1. However, if flags contains MAP_GUARD then MAP_ANON must not be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected.  See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter whether we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON itself, so again it
 *   doesn't matter whether we add it.  See enforcing of constraints for
 *   MAP_STACK in kern_mmap.
 *
 * The code below therefore keys on fd alone rather than listing those flags
 * explicitly: when fd != -1 it adds MAP_ANON, on the assumption that any
 * future flag requiring fd == -1 will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
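    /*
     * Map the target pages [start, end) that lie inside the single host
     * page starting at real_start, merging the requested protection with
     * that of any target pages already present in the same host page.
     */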
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_RWX;

    prot_new = prot | prot1;
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (!mmap_pread(fd, g2h_untagged(start), end - start, offset, true)) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
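/*
 * Next guest address from which mmap_find_vma() starts searching when the
 * caller does not supply a hint.
 */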
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, alignment);
    if (ret == -1 && start > TARGET_PAGE_SIZE) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
                                    size, alignment);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment :
             MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

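    /*
     * Probe the host address space with PROT_NONE mappings until we get a
     * region that the host can supply and that the guest can address.
     * 'repeat' counts consecutive identical addresses returned by the host,
     * and 'wrapped' records that the search has already restarted from the
     * bottom of the guest address space.
     */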
    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

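    /*
     * Validate flag combinations up front, mirroring the constraints that
     * the FreeBSD kernel enforces (see kern_mmap() in sys/vm/vm_mmap.c), so
     * invalid requests fail with EINVAL before any host mapping is made.
     */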
    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                    (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        abi_ulong alignment;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        alignment = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
        start = mmap_find_vma(real_start, host_len, alignment);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF, aligned to the
             * host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
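        /*
         * Reserve the whole host-page-aligned range anonymously first, then
         * (for file mappings) overlay the file pages on top with MAP_FIXED,
         * so any tail beyond the file-backed part stays anonymous.
         */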
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target; on any
         * other target/host combination the host mmap() handles this error
         * correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (!mmap_pread(fd, g2h_untagged(start), len, offset, false)) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

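/*
 * Re-map the guest range [start, start + size) as PROT_NONE instead of
 * unmapping it, skipping any partial host page that still contains live
 * guest pages; used in place of munmap() when reserved_va is in effect.
 */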
void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}