/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/page-protection.h"
#include "user/page-protection.h"

#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

/*
 * map an incomplete host page
 *
 * mmap_frag can be called with a valid fd only if flags doesn't contain one of
 * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we
 * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter if we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called, so it
 *   doesn't matter whether we add it or not either. See enforcing of
 *   constraints for MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, with the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_RWX;

    prot_new = prot | prot1;
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (!mmap_pread(fd, g2h_untagged(start), end - start, offset, true)) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, alignment);
    if (ret == -1 && start > TARGET_PAGE_SIZE) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
                                    size, alignment);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment :
             MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                           (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        abi_ulong alignment;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        alignment = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
        start = mmap_find_vma(real_start, host_len, alignment);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address.  It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (!mmap_pread(fd, g2h_untagged(start), len, offset, false)) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

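/*
 * Descriptive comment (not in the original source): keep a guest range
 * reserved rather than returning it to the host.  When reserved_va is in
 * effect, target_munmap() calls this to replace the region with an anonymous
 * PROT_NONE mapping instead of munmap()ing it, so the address-space
 * reservation stays intact.  Host pages at either end that still hold live
 * guest mappings are left untouched.
 */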
void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}