/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/page-protection.h"
#include "user/page-protection.h"

#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
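
/*
 * Emulate mprotect(2) for the guest.  Host pages that the request only
 * partially covers keep the union of the protections of every guest page
 * they contain; fully covered host pages get exactly the requested
 * protection.  Returns 0 on success, -EINVAL for malformed arguments, or
 * the host mprotect() return value on failure.
 */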
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
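
/*
 * Worked example for the fragment handling above (page sizes are only
 * illustrative): with 4 KiB guest pages on a 16 KiB-page host, changing the
 * protection of one guest page re-mprotects the whole surrounding 16 KiB
 * host page, OR-ing in the flags of the other three guest pages it contains
 * so their existing permissions are preserved.
 */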

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

/*
 * map an incomplete host page
 *
 * mmap_frag can be called with a valid fd, if flags doesn't contain one of
 * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we
 * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter if we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called, so it
 *   doesn't matter whether we add it or not either. See enforcing of
 *   constraints for MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, with the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_RWX;

    prot_new = prot | prot1;
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (!mmap_pread(fd, g2h_untagged(start), end - start, offset, true)) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
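
/*
 * Default search origin for mmap_find_vma() when the guest does not supply
 * an address.  mmap_next_start remembers where the last automatic placement
 * ended, so successive anonymous mappings grow upwards from here.
 */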
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, alignment);
    if (ret == -1 && start > TARGET_PAGE_SIZE) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
                                    size, alignment);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment :
             MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}
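
/*
 * Emulate mmap(2) for the guest.  A request that is not host-page aligned is
 * built from up to three pieces: partial host pages at the start and/or end
 * handled by mmap_frag(), and a direct host mmap() for the aligned middle.
 * Returns the guest address of the mapping, or -1 with errno set on failure.
 */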
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                           (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
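
    /*
     * Non-fixed mappings: reserve the whole host-page-aligned area with an
     * anonymous mapping first, then (for file mappings) map the file over it
     * at the same address, so the head/tail bytes added for host-page
     * alignment stay backed by memory.
     */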
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address.  It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host the host mmap() handles this error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (!mmap_pread(fd, g2h_untagged(start), len, offset, false)) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
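
/*
 * Replace a guest range with a PROT_NONE anonymous mapping instead of
 * unmapping it.  Used when the guest address space was reserved up front
 * (reserved_va), so the host reservation must stay in place; host pages
 * still shared with live guest pages are left untouched.
 */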
void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}