/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "exec/page-protection.h"
#include "qemu.h"
#include "user/page-protection.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}
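/*
 * Illustrative lifecycle of the tree above (a summary, not new
 * behavior): target_shmat() records each attached segment via
 * shm_region_add(); target_shmdt() recovers the segment's extent with
 * shm_region_find() before removing it with shm_region_rm_complete().
 * mmap/munmap over the same range also calls shm_region_rm_complete(),
 * so stale entries cannot linger once the address space is reused.
 */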
/*
 * Validate target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_RWX) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming the guest bit to the host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}
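/*
 * E.g. target_to_host_prot(PROT_READ | PROT_EXEC) yields PROT_READ
 * alone: guest code is executed via TCG translation, which only ever
 * reads the guest pages, so host-side execute permission is never
 * required.
 */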
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}

/*
 * Perform munmap on behalf of the target, with host parameters.
 * If reserved_va, we must replace the memory reservation.
 */
static int do_munmap(void *addr, size_t len)
{
    if (reserved_va) {
        void *ptr = mmap(addr, len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == addr ? 0 : -1;
    }
    return munmap(addr, len);
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}
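/*
 * Note on the EOF case above: the kernel zero-fills the tail of the
 * last file-backed page, so when mmap_frag() below emulates a file
 * mapping with an anonymous page plus pread, passing @zero preserves
 * those read-as-zero semantics.
 */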
/*
 * Map an incomplete host page.
 *
 * Here be dragons.  This case will not work if there is an existing
 * overlapping host page, which is file mapped, and for which the mapping
 * is beyond the end of the file.  In that case, we will see SIGBUS when
 * trying to write a portion of this page.
 *
 * FIXME: Work around this with a temporary signal handler and longjmp.
 */
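/*
 * Worked example, assuming 64KiB host pages and 4KiB target pages:
 * a guest mapping of [0x41000, 0x41fff] lands inside host page
 * [0x40000, 0x4ffff].  If no other guest page within that host page
 * is valid (prot_old == 0), a fresh anonymous host page is created
 * and the requested 4KiB is read (or zeroed) into place; otherwise
 * the existing host page is temporarily made writable, filled, and
 * reprotected with the union of the old and new protections.
 */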
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                do_munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else if (!mmap_pread(fd, g2h_untagged(start), last - start + 1,
                           offset, true)) {
        return false;
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If this repeats, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/*
 * Record a successful mmap within the user-exec interval tree.
 */
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}

/*
 * Special case host page size == target page size,
 * where there are no edge conditions.
 */
static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
                            int host_prot, int flags, int page_flags,
                            int fd, off_t offset)
{
    void *p, *want_p = NULL;
    abi_ulong last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    p = mmap(want_p, len, host_prot, flags, fd, offset);
    if (p == MAP_FAILED) {
        return -1;
    }
    /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
    if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
        do_munmap(p, len);
        errno = EEXIST;
        return -1;
    }

    start = h2g(p);
    last = start + len - 1;
    return mmap_end(start, last, start, last, flags, page_flags);
}
/*
 * Special case host page size < target page size.
 *
 * The two special cases are increased guest alignment, and mapping
 * past the end of a file.
 *
 * When mapping files into a memory area larger than the file,
 * accesses to pages beyond the file size will cause a SIGBUS.
 *
 * For example, if mmapping a file of 100 bytes on a host with 4K
 * pages emulating a target with 8K pages, the target expects to
 * be able to access the first 8K.  But the host will trap us on
 * any access beyond 4K.
 *
 * When emulating a target with a larger page size than the host's,
 * we may need to truncate file maps at EOF and add extra anonymous
 * pages up to the target's page boundary.
 *
 * This workaround only works for files that do not change.
 * If the file is later extended (e.g. ftruncate), the SIGBUS
 * vanishes and the proper behaviour is that changes within the
 * anon page should be reflected in the file.
 *
 * However, this case is rather common with executable images,
 * so the workaround is important for even trivial tests, whereas
 * the mmap of a file being extended is less common.
 */
static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
                            int mmap_flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = NULL;
    off_t fileend_adj = 0;
    int flags = mmap_flags;
    abi_ulong last, pass_last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }
        if (offset >= sb.st_size) {
            /*
             * The entire map is beyond the end of the file.
             * Transform it to an anonymous mapping.
             */
            flags |= MAP_ANONYMOUS;
            fd = -1;
            offset = 0;
        } else if (offset + len > sb.st_size) {
            /*
             * A portion of the map is beyond the end of the file.
             * Truncate the file portion of the allocation.
             */
            fileend_adj = offset + len - sb.st_size;
        }
    }

    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (fileend_adj) {
            p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        } else {
            p = mmap(want_p, len, host_prot, flags, fd, offset);
        }
        if (p != want_p) {
            if (p != MAP_FAILED) {
                /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
                do_munmap(p, len);
                errno = EEXIST;
            }
            return -1;
        }

        if (fileend_adj) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
                           fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;

                /*
                 * We failed a map over the top of the successful anonymous
                 * mapping above.  The only failure mode is running out of
                 * VMAs, and there's nothing that we can do to detect that
                 * earlier.  If we have replaced an existing mapping with
                 * MAP_FIXED, then we cannot properly recover.  It's a coin
                 * toss whether it would be better to exit or continue here.
                 */
                if (!(flags & MAP_FIXED_NOREPLACE) &&
                    !page_check_range_empty(start, start + len - 1)) {
                    qemu_log("QEMU target_mmap late failure: %s",
                             strerror(save_errno));
                }

                do_munmap(want_p, len);
                errno = save_errno;
                return -1;
            }
        }
    } else {
        size_t host_len, part_len;

        /*
         * Take care to align the host memory.  Perform a larger anonymous
         * allocation and extract the aligned portion.  Remap the file on
         * top of that.
         */
        host_len = len + TARGET_PAGE_SIZE - host_page_size;
        p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }

        part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
        if (part_len) {
            part_len = TARGET_PAGE_SIZE - part_len;
            do_munmap(p, part_len);
            p += part_len;
            host_len -= part_len;
        }
        if (len < host_len) {
            do_munmap(p + len, host_len - len);
        }

        if (!(flags & MAP_ANONYMOUS)) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           flags | MAP_FIXED, fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;
                do_munmap(p, len);
                errno = save_errno;
                return -1;
            }
        }

        start = h2g(p);
    }

    last = start + len - 1;
    if (fileend_adj) {
        pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
    } else {
        pass_last = last;
    }
    return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
}
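/*
 * Example of the fileend_adj bookkeeping above, with 4KiB host pages,
 * 8KiB target pages, offset 0 and a 100-byte file: len is 0x2000 and
 * fileend_adj is 0x2000 - 100, so mmap_end() marks only
 * [start, start + 0xfff] as PAGE_PASSTHROUGH.  The anonymous tail page
 * does not mirror the file, so operations such as madvise() must not
 * be forwarded blindly for it (see target_madvise() below).
 */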
732 */ 733 host_len = len + TARGET_PAGE_SIZE - host_page_size; 734 p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0); 735 if (p == MAP_FAILED) { 736 return -1; 737 } 738 739 part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1); 740 if (part_len) { 741 part_len = TARGET_PAGE_SIZE - part_len; 742 do_munmap(p, part_len); 743 p += part_len; 744 host_len -= part_len; 745 } 746 if (len < host_len) { 747 do_munmap(p + len, host_len - len); 748 } 749 750 if (!(flags & MAP_ANONYMOUS)) { 751 void *t = mmap(p, len - fileend_adj, host_prot, 752 flags | MAP_FIXED, fd, offset); 753 754 if (t == MAP_FAILED) { 755 int save_errno = errno; 756 do_munmap(p, len); 757 errno = save_errno; 758 return -1; 759 } 760 } 761 762 start = h2g(p); 763 } 764 765 last = start + len - 1; 766 if (fileend_adj) { 767 pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1; 768 } else { 769 pass_last = last; 770 } 771 return mmap_end(start, last, start, pass_last, mmap_flags, page_flags); 772 } 773 774 /* 775 * Special case host page size > target page size. 776 * 777 * The two special cases are address and file offsets that are valid 778 * for the guest that cannot be directly represented by the host. 779 */ 780 static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len, 781 int target_prot, int host_prot, 782 int flags, int page_flags, int fd, 783 off_t offset, int host_page_size) 784 { 785 void *p, *want_p = NULL; 786 off_t host_offset = offset & -host_page_size; 787 abi_ulong last, real_start, real_last; 788 bool misaligned_offset = false; 789 size_t host_len; 790 791 if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { 792 want_p = g2h_untagged(start); 793 } 794 795 if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { 796 /* 797 * Adjust the offset to something representable on the host. 798 */ 799 host_len = len + offset - host_offset; 800 p = mmap(want_p, host_len, host_prot, flags, fd, host_offset); 801 if (p == MAP_FAILED) { 802 return -1; 803 } 804 805 /* Update start to the file position at offset. */ 806 p += offset - host_offset; 807 808 start = h2g(p); 809 last = start + len - 1; 810 return mmap_end(start, last, start, last, flags, page_flags); 811 } 812 813 if (!(flags & MAP_ANONYMOUS)) { 814 misaligned_offset = (start ^ offset) & (host_page_size - 1); 815 816 /* 817 * The fallback for misalignment is a private mapping + read. 818 * This carries none of semantics required of MAP_SHARED. 819 */ 820 if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) { 821 errno = EINVAL; 822 return -1; 823 } 824 } 825 826 last = start + len - 1; 827 real_start = start & -host_page_size; 828 real_last = ROUND_UP(last, host_page_size) - 1; 829 830 /* 831 * Handle the start and end of the mapping. 832 */ 833 if (real_start < start) { 834 abi_ulong real_page_last = real_start + host_page_size - 1; 835 if (last <= real_page_last) { 836 /* Entire allocation a subset of one host page. 
static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    int host_prot;

    /*
     * For reserved_va, we are in full control of the allocation.
     * Find a suitable hole and convert to MAP_FIXED.
     */
    if (reserved_va) {
        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, start + len - 1)) {
                errno = EEXIST;
                return -1;
            }
            flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
        } else if (!(flags & MAP_FIXED)) {
            abi_ulong real_start = start & -host_page_size;
            off_t host_offset = offset & -host_page_size;
            size_t real_len = len + offset - host_offset;
            abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);

            start = mmap_find_vma(real_start, real_len, align);
            if (start == (abi_ulong)-1) {
                errno = ENOMEM;
                return -1;
            }
            start += offset - host_offset;
            flags |= MAP_FIXED;
        }
    }

    host_prot = target_to_host_prot(target_prot);

    if (host_page_size == TARGET_PAGE_SIZE) {
        return mmap_h_eq_g(start, len, host_prot, flags,
                           page_flags, fd, offset);
    } else if (host_page_size < TARGET_PAGE_SIZE) {
        return mmap_h_lt_g(start, len, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    } else {
        return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    }
}
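/*
 * Note on the non-fixed reserved_va path above: the hole is searched
 * for with host-page granularity (real_start/real_len) so that a
 * host-misaligned file offset can still be honored; the target-visible
 * start is then nudged by (offset - host_offset) within the hole found.
 */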
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
            tcg_cflags_set(cpu, CF_PARALLEL);
            tb_flush(cpu);
        }
    }

    return ret;
}

static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    return do_munmap(host_start, real_len);
}
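/*
 * Example, with 64KiB host pages and 4KiB target pages: unmapping the
 * guest range [0x40000, 0x40fff] only concerns host page
 * [0x40000, 0x4ffff].  If any other guest page within that host page
 * is still valid, the host page is retained (nothing is unmapped at
 * the host level) and the caller merely clears QEMU's page flags.
 */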
int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        errno = EINVAL;
        return -1;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /*
             * If new and old addresses overlap then the above mremap will
             * already have failed with EINVAL.
             */
            mmap_reserve_or_unmap(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve_or_unmap(old_addr, old_size);
            }
        }
    } else {
        int page_flags = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                page_flags |= page_get_flags(addr);
            }
        }
        if (page_flags == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    /* Re-reserve the tail released by the in-place shrink. */
                    mmap_reserve_or_unmap(old_addr + new_size,
                                          old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        shm_region_rm_complete(old_addr, old_addr + old_size - 1);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
        shm_region_rm_complete(new_addr, new_addr + new_size - 1);
    }
    mmap_unlock();
    return new_addr;
}
1128 */ 1129 mmap_reserve_or_unmap(old_addr, old_size); 1130 } 1131 } else if (flags & MREMAP_MAYMOVE) { 1132 abi_ulong mmap_start; 1133 1134 mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE); 1135 1136 if (mmap_start == -1) { 1137 errno = ENOMEM; 1138 host_addr = MAP_FAILED; 1139 } else { 1140 host_addr = mremap(g2h_untagged(old_addr), old_size, new_size, 1141 flags | MREMAP_FIXED, 1142 g2h_untagged(mmap_start)); 1143 if (reserved_va) { 1144 mmap_reserve_or_unmap(old_addr, old_size); 1145 } 1146 } 1147 } else { 1148 int page_flags = 0; 1149 if (reserved_va && old_size < new_size) { 1150 abi_ulong addr; 1151 for (addr = old_addr + old_size; 1152 addr < old_addr + new_size; 1153 addr++) { 1154 page_flags |= page_get_flags(addr); 1155 } 1156 } 1157 if (page_flags == 0) { 1158 host_addr = mremap(g2h_untagged(old_addr), 1159 old_size, new_size, flags); 1160 1161 if (host_addr != MAP_FAILED) { 1162 /* Check if address fits target address space */ 1163 if (!guest_range_valid_untagged(h2g(host_addr), new_size)) { 1164 /* Revert mremap() changes */ 1165 host_addr = mremap(g2h_untagged(old_addr), 1166 new_size, old_size, flags); 1167 errno = ENOMEM; 1168 host_addr = MAP_FAILED; 1169 } else if (reserved_va && old_size > new_size) { 1170 mmap_reserve_or_unmap(old_addr + old_size, 1171 old_size - new_size); 1172 } 1173 } 1174 } else { 1175 errno = ENOMEM; 1176 host_addr = MAP_FAILED; 1177 } 1178 } 1179 1180 if (host_addr == MAP_FAILED) { 1181 new_addr = -1; 1182 } else { 1183 new_addr = h2g(host_addr); 1184 prot = page_get_flags(old_addr); 1185 page_set_flags(old_addr, old_addr + old_size - 1, 0); 1186 shm_region_rm_complete(old_addr, old_addr + old_size - 1); 1187 page_set_flags(new_addr, new_addr + new_size - 1, 1188 prot | PAGE_VALID | PAGE_RESET); 1189 shm_region_rm_complete(new_addr, new_addr + new_size - 1); 1190 } 1191 mmap_unlock(); 1192 return new_addr; 1193 } 1194 1195 abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice) 1196 { 1197 abi_ulong len; 1198 int ret = 0; 1199 1200 if (start & ~TARGET_PAGE_MASK) { 1201 return -TARGET_EINVAL; 1202 } 1203 if (len_in == 0) { 1204 return 0; 1205 } 1206 len = TARGET_PAGE_ALIGN(len_in); 1207 if (len == 0 || !guest_range_valid_untagged(start, len)) { 1208 return -TARGET_EINVAL; 1209 } 1210 1211 /* Translate for some architectures which have different MADV_xxx values */ 1212 switch (advice) { 1213 case TARGET_MADV_DONTNEED: /* alpha */ 1214 advice = MADV_DONTNEED; 1215 break; 1216 case TARGET_MADV_WIPEONFORK: /* parisc */ 1217 advice = MADV_WIPEONFORK; 1218 break; 1219 case TARGET_MADV_KEEPONFORK: /* parisc */ 1220 advice = MADV_KEEPONFORK; 1221 break; 1222 /* we do not care about the other MADV_xxx values yet */ 1223 } 1224 1225 /* 1226 * Most advice values are hints, so ignoring and returning success is ok. 1227 * 1228 * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and 1229 * MADV_KEEPONFORK are not hints and need to be emulated. 1230 * 1231 * A straight passthrough for those may not be safe because qemu sometimes 1232 * turns private file-backed mappings into anonymous mappings. 1233 * If all guest pages have PAGE_PASSTHROUGH set, mappings have the 1234 * same semantics for the host as for the guest. 1235 * 1236 * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and 1237 * return failure if not. 1238 * 1239 * MADV_DONTNEED is passed through as well, if possible. 1240 * If passthrough isn't possible, we nevertheless (wrongly!) 
#ifndef TARGET_FORCE_SHMLBA
/*
 * For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned.  Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

#if defined(__arm__) || defined(__mips__) || defined(__sparc__)
#define HOST_FORCE_SHMLBA 1
#else
#define HOST_FORCE_SHMLBA 0
#endif
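/*
 * Illustrative case: if the target defines TARGET_FORCE_SHMLBA with a
 * 16KiB SHMLBA while the host only requires page alignment, m_shmlba
 * below becomes 16KiB, and an shmaddr that is not 16KiB-aligned is
 * rejected unless SHM_RND was passed -- matching what the target
 * kernel would do natively.
 */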
abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                       abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct shmid_ds shm_info;
    int ret;
    int h_pagesize;
    int t_shmlba, h_shmlba, m_shmlba;
    size_t t_len, h_len, m_len;

    /* shmat pointers are always untagged */

    /*
     * Because we can't use host shmat() unless the address is sufficiently
     * aligned for the host, we'll need to check both.
     * TODO: Could be fixed with softmmu.
     */
    t_shmlba = target_shmlba(cpu_env);
    h_pagesize = qemu_real_host_page_size();
    h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
    m_shmlba = MAX(t_shmlba, h_shmlba);

    if (shmaddr) {
        if (shmaddr & (m_shmlba - 1)) {
            if (shmflg & SHM_RND) {
                /*
                 * The guest is allowing the kernel to round the address.
                 * Assume that the guest is ok with us rounding to the
                 * host required alignment too.  Anyway if we don't, we'll
                 * get an error from the kernel.
                 */
                shmaddr &= ~(m_shmlba - 1);
                if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
                    return -TARGET_EINVAL;
                }
            } else {
                int require = TARGET_PAGE_SIZE;
#ifdef TARGET_FORCE_SHMLBA
                require = t_shmlba;
#endif
                /*
                 * Include host required alignment, as otherwise we cannot
                 * use host shmat at all.
                 */
                require = MAX(require, h_shmlba);
                if (shmaddr & (require - 1)) {
                    return -TARGET_EINVAL;
                }
            }
        }
    } else {
        if (shmflg & SHM_REMAP) {
            return -TARGET_EINVAL;
        }
    }
    /* All rounding now manually concluded. */
    shmflg &= ~SHM_RND;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }
    t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
    h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
    m_len = MAX(t_len, h_len);

    if (!guest_range_valid_untagged(shmaddr, m_len)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        bool mapped = false;
        void *want, *test;
        abi_ulong last;

        if (!shmaddr) {
            shmaddr = mmap_find_vma(0, m_len, m_shmlba);
            if (shmaddr == -1) {
                return -TARGET_ENOMEM;
            }
            mapped = !reserved_va;
        } else if (shmflg & SHM_REMAP) {
            /*
             * If host page size > target page size, the host shmat may map
             * more memory than the guest expects.  Reject a mapping that
             * would replace memory in the unexpected gap.
             * TODO: Could be fixed with softmmu.
             */
            if (t_len < h_len &&
                !page_check_range_empty(shmaddr + t_len,
                                        shmaddr + h_len - 1)) {
                return -TARGET_EINVAL;
            }
        } else {
            if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
                return -TARGET_EINVAL;
            }
        }

        /* All placement is now complete. */
        want = (void *)g2h_untagged(shmaddr);

        /*
         * Map anonymous pages across the entire range, then remap with
         * the shared memory.  This is required for a number of corner
         * cases for which host and guest page sizes differ.
         */
        if (h_len != t_len) {
            int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
            int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
                       | (reserved_va || mapped || (shmflg & SHM_REMAP)
                          ? MAP_FIXED : MAP_FIXED_NOREPLACE);

            test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
            if (unlikely(test != want)) {
                /* shmat returns EINVAL not EEXIST like mmap. */
                ret = (test == MAP_FAILED && errno != EEXIST
                       ? get_errno(-1) : -TARGET_EINVAL);
                if (mapped) {
                    do_munmap(want, m_len);
                }
                return ret;
            }
            mapped = true;
        }

        if (reserved_va || mapped) {
            shmflg |= SHM_REMAP;
        }
        test = shmat(shmid, want, shmflg);
        if (test == MAP_FAILED) {
            ret = get_errno(-1);
            if (mapped) {
                do_munmap(want, m_len);
            }
            return ret;
        }
        assert(test == want);

        last = shmaddr + m_len - 1;
        page_set_flags(shmaddr, last,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
                       (shmflg & SHM_EXEC ? PAGE_EXEC : 0));

        shm_region_rm_complete(shmaddr, last);
        shm_region_add(shmaddr, last);
    }
1447 */ 1448 if (!tcg_cflags_has(cpu, CF_PARALLEL)) { 1449 tcg_cflags_set(cpu, CF_PARALLEL); 1450 tb_flush(cpu); 1451 } 1452 1453 if (qemu_loglevel_mask(CPU_LOG_PAGE)) { 1454 FILE *f = qemu_log_trylock(); 1455 if (f) { 1456 fprintf(f, "page layout changed following shmat\n"); 1457 page_dump(f); 1458 qemu_log_unlock(f); 1459 } 1460 } 1461 return shmaddr; 1462 } 1463 1464 abi_long target_shmdt(abi_ulong shmaddr) 1465 { 1466 abi_long rv; 1467 1468 /* shmdt pointers are always untagged */ 1469 1470 WITH_MMAP_LOCK_GUARD() { 1471 abi_ulong last = shm_region_find(shmaddr); 1472 if (last == 0) { 1473 return -TARGET_EINVAL; 1474 } 1475 1476 rv = get_errno(shmdt(g2h_untagged(shmaddr))); 1477 if (rv == 0) { 1478 abi_ulong size = last - shmaddr + 1; 1479 1480 page_set_flags(shmaddr, last, 0); 1481 shm_region_rm_complete(shmaddr, last); 1482 mmap_reserve_or_unmap(shmaddr, size); 1483 } 1484 } 1485 return rv; 1486 } 1487