/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "exec/page-protection.h"
#include "exec/translation-block.h"
#include "qemu.h"
#include "user/page-protection.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

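/*
 * SysV shared memory attachments made via target_shmat are recorded
 * below in an interval tree, so that target_shmdt can recover the full
 * extent of an attachment given only its starting address.
 */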
/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}

/*
 * Validate the target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_RWX) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming the guest bit to the host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}

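/*
 * When host pages are larger than target pages, a guest mprotect may
 * cover only part of a host page, while the remaining guest pages in
 * that host page must keep their existing protection.  target_mprotect
 * below therefore splits the operation into at most three host ranges:
 * a head and/or tail host page whose protection is the union of the
 * old and new flags, and an aligned middle that takes the new
 * protection directly.
 */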
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}

/*
 * Perform munmap on behalf of the target, with host parameters.
 * If reserved_va, we must replace the memory reservation.
 */
static int do_munmap(void *addr, size_t len)
{
    if (reserved_va) {
        void *ptr = mmap(addr, len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == addr ? 0 : -1;
    }
    return munmap(addr, len);
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

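/*
 * In outline, mmap_frag below works as follows: compute the union of
 * the protections already present on the other guest pages within the
 * host page, make the host page writable if it is not already, fill
 * the new guest range by reading from the file (or zeroing, for
 * anonymous mappings), then restore the combined protection.
 */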
/*
 * Map an incomplete host page.
 *
 * Here be dragons.  This case will not work if there is an existing
 * overlapping host page, which is file mapped, and for which the mapping
 * is beyond the end of the file.  In that case, we will see SIGBUS when
 * trying to write a portion of this page.
 *
 * FIXME: Work around this with a temporary signal handler and longjmp.
 */
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                do_munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else if (!mmap_pread(fd, g2h_untagged(start), last - start + 1,
                           offset, true)) {
        return false;
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

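/*
 * Guest address-space layout: task_unmapped_base is the traditional
 * floor for non-fixed mappings, elf_et_dyn_base is the default load
 * address for ET_DYN (PIE) executables, and mmap_next_start is the
 * hint at which the next free-area search begins.
 */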
abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve the needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

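/*
 * PAGE_PASSTHROUGH marks guest pages whose host mapping implements the
 * guest mapping exactly, so that syscalls such as madvise can safely be
 * forwarded to the host for those pages.  mmap_end below records which
 * portion of a new mapping, if any, qualifies.
 */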
/*
 * Record a successful mmap within the user-exec interval tree.
 */
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}

/*
 * Special case host page size == target page size,
 * where there are no edge conditions.
 */
static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
                            int host_prot, int flags, int page_flags,
                            int fd, off_t offset)
{
    void *p, *want_p = NULL;
    abi_ulong last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    p = mmap(want_p, len, host_prot, flags, fd, offset);
    if (p == MAP_FAILED) {
        return -1;
    }
    /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
    if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
        do_munmap(p, len);
        errno = EEXIST;
        return -1;
    }

    start = h2g(p);
    last = start + len - 1;
    return mmap_end(start, last, start, last, flags, page_flags);
}

/*
 * Special case host page size < target page size.
 *
 * The two special cases are increased guest alignment, and mapping
 * past the end of a file.
 *
 * When mapping files into a memory area larger than the file,
 * accesses to pages beyond the file size will cause a SIGBUS.
 *
 * For example, if mmapping a file of 100 bytes on a host with 4K
 * pages emulating a target with 8K pages, the target expects to
 * be able to access the first 8K.  But the host will trap us on
 * any access beyond 4K.
 *
 * When emulating a target with a larger page size than the host,
 * we may need to truncate file maps at EOF and add extra anonymous
 * pages up to the target's page boundary.
 *
 * This workaround only works for files that do not change.
 * If the file is later extended (e.g. ftruncate), the SIGBUS
 * vanishes and the proper behaviour is that changes within the
 * anon page should be reflected in the file.
 *
 * However, this case is rather common with executable images,
 * so the workaround is important for even trivial tests, whereas
 * the mmap of a file being extended is less common.
 */
static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
                            int mmap_flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = NULL;
    off_t fileend_adj = 0;
    int flags = mmap_flags;
    abi_ulong last, pass_last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }
        if (offset >= sb.st_size) {
            /*
             * The entire map is beyond the end of the file.
             * Transform it to an anonymous mapping.
             */
            flags |= MAP_ANONYMOUS;
            fd = -1;
            offset = 0;
        } else if (offset + len > sb.st_size) {
            /*
             * A portion of the map is beyond the end of the file.
             * Truncate the file portion of the allocation.
             */
            fileend_adj = offset + len - sb.st_size;
        }
    }

    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (fileend_adj) {
            p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        } else {
            p = mmap(want_p, len, host_prot, flags, fd, offset);
        }
        if (p != want_p) {
            if (p != MAP_FAILED) {
                /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
                do_munmap(p, len);
                errno = EEXIST;
            }
            return -1;
        }

        if (fileend_adj) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
                           fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;

                /*
                 * We failed a map over the top of the successful anonymous
                 * mapping above. The only failure mode is running out of VMAs,
                 * and there's nothing that we can do to detect that earlier.
                 * If we have replaced an existing mapping with MAP_FIXED,
                 * then we cannot properly recover.  It's a coin toss whether
                 * it would be better to exit or continue here.
                 */
                if (!(flags & MAP_FIXED_NOREPLACE) &&
                    !page_check_range_empty(start, start + len - 1)) {
                    qemu_log("QEMU target_mmap late failure: %s",
                             strerror(save_errno));
                }

                do_munmap(want_p, len);
                errno = save_errno;
                return -1;
            }
        }
    } else {
        size_t host_len, part_len;

        /*
         * Take care to align the host memory.  Perform a larger anonymous
         * allocation and extract the aligned portion.  Remap the file on
         * top of that.
         */
        host_len = len + TARGET_PAGE_SIZE - host_page_size;
        p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }

        part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
        if (part_len) {
            part_len = TARGET_PAGE_SIZE - part_len;
            do_munmap(p, part_len);
            p += part_len;
            host_len -= part_len;
        }
        if (len < host_len) {
            do_munmap(p + len, host_len - len);
        }

        if (!(flags & MAP_ANONYMOUS)) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           flags | MAP_FIXED, fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;
                do_munmap(p, len);
                errno = save_errno;
                return -1;
            }
        }

        start = h2g(p);
    }

    last = start + len - 1;
    if (fileend_adj) {
        pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
    } else {
        pass_last = last;
    }
    return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
}

/*
 * Special case host page size > target page size.
 *
 * The two special cases are addresses and file offsets that are valid
 * for the guest but cannot be directly represented by the host.
 */
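/*
 * For example, with 4K target pages on a 64K-page host, a MAP_FIXED
 * guest mapping whose start is not 64K-aligned shares its first and
 * last host pages with neighbouring guest mappings; those edges are
 * handled by mmap_frag.  A file offset that is not congruent to the
 * start address modulo the host page size cannot be mapped directly
 * at all, and falls back to a private anonymous mapping filled by
 * mmap_pread.
 */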
static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
                            int target_prot, int host_prot,
                            int flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = NULL;
    off_t host_offset = offset & -host_page_size;
    abi_ulong last, real_start, real_last;
    bool misaligned_offset = false;
    size_t host_len;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        /*
         * Adjust the offset to something representable on the host.
         */
        host_len = len + offset - host_offset;
        p = mmap(want_p, host_len, host_prot, flags, fd, host_offset);
        if (p == MAP_FAILED) {
            return -1;
        }

        /* Update start to the file position at offset. */
        p += offset - host_offset;

        start = h2g(p);
        last = start + len - 1;
        return mmap_end(start, last, start, last, flags, page_flags);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        misaligned_offset = (start ^ offset) & (host_page_size - 1);

        /*
         * The fallback for misalignment is a private mapping + read.
         * This carries none of the semantics required of MAP_SHARED.
         */
        if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) {
            errno = EINVAL;
            return -1;
        }
    }

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * Handle the start and end of the mapping.
     */
    if (real_start < start) {
        abi_ulong real_page_last = real_start + host_page_size - 1;
        if (last <= real_page_last) {
            /* Entire allocation a subset of one host page. */
            if (!mmap_frag(real_start, start, last, target_prot,
                           flags, fd, offset)) {
                return -1;
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        if (!mmap_frag(real_start, start, real_page_last, target_prot,
                       flags, fd, offset)) {
            return -1;
        }
        real_start = real_page_last + 1;
    }

    if (last < real_last) {
        abi_ulong real_page_start = real_last - host_page_size + 1;
        if (!mmap_frag(real_page_start, real_page_start, last,
                       target_prot, flags, fd,
                       offset + real_page_start - start)) {
            return -1;
        }
        real_last = real_page_start - 1;
    }

    if (real_start > real_last) {
        return mmap_end(start, last, -1, 0, flags, page_flags);
    }

    /*
     * Handle the middle of the mapping.
     */

    host_len = real_last - real_start + 1;
    want_p += real_start - start;

    if (flags & MAP_ANONYMOUS) {
        p = mmap(want_p, host_len, host_prot, flags, -1, 0);
    } else if (!misaligned_offset) {
        p = mmap(want_p, host_len, host_prot, flags, fd,
                 offset + real_start - start);
    } else {
        p = mmap(want_p, host_len, host_prot | PROT_WRITE,
                 flags | MAP_ANONYMOUS, -1, 0);
    }
    if (p != want_p) {
        if (p != MAP_FAILED) {
            do_munmap(p, host_len);
            errno = EEXIST;
        }
        return -1;
    }

    if (misaligned_offset) {
        if (!mmap_pread(fd, p, host_len, offset + real_start - start, false)) {
            do_munmap(p, host_len);
            return -1;
        }
        if (!(host_prot & PROT_WRITE)) {
            mprotect(p, host_len, host_prot);
        }
    }

    return mmap_end(start, last, -1, 0, flags, page_flags);
}

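/*
 * Resolve placement and dispatch a guest mmap request: under
 * reserved_va the destination is chosen up front and converted to
 * MAP_FIXED, then one of the three helpers above is selected according
 * to how the host page size compares with the target page size.
 */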
static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    int host_prot;

    /*
     * For reserved_va, we are in full control of the allocation.
     * Find a suitable hole and convert to MAP_FIXED.
     */
    if (reserved_va) {
        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, start + len - 1)) {
                errno = EEXIST;
                return -1;
            }
            flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
        } else if (!(flags & MAP_FIXED)) {
            abi_ulong real_start = start & -host_page_size;
            off_t host_offset = offset & -host_page_size;
            size_t real_len = len + offset - host_offset;
            abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);

            start = mmap_find_vma(real_start, real_len, align);
            if (start == (abi_ulong)-1) {
                errno = ENOMEM;
                return -1;
            }
            start += offset - host_offset;
            flags |= MAP_FIXED;
        }
    }

    host_prot = target_to_host_prot(target_prot);

    if (host_page_size == TARGET_PAGE_SIZE) {
        return mmap_h_eq_g(start, len, host_prot, flags,
                           page_flags, fd, offset);
    } else if (host_page_size < TARGET_PAGE_SIZE) {
        return mmap_h_lt_g(start, len, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    } else {
        return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    }
}

/* NOTE: all the constants are the HOST ones. */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
            tcg_cflags_set(cpu, CF_PARALLEL);
            tb_flush(cpu);
        }
    }

    return ret;
}

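/*
 * Unmap (or, under reserved_va, re-reserve) the host pages backing the
 * guest range [start, start + len), trimming the first and last host
 * pages when they still contain other live guest pages.
 */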
static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    return do_munmap(host_start, real_len);
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        errno = EINVAL;
        return -1;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}

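/*
 * Emulate mremap for the guest.  MREMAP_FIXED is passed through with
 * translated addresses; MREMAP_MAYMOVE picks a destination with
 * mmap_find_vma and remaps onto it; otherwise the remap is attempted
 * in place, which under reserved_va is only done when growth would not
 * collide with another guest mapping.
 */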
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /*
             * If new and old addresses overlap then the above mremap will
             * already have failed with EINVAL.
             */
            mmap_reserve_or_unmap(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve_or_unmap(old_addr, old_size);
            }
        }
    } else {
        int page_flags = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                page_flags |= page_get_flags(addr);
            }
        }
        if (page_flags == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve_or_unmap(old_addr + old_size,
                                          old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        shm_region_rm_complete(old_addr, old_addr + old_size - 1);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
        shm_region_rm_complete(new_addr, new_addr + new_size - 1);
    }
    mmap_unlock();
    return new_addr;
}

abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /* Translate for some architectures which have different MADV_xxx values */
    switch (advice) {
    case TARGET_MADV_DONTNEED:      /* alpha */
        advice = MADV_DONTNEED;
        break;
    case TARGET_MADV_WIPEONFORK:    /* parisc */
        advice = MADV_WIPEONFORK;
        break;
    case TARGET_MADV_KEEPONFORK:    /* parisc */
        advice = MADV_KEEPONFORK;
        break;
    /* we do not care about the other MADV_xxx values yet */
    }

    /*
     * Most advice values are hints, so ignoring and returning success is ok.
     *
     * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
     * MADV_KEEPONFORK are not hints and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
     * return failure if not.
     *
     * MADV_DONTNEED is passed through as well, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success, which is broken but some userspace programs fail to work
     * otherwise.  Completely implementing such emulation is quite
     * complicated though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_WIPEONFORK:
    case MADV_KEEPONFORK:
        ret = -EINVAL;
        /* fall through */
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if ((advice == MADV_DONTNEED) && (ret == 0)) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/*
 * For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned.  Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

#if defined(__arm__) || defined(__mips__) || defined(__sparc__)
#define HOST_FORCE_SHMLBA 1
#else
#define HOST_FORCE_SHMLBA 0
#endif

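/*
 * Attach a SysV shared memory segment on behalf of the guest.  The
 * address must satisfy both the guest's and the host's SHMLBA, since
 * the attachment is ultimately performed by host shmat(); when host
 * and target page sizes differ, anonymous pages are mapped across the
 * whole range first so that the guest sees a full target-page-aligned
 * region.
 */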
abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                       abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct shmid_ds shm_info;
    int ret;
    int h_pagesize;
    int t_shmlba, h_shmlba, m_shmlba;
    size_t t_len, h_len, m_len;

    /* shmat pointers are always untagged */

    /*
     * Because we can't use host shmat() unless the address is sufficiently
     * aligned for the host, we'll need to check both.
     * TODO: Could be fixed with softmmu.
     */
    t_shmlba = target_shmlba(cpu_env);
    h_pagesize = qemu_real_host_page_size();
    h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
    m_shmlba = MAX(t_shmlba, h_shmlba);

    if (shmaddr) {
        if (shmaddr & (m_shmlba - 1)) {
            if (shmflg & SHM_RND) {
                /*
                 * The guest is allowing the kernel to round the address.
                 * Assume that the guest is ok with us rounding to the
                 * host required alignment too.  Anyway if we don't, we'll
                 * get an error from the kernel.
                 */
                shmaddr &= ~(m_shmlba - 1);
                if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
                    return -TARGET_EINVAL;
                }
            } else {
                int require = TARGET_PAGE_SIZE;
#ifdef TARGET_FORCE_SHMLBA
                require = t_shmlba;
#endif
                /*
                 * Include host required alignment, as otherwise we cannot
                 * use host shmat at all.
                 */
                require = MAX(require, h_shmlba);
                if (shmaddr & (require - 1)) {
                    return -TARGET_EINVAL;
                }
            }
        }
    } else {
        if (shmflg & SHM_REMAP) {
            return -TARGET_EINVAL;
        }
    }
    /* All rounding now manually concluded. */
    shmflg &= ~SHM_RND;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }
    t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
    h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
    m_len = MAX(t_len, h_len);

    if (!guest_range_valid_untagged(shmaddr, m_len)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        bool mapped = false;
        void *want, *test;
        abi_ulong last;

        if (!shmaddr) {
            shmaddr = mmap_find_vma(0, m_len, m_shmlba);
            if (shmaddr == -1) {
                return -TARGET_ENOMEM;
            }
            mapped = !reserved_va;
        } else if (shmflg & SHM_REMAP) {
            /*
             * If host page size > target page size, the host shmat may map
             * more memory than the guest expects.  Reject a mapping that
             * would replace memory in the unexpected gap.
             * TODO: Could be fixed with softmmu.
             */
            if (t_len < h_len &&
                !page_check_range_empty(shmaddr + t_len,
                                        shmaddr + h_len - 1)) {
                return -TARGET_EINVAL;
            }
        } else {
            if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
                return -TARGET_EINVAL;
            }
        }

        /* All placement is now complete. */
        want = (void *)g2h_untagged(shmaddr);

        /*
         * Map anonymous pages across the entire range, then remap with
         * the shared memory.  This is required for a number of corner
         * cases for which host and guest page sizes differ.
         */
        if (h_len != t_len) {
            int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
            int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
                       | (reserved_va || mapped || (shmflg & SHM_REMAP)
                          ? MAP_FIXED : MAP_FIXED_NOREPLACE);

            test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
            if (unlikely(test != want)) {
                /* shmat returns EINVAL not EEXIST like mmap. */
                ret = (test == MAP_FAILED && errno != EEXIST
                       ? get_errno(-1) : -TARGET_EINVAL);
                if (mapped) {
                    do_munmap(want, m_len);
                }
                return ret;
            }
            mapped = true;
        }

        if (reserved_va || mapped) {
            shmflg |= SHM_REMAP;
        }
        test = shmat(shmid, want, shmflg);
        if (test == MAP_FAILED) {
            ret = get_errno(-1);
            if (mapped) {
                do_munmap(want, m_len);
            }
            return ret;
        }
        assert(test == want);

        last = shmaddr + m_len - 1;
        page_set_flags(shmaddr, last,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
                       (shmflg & SHM_EXEC ? PAGE_EXEC : 0));

        shm_region_rm_complete(shmaddr, last);
        shm_region_add(shmaddr, last);
    }

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
        tcg_cflags_set(cpu, CF_PARALLEL);
        tb_flush(cpu);
    }

    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following shmat\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return shmaddr;
}

abi_long target_shmdt(abi_ulong shmaddr)
{
    abi_long rv;

    /* shmdt pointers are always untagged */

    WITH_MMAP_LOCK_GUARD() {
        abi_ulong last = shm_region_find(shmaddr);
        if (last == 0) {
            return -TARGET_EINVAL;
        }

        rv = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (rv == 0) {
            abi_ulong size = last - shmaddr + 1;

            page_set_flags(shmaddr, last, 0);
            shm_region_rm_complete(shmaddr, last);
            mmap_reserve_or_unmap(shmaddr, size);
        }
    }
    return rv;
}