Lines matching full:if

17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
43 if (mmap_lock_count++ == 0) { in mmap_lock()
51 if (--mmap_lock_count == 0) { in mmap_unlock()
64 if (mmap_lock_count) in mmap_fork_start()
71 if (child) { in mmap_fork_end()
96 if (i->start == start) { in shm_region_find()
109 if (i->start >= start && i->last <= last) { in shm_region_rm_complete()
119 * Return 0 if the target prot bitmask is invalid, otherwise
132 * The PROT_BTI bit is only accepted if the cpu supports the feature. in validate_prot_to_pageflags()
134 * the bit has been requested. If set and valid, record the bit in validate_prot_to_pageflags()
137 if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) { in validate_prot_to_pageflags()
142 if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) { in validate_prot_to_pageflags()
182 if ((start & ~TARGET_PAGE_MASK) != 0) { in target_mprotect()
186 if (!page_flags) { in target_mprotect()
189 if (len == 0) { in target_mprotect()
193 if (!guest_range_valid_untagged(start, len)) { in target_mprotect()
204 if (host_last - host_start < host_page_size) { in target_mprotect()
218 if (host_start < start) { in target_mprotect()
224 /* If the resulting sum differs, create a new range. */ in target_mprotect()
225 if (prot1 != target_prot) { in target_mprotect()
234 if (last < host_last) { in target_mprotect()
240 /* If the resulting sum differs, create a new range. */ in target_mprotect()
241 if (prot1 != target_prot) { in target_mprotect()
250 /* Create a range for the middle, if any remains. */ in target_mprotect()
251 if (host_start < host_last) { in target_mprotect()
262 if (ret != 0) { in target_mprotect()
277 * If reserved_va, we must replace the memory reservation.
281 if (reserved_va) { in do_munmap()
293 * If @zero, zero the rest of the block at EOF.
301 if (likely(r == len)) { in mmap_pread()
305 if (r == 0) { in mmap_pread()
307 if (zero) { in mmap_pread()
312 if (r > 0) { in mmap_pread()
317 } else if (errno != EINTR) { in mmap_pread()
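
The mmap_pread() hits above outline a read loop that tolerates short reads and EINTR, and that can zero-fill the rest of the block when EOF is reached. As a minimal standalone sketch of that pattern only (the helper name and exact signature here are invented, not QEMU's mmap_pread()):

#define _GNU_SOURCE
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Hypothetical helper: read exactly @len bytes from @fd at @offset into
 * @buf, retrying after EINTR and short reads.  If EOF is hit first and
 * @zero is set, zero-fill the remainder and report success; otherwise
 * report failure.
 */
static bool pread_full(int fd, void *buf, size_t len, off_t offset, bool zero)
{
    char *p = buf;

    while (len > 0) {
        ssize_t r = pread(fd, p, len, offset);

        if (r > 0) {
            p += r;
            offset += r;
            len -= r;
        } else if (r == 0) {
            if (zero) {
                memset(p, 0, len);  /* pad the tail of the block at EOF */
                return true;
            }
            return false;
        } else if (errno != EINTR) {
            return false;
        }
        /* r < 0 && errno == EINTR: retry the same read */
    }
    return true;
}
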
327 * Here be dragons. This case will not work if there is an existing
343 if (!(flags & MAP_ANONYMOUS) in mmap_frag()
348 * error if write is possible while it is a shared mapping. in mmap_frag()
366 if (prot_old == 0) { in mmap_frag()
375 if (p != host_start) { in mmap_frag()
376 if (p != MAP_FAILED) { in mmap_frag()
390 if (!(host_prot_old & PROT_WRITE)) { in mmap_frag()
396 if (flags & MAP_ANONYMOUS) { in mmap_frag()
398 } else if (!mmap_pread(fd, g2h_untagged(start), last - start + 1, in mmap_frag()
404 if (host_prot_new != host_prot_old) { in mmap_frag()
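
The mmap_frag() hits above revolve around filling part of a host page from a file by hand: if the host page is not currently writable it is made writable, the bytes are read in (or zeroed for MAP_ANONYMOUS), and the page is then left with the combined protections. A rough single-shot sketch of that idea, with invented names and no retry handling, assuming a single host page:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Hypothetical sketch, not QEMU's mmap_frag(): copy @len file bytes to
 * @dest inside the host page at @page (of size @pgsize), temporarily
 * granting PROT_WRITE if the existing protection @prot_old lacks it,
 * then leave the page with protection @prot_new.
 */
static int fill_fragment(void *page, size_t pgsize,
                         void *dest, size_t len,
                         int fd, off_t file_off,
                         int prot_old, int prot_new)
{
    int prot_tmp = prot_old | PROT_WRITE;

    if (!(prot_old & PROT_WRITE) &&
        mprotect(page, pgsize, prot_tmp) != 0) {
        return -1;
    }
    if (pread(fd, dest, len, file_off) != (ssize_t)len) {
        return -1;
    }
    if (prot_new != prot_tmp &&
        mprotect(page, pgsize, prot_new) != 0) {
        return -1;
    }
    return 0;
}
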
424 if (ret == -1 && start > mmap_min_addr) { in mmap_find_vma_reserved()
436 * Return -1 if error.
447 /* If 'start' == 0, then a default start address is used. */ in mmap_find_vma()
448 if (start == 0) { in mmap_find_vma()
456 if (reserved_va) { in mmap_find_vma()
475 /* ENOMEM, if host address space has no memory */ in mmap_find_vma()
476 if (ptr == MAP_FAILED) { in mmap_find_vma()
486 if (h2g_valid(ptr + size - 1)) { in mmap_find_vma()
489 if ((addr & (align - 1)) == 0) { in mmap_find_vma()
491 if (start == mmap_next_start && addr >= task_unmapped_base) { in mmap_find_vma()
526 * again at low memory. If any repetition, fail. in mmap_find_vma()
534 /* ENOMEM if we checked the whole of the target address space. */ in mmap_find_vma()
535 if (addr == (abi_ulong)-1) { in mmap_find_vma()
537 } else if (addr == 0) { in mmap_find_vma()
538 if (wrapped) { in mmap_find_vma()
549 } else if (wrapped && addr >= start) { in mmap_find_vma()
563 if (flags & MAP_ANONYMOUS) { in mmap_end()
567 if (passthrough_start > passthrough_last) { in mmap_end()
570 if (start < passthrough_start) { in mmap_end()
575 if (passthrough_last < last) { in mmap_end()
581 if (qemu_loglevel_mask(CPU_LOG_PAGE)) { in mmap_end()
583 if (f) { in mmap_end()
603 if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { in mmap_h_eq_g()
608 if (p == MAP_FAILED) { in mmap_h_eq_g()
611 /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */ in mmap_h_eq_g()
612 if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) { in mmap_h_eq_g()
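
The mmap_h_eq_g() hits above include the comment about emulating MAP_FIXED_NOREPLACE when the host kernel does not support it. The usual trick is that an old kernel treats the unknown flag as a plain hint, so a successful mapping at an address other than the one requested means the flag was ignored and the range was busy. A hedged sketch of that trick (not the QEMU code; the helper name is invented, and the fallback flag value is the Linux uapi constant):

#define _GNU_SOURCE
#include <errno.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000  /* Linux uapi value, for old headers */
#endif

/*
 * Hypothetical helper: mmap with MAP_FIXED_NOREPLACE semantics even on
 * kernels that silently ignore the flag.  If the kernel honoured it, a
 * busy range already fails with EEXIST; if it ignored it, the mapping
 * lands elsewhere, so undo it and report EEXIST ourselves.
 */
static void *mmap_fixed_noreplace(void *want, size_t len, int prot,
                                  int flags, int fd, off_t offset)
{
    void *p = mmap(want, len, prot, flags | MAP_FIXED_NOREPLACE, fd, offset);

    if (p != MAP_FAILED && p != want) {
        munmap(p, len);
        errno = EEXIST;
        p = MAP_FAILED;
    }
    return p;
}
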
632 * For example, if mmaping a file of 100 bytes on a host with 4K
642 * If the file is later extended (e.g. ftruncate), the SIGBUS
659 if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { in mmap_h_lt_g()
663 if (!(flags & MAP_ANONYMOUS)) { in mmap_h_lt_g()
666 if (fstat(fd, &sb) == -1) { in mmap_h_lt_g()
669 if (offset >= sb.st_size) { in mmap_h_lt_g()
677 } else if (offset + len > sb.st_size) { in mmap_h_lt_g()
686 if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) { in mmap_h_lt_g()
687 if (fileend_adj) { in mmap_h_lt_g()
692 if (p != want_p) { in mmap_h_lt_g()
693 if (p != MAP_FAILED) { in mmap_h_lt_g()
701 if (fileend_adj) { in mmap_h_lt_g()
706 if (t == MAP_FAILED) { in mmap_h_lt_g()
713 * If we have replaced an existing mapping with MAP_FIXED, in mmap_h_lt_g()
717 if (!(flags & MAP_FIXED_NOREPLACE) && in mmap_h_lt_g()
738 if (p == MAP_FAILED) { in mmap_h_lt_g()
743 if (part_len) { in mmap_h_lt_g()
749 if (len < host_len) { in mmap_h_lt_g()
753 if (!(flags & MAP_ANONYMOUS)) { in mmap_h_lt_g()
757 if (t == MAP_FAILED) { in mmap_h_lt_g()
769 if (fileend_adj) { in mmap_h_lt_g()
794 if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { in mmap_h_gt_g()
798 if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { in mmap_h_gt_g()
804 if (p == MAP_FAILED) { in mmap_h_gt_g()
816 if (!(flags & MAP_ANONYMOUS)) { in mmap_h_gt_g()
823 if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) { in mmap_h_gt_g()
836 if (real_start < start) { in mmap_h_gt_g()
838 if (last <= real_page_last) { in mmap_h_gt_g()
840 if (!mmap_frag(real_start, start, last, target_prot, in mmap_h_gt_g()
847 if (!mmap_frag(real_start, start, real_page_last, target_prot, in mmap_h_gt_g()
854 if (last < real_last) { in mmap_h_gt_g()
856 if (!mmap_frag(real_page_start, real_page_start, last, in mmap_h_gt_g()
864 if (real_start > real_last) { in mmap_h_gt_g()
875 if (flags & MAP_ANONYMOUS) { in mmap_h_gt_g()
877 } else if (!misaligned_offset) { in mmap_h_gt_g()
884 if (p != want_p) { in mmap_h_gt_g()
885 if (p != MAP_FAILED) { in mmap_h_gt_g()
892 if (misaligned_offset) { in mmap_h_gt_g()
893 if (!mmap_pread(fd, p, host_len, offset + real_start - start, false)) { in mmap_h_gt_g()
897 if (!(host_prot & PROT_WRITE)) { in mmap_h_gt_g()
916 if (reserved_va) { in target_mmap__locked()
917 if (flags & MAP_FIXED_NOREPLACE) { in target_mmap__locked()
919 if (!page_check_range_empty(start, start + len - 1)) { in target_mmap__locked()
924 } else if (!(flags & MAP_FIXED)) { in target_mmap__locked()
931 if (start == (abi_ulong)-1) { in target_mmap__locked()
942 if (host_page_size == TARGET_PAGE_SIZE) { in target_mmap__locked()
945 } else if (host_page_size < TARGET_PAGE_SIZE) { in target_mmap__locked()
963 if (!len) { in target_mmap()
969 if (!page_flags) { in target_mmap()
976 if (!len || len != (size_t)len) { in target_mmap()
981 if (offset & ~TARGET_PAGE_MASK) { in target_mmap()
985 if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) { in target_mmap()
986 if (start & ~TARGET_PAGE_MASK) { in target_mmap()
990 if (!guest_range_valid_untagged(start, len)) { in target_mmap()
1004 * If we're mapping shared memory, ensure we generate code for parallel in target_mmap()
1009 if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) { in target_mmap()
1011 if (!tcg_cflags_has(cpu, CF_PARALLEL)) { in target_mmap()
1036 * If guest pages remain on the first or last host pages, in mmap_reserve_or_unmap()
1041 if (real_last - real_start < host_page_size) { in mmap_reserve_or_unmap()
1049 if (prot != 0) { in mmap_reserve_or_unmap()
1056 if (prot != 0) { in mmap_reserve_or_unmap()
1063 if (prot != 0) { in mmap_reserve_or_unmap()
1067 if (real_last < real_start) { in mmap_reserve_or_unmap()
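
The mmap_reserve_or_unmap() hits above embody the rule stated earlier in the listing ("If reserved_va, we must replace the memory reservation"): when the guest address space lives inside one large host reservation, freeing a range must not punch a hole in that reservation. A minimal sketch of the idea, assuming a host-page-aligned range and an externally supplied reserved_va flag (names invented):

#define _GNU_SOURCE
#include <stdbool.h>
#include <sys/mman.h>

/*
 * Hypothetical helper: under a reserved guest VA, "unmapping" re-installs
 * an inaccessible anonymous mapping over the range so the reservation
 * stays contiguous; otherwise a plain munmap() is enough.
 */
static int reserve_or_unmap(void *start, size_t len, bool reserved_va)
{
    if (reserved_va) {
        void *p = mmap(start, len, PROT_NONE,
                       MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                       -1, 0);
        return p == MAP_FAILED ? -1 : 0;
    }
    return munmap(start, len);
}
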
1084 if (start & ~TARGET_PAGE_MASK) { in target_munmap()
1089 if (len == 0 || !guest_range_valid_untagged(start, len)) { in target_munmap()
1096 if (likely(ret == 0)) { in target_munmap()
1112 if (!guest_range_valid_untagged(old_addr, old_size) || in target_mremap()
1123 if (flags & MREMAP_FIXED) { in target_mremap()
1127 if (reserved_va && host_addr != MAP_FAILED) { in target_mremap()
1129 * If new and old addresses overlap then the above mremap will in target_mremap()
1134 } else if (flags & MREMAP_MAYMOVE) { in target_mremap()
1139 if (mmap_start == -1) { in target_mremap()
1146 if (reserved_va) { in target_mremap()
1152 if (reserved_va && old_size < new_size) { in target_mremap()
1160 if (page_flags == 0) { in target_mremap()
1164 if (host_addr != MAP_FAILED) { in target_mremap()
1165 /* Check if address fits target address space */ in target_mremap()
1166 if (!guest_range_valid_untagged(h2g(host_addr), new_size)) { in target_mremap()
1172 } else if (reserved_va && old_size > new_size) { in target_mremap()
1183 if (host_addr == MAP_FAILED) { in target_mremap()
1203 if (start & ~TARGET_PAGE_MASK) { in target_madvise()
1206 if (len_in == 0) { in target_madvise()
1210 if (len == 0 || !guest_range_valid_untagged(start, len)) { in target_madvise()
1236 * If all guest pages have PAGE_PASSTHROUGH set, mappings have the in target_madvise()
1239 * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and in target_madvise()
1240 * return failure if not. in target_madvise()
1242 * MADV_DONTNEED is passed through as well, if possible. in target_madvise()
1243 * If passthrough isn't possible, we nevertheless (wrongly!) return in target_madvise()
1255 if (page_check_range(start, len, PAGE_PASSTHROUGH)) { in target_madvise()
1257 if ((advice == MADV_DONTNEED) && (ret == 0)) { in target_madvise()
1277 * this means that addresses are rounded to the large size if
1288 #if defined(__arm__) || defined(__mips__) || defined(__sparc__)
1316 if (shmaddr) { in target_shmat()
1317 if (shmaddr & (m_shmlba - 1)) { in target_shmat()
1318 if (shmflg & SHM_RND) { in target_shmat()
1322 * host required alignment too. Anyway if we don't, we'll in target_shmat()
1326 if (shmaddr == 0 && (shmflg & SHM_REMAP)) { in target_shmat()
1339 if (shmaddr & (require - 1)) { in target_shmat()
1345 if (shmflg & SHM_REMAP) { in target_shmat()
1354 if (is_error(ret)) { in target_shmat()
1362 if (!guest_range_valid_untagged(shmaddr, m_len)) { in target_shmat()
1371 if (!shmaddr) { in target_shmat()
1373 if (shmaddr == -1) { in target_shmat()
1377 } else if (shmflg & SHM_REMAP) { in target_shmat()
1379 * If host page size > target page size, the host shmat may map in target_shmat()
1384 if (t_len < h_len && in target_shmat()
1390 if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) { in target_shmat()
1403 if (h_len != t_len) { in target_shmat()
1410 if (unlikely(test != want)) { in target_shmat()
1414 if (mapped) { in target_shmat()
1422 if (reserved_va || mapped) { in target_shmat()
1426 if (test == MAP_FAILED) { in target_shmat()
1428 if (mapped) { in target_shmat()
1451 if (!tcg_cflags_has(cpu, CF_PARALLEL)) { in target_shmat()
1456 if (qemu_loglevel_mask(CPU_LOG_PAGE)) { in target_shmat()
1458 if (f) { in target_shmat()
1475 if (last == 0) { in target_shmdt()
1480 if (rv == 0) { in target_shmdt()
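
Finally, the target_shmat() hits above include the SHMLBA handling: an attach address that is not aligned to the (possibly target-specific) SHMLBA is either rounded down when SHM_RND is given or rejected. As a small illustration of those SysV semantics only, not of the emulation itself (the function name is invented; SHMLBA is assumed to be a power of two):

#include <errno.h>
#include <stdint.h>
#include <sys/shm.h>

/*
 * Illustrative only: apply shmat() address rounding rules.  Returns the
 * address to attach at, or (uintptr_t)-1 with errno = EINVAL when the
 * address is unaligned and SHM_RND was not requested.
 */
static uintptr_t shmat_round_addr(uintptr_t shmaddr, uintptr_t shmlba,
                                  int shmflg)
{
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);  /* round down to the SHMLBA boundary */
        } else {
            errno = EINVAL;
            return (uintptr_t)-1;
        }
    }
    return shmaddr;
}
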