Lines matching "+full:memory +full:-region" (all hits below are from the Linux kernel's mm/nommu.c, the memory-management code used on CPUs without an MMU)

1 // SPDX-License-Identifier: GPL-2.0-only
6 * have any form of memory management unit (thus no virtual memory).
8 * See Documentation/admin-guide/mm/nommu-mmap.rst
10 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
11 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
12 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
14 * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
31 #include <linux/backing-dev.h>
68 * Return the total memory allocated for this pointer, not
95 * region. This test is intentionally done in reverse order, in kobjsize()
97 * PAGE_SIZE for 0-order pages. in kobjsize()
102 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
104 return vma->vm_end - vma->vm_start; in kobjsize()
115 * follow_pfn - look up PFN at a user virtual address
116 * @vma: memory mapping
122 * Returns zero and the pfn at @pfn on success, -ve otherwise.
127 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
128 return -EINVAL; in follow_pfn()
175 mmap_write_lock(current->mm); in __vmalloc_user_flags()
176 vma = find_vma(current->mm, (unsigned long)ret); in __vmalloc_user_flags()
178 vma->vm_flags |= VM_USERMAP; in __vmalloc_user_flags()
179 mmap_write_unlock(current->mm); in __vmalloc_user_flags()
207 count = -(unsigned long) buf; in vread()
217 count = -(unsigned long) addr; in vwrite()
224 * vmalloc - allocate virtually contiguous memory
241 * vzalloc - allocate virtually contiguous memory with zero fill
247 * The memory allocated is set to zero.
259 * vmalloc_node - allocate memory on a specific node
276 * vzalloc_node - allocate memory on a specific node with zero fill
282 * The memory allocated is set to zero.
294 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
307 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
310 * The resulting memory area is 32bit addressable and zeroed so it can be
319 * We'll have to sort out the ZONE_DMA bits for 64-bit, in vmalloc_32_user()
366 return -EINVAL; in vm_insert_page()
373 return -EINVAL; in vm_map_pages()
380 return -EINVAL; in vm_map_pages_zero()
387 * like trying to un-brk an area that has already been mapped
393 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1()
395 if (brk < mm->start_brk || brk > mm->context.end_brk) in SYSCALL_DEFINE1()
396 return mm->brk; in SYSCALL_DEFINE1()
398 if (mm->brk == brk) in SYSCALL_DEFINE1()
399 return mm->brk; in SYSCALL_DEFINE1()
404 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
405 mm->brk = brk; in SYSCALL_DEFINE1()
410 * Ok, looks good - let it rip. in SYSCALL_DEFINE1()
412 flush_icache_user_range(mm->brk, brk); in SYSCALL_DEFINE1()
413 return mm->brk = brk; in SYSCALL_DEFINE1()
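The brk() fragments above show the nommu rule: the break may only move between mm->start_brk and mm->context.end_brk (the area reserved when the program was loaded), shrinking merely lowers mm->brk, and growing just flushes the icache because the pages already exist. A minimal userspace sketch, assuming the portable sbrk() wrapper and 4 KiB pages (nothing here is taken from nommu.c itself):

    /* Grow the heap by one page and shrink it back with sbrk().
     * On a nommu kernel the grow step only succeeds while the new break
     * stays inside the brk region reserved at exec time. */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            void *start = sbrk(0);                  /* current break */

            if (sbrk(4096) == (void *)-1) {         /* grow by one 4 KiB page */
                    perror("sbrk grow");
                    return 1;
            }
            printf("break moved from %p to %p\n", start, sbrk(0));
            sbrk(-4096);                            /* shrink back to the original break */
            return 0;
    }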
417 * initialise the percpu counter for VM and region record slabs
429 * validate the region tree
430 * - the caller must hold the region lock
435 struct vm_region *region, *last; in validate_nommu_regions() local
443 BUG_ON(last->vm_end <= last->vm_start); in validate_nommu_regions()
444 BUG_ON(last->vm_top < last->vm_end); in validate_nommu_regions()
447 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
450 BUG_ON(region->vm_end <= region->vm_start); in validate_nommu_regions()
451 BUG_ON(region->vm_top < region->vm_end); in validate_nommu_regions()
452 BUG_ON(region->vm_start < last->vm_top); in validate_nommu_regions()
464 * add a region into the global tree
466 static void add_nommu_region(struct vm_region *region) in add_nommu_region() argument
478 if (region->vm_start < pregion->vm_start) in add_nommu_region()
479 p = &(*p)->rb_left; in add_nommu_region()
480 else if (region->vm_start > pregion->vm_start) in add_nommu_region()
481 p = &(*p)->rb_right; in add_nommu_region()
482 else if (pregion == region) in add_nommu_region()
488 rb_link_node(&region->vm_rb, parent, p); in add_nommu_region()
489 rb_insert_color(&region->vm_rb, &nommu_region_tree); in add_nommu_region()
495 * delete a region from the global tree
497 static void delete_nommu_region(struct vm_region *region) in delete_nommu_region() argument
502 rb_erase(&region->vm_rb, &nommu_region_tree); in delete_nommu_region()
520 * release a reference to a region
521 * - the caller must hold the region semaphore for writing, which this releases
522 * - the region may not have been added to the tree yet, in which case vm_top
525 static void __put_nommu_region(struct vm_region *region) in __put_nommu_region() argument
530 if (--region->vm_usage == 0) { in __put_nommu_region()
531 if (region->vm_top > region->vm_start) in __put_nommu_region()
532 delete_nommu_region(region); in __put_nommu_region()
535 if (region->vm_file) in __put_nommu_region()
536 fput(region->vm_file); in __put_nommu_region()
538 /* IO memory and memory shared directly out of the pagecache in __put_nommu_region()
540 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
541 free_page_series(region->vm_start, region->vm_top); in __put_nommu_region()
542 kmem_cache_free(vm_region_jar, region); in __put_nommu_region()
549 * release a reference to a region
551 static void put_nommu_region(struct vm_region *region) in put_nommu_region() argument
554 __put_nommu_region(region); in put_nommu_region()
561 * - should be called with mm->mmap_lock held writelocked
569 BUG_ON(!vma->vm_region); in add_vma_to_mm()
571 mm->map_count++; in add_vma_to_mm()
572 vma->vm_mm = mm; in add_vma_to_mm()
575 if (vma->vm_file) { in add_vma_to_mm()
576 mapping = vma->vm_file->f_mapping; in add_vma_to_mm()
580 vma_interval_tree_insert(vma, &mapping->i_mmap); in add_vma_to_mm()
587 p = &mm->mm_rb.rb_node; in add_vma_to_mm()
594 if (vma->vm_start < pvma->vm_start) in add_vma_to_mm()
595 p = &(*p)->rb_left; in add_vma_to_mm()
596 else if (vma->vm_start > pvma->vm_start) { in add_vma_to_mm()
598 p = &(*p)->rb_right; in add_vma_to_mm()
599 } else if (vma->vm_end < pvma->vm_end) in add_vma_to_mm()
600 p = &(*p)->rb_left; in add_vma_to_mm()
601 else if (vma->vm_end > pvma->vm_end) { in add_vma_to_mm()
603 p = &(*p)->rb_right; in add_vma_to_mm()
605 p = &(*p)->rb_left; in add_vma_to_mm()
608 p = &(*p)->rb_right; in add_vma_to_mm()
613 rb_link_node(&vma->vm_rb, parent, p); in add_vma_to_mm()
614 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
631 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
634 mm->map_count--; in delete_vma_from_mm()
637 if (curr->vmacache.vmas[i] == vma) { in delete_vma_from_mm()
644 if (vma->vm_file) { in delete_vma_from_mm()
645 mapping = vma->vm_file->f_mapping; in delete_vma_from_mm()
649 vma_interval_tree_remove(vma, &mapping->i_mmap); in delete_vma_from_mm()
655 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
665 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
666 vma->vm_ops->close(vma); in delete_vma()
667 if (vma->vm_file) in delete_vma()
668 fput(vma->vm_file); in delete_vma()
669 put_nommu_region(vma->vm_region); in delete_vma()
675 * - should be called with mm->mmap_lock at least held readlocked
688 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
689 if (vma->vm_start > addr) in find_vma()
691 if (vma->vm_end > addr) { in find_vma()
703 * - we don't extend stack VMAs under NOMMU conditions
712 * - not supported under NOMMU conditions
716 return -ENOMEM; in expand_stack()
721 * - should be called with mm->mmap_lock at least held readlocked
737 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
738 if (vma->vm_start < addr) in find_vma_exact()
740 if (vma->vm_start > addr) in find_vma_exact()
742 if (vma->vm_end == end) { in find_vma_exact()
768 return -EINVAL; in validate_mmap_request()
772 return -EINVAL; in validate_mmap_request()
775 return -EINVAL; in validate_mmap_request()
780 return -ENOMEM; in validate_mmap_request()
784 return -EOVERFLOW; in validate_mmap_request()
788 if (!file->f_op->mmap) in validate_mmap_request()
789 return -ENODEV; in validate_mmap_request()
792 * - we support chardevs that provide their own "memory" in validate_mmap_request()
793 * - we support files/blockdevs that are memory backed in validate_mmap_request()
795 if (file->f_op->mmap_capabilities) { in validate_mmap_request()
796 capabilities = file->f_op->mmap_capabilities(file); in validate_mmap_request()
800 switch (file_inode(file)->i_mode & S_IFMT) { in validate_mmap_request()
814 return -EINVAL; in validate_mmap_request()
820 if (!file->f_op->get_unmapped_area) in validate_mmap_request()
822 if (!(file->f_mode & FMODE_CAN_READ)) in validate_mmap_request()
826 if (!(file->f_mode & FMODE_READ)) in validate_mmap_request()
827 return -EACCES; in validate_mmap_request()
832 !(file->f_mode & FMODE_WRITE)) in validate_mmap_request()
833 return -EACCES; in validate_mmap_request()
836 (file->f_mode & FMODE_WRITE)) in validate_mmap_request()
837 return -EACCES; in validate_mmap_request()
840 return -EAGAIN; in validate_mmap_request()
843 return -ENODEV; in validate_mmap_request()
848 /* we're going to read the file into private memory we in validate_mmap_request()
851 return -ENODEV; in validate_mmap_request()
867 return -EINVAL; in validate_mmap_request()
874 if (path_noexec(&file->f_path)) { in validate_mmap_request()
876 return -EPERM; in validate_mmap_request()
879 if (current->personality & READ_IMPLIES_EXEC) { in validate_mmap_request()
891 /* anonymous mappings are always memory backed and can be in validate_mmap_request()
898 (current->personality & READ_IMPLIES_EXEC)) in validate_mmap_request()
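The permission checks above are the familiar mmap() rules (for instance, a writable MAP_SHARED mapping needs a descriptor opened for writing), plus the nommu-only requirement that a shared mapping be something the filesystem or driver can map directly (otherwise -ENODEV). A small userspace illustration of the -EACCES case; the file path is arbitrary:

    /* Expect EACCES: PROT_WRITE + MAP_SHARED on a read-only descriptor. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */
            void *p;

            if (fd < 0)
                    return 1;
            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    printf("mmap refused: %s\n", strerror(errno));
            else
                    munmap(p, 4096);
            close(fd);
            return 0;
    }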
924 /* vm_flags |= mm->def_flags; */ in determine_vm_flags()
927 /* attempt to share read-only copies of mapped file chunks */ in determine_vm_flags()
933 * if possible - used for chardevs, ramfs/tmpfs/shmfs and in determine_vm_flags()
941 * it's being traced - otherwise breakpoints set in it may interfere in determine_vm_flags()
944 if ((flags & MAP_PRIVATE) && current->ptrace) in determine_vm_flags()
958 ret = call_mmap(vma->vm_file, vma); in do_mmap_shared_file()
960 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
963 if (ret != -ENOSYS) in do_mmap_shared_file()
966 /* getting -ENOSYS indicates that direct mmap isn't possible (as in do_mmap_shared_file()
969 return -ENODEV; in do_mmap_shared_file()
976 struct vm_region *region, in do_mmap_private() argument
985 * shared mappings on devices or memory in do_mmap_private()
986 * - VM_MAYSHARE will be set if it may attempt to share in do_mmap_private()
989 ret = call_mmap(vma->vm_file, vma); in do_mmap_private()
992 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); in do_mmap_private()
993 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
996 if (ret != -ENOSYS) in do_mmap_private()
1005 /* allocate some memory to hold the mapping in do_mmap_private()
1006 * - note that this may not return a page-aligned address if the object in do_mmap_private()
1013 /* we don't want to allocate a power-of-2 sized page set */ in do_mmap_private()
1014 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) in do_mmap_private()
1023 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
1024 region->vm_start = (unsigned long) base; in do_mmap_private()
1025 region->vm_end = region->vm_start + len; in do_mmap_private()
1026 region->vm_top = region->vm_start + (total << PAGE_SHIFT); in do_mmap_private()
1028 vma->vm_start = region->vm_start; in do_mmap_private()
1029 vma->vm_end = region->vm_start + len; in do_mmap_private()
1031 if (vma->vm_file) { in do_mmap_private()
1035 fpos = vma->vm_pgoff; in do_mmap_private()
1038 ret = kernel_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
1044 memset(base + ret, 0, len - ret); in do_mmap_private()
1053 free_page_series(region->vm_start, region->vm_top); in do_mmap_private()
1054 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1055 region->vm_end = vma->vm_end = 0; in do_mmap_private()
1056 region->vm_top = 0; in do_mmap_private()
1061 len, current->pid, current->comm); in do_mmap_private()
1063 return -ENOMEM; in do_mmap_private()
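A worked example of the vm_end/vm_top bookkeeping above, assuming 4 KiB pages: a 20 KiB private mapping needs 5 pages (point = 5), but the allocation is sized as a power-of-two page series, so get_order() picks order 3 (total = 8 pages, 32 KiB). vm_end is always vm_start + 20 KiB; with sysctl_nr_trim_pages at its usual default of 1 the 3-page excess is trimmed and vm_top ends up equal to vm_end, while with trimming disabled vm_top records the full vm_start + 32 KiB that actually backs the region.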
1079 struct vm_region *region; in do_mmap() local
1103 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); in do_mmap()
1104 if (!region) in do_mmap()
1107 vma = vm_area_alloc(current->mm); in do_mmap()
1111 region->vm_usage = 1; in do_mmap()
1112 region->vm_flags = vm_flags; in do_mmap()
1113 region->vm_pgoff = pgoff; in do_mmap()
1115 vma->vm_flags = vm_flags; in do_mmap()
1116 vma->vm_pgoff = pgoff; in do_mmap()
1119 region->vm_file = get_file(file); in do_mmap()
1120 vma->vm_file = get_file(file); in do_mmap()
1127 * - we can only share with a superset match on most regular files in do_mmap()
1128 * - shared mappings on character devices and memory backed files are in do_mmap()
1137 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap()
1143 if (!(pregion->vm_flags & VM_MAYSHARE)) in do_mmap()
1147 if (file_inode(pregion->vm_file) != in do_mmap()
1151 if (pregion->vm_pgoff >= pgend) in do_mmap()
1154 rpglen = pregion->vm_end - pregion->vm_start; in do_mmap()
1155 rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap()
1156 rpgend = pregion->vm_pgoff + rpglen; in do_mmap()
1162 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && in do_mmap()
1163 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { in do_mmap()
1164 /* new mapping is not a subset of the region */ in do_mmap()
1170 /* we've found a region we can share */ in do_mmap()
1171 pregion->vm_usage++; in do_mmap()
1172 vma->vm_region = pregion; in do_mmap()
1173 start = pregion->vm_start; in do_mmap()
1174 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; in do_mmap()
1175 vma->vm_start = start; in do_mmap()
1176 vma->vm_end = start + len; in do_mmap()
1178 if (pregion->vm_flags & VM_MAPPED_COPY) in do_mmap()
1179 vma->vm_flags |= VM_MAPPED_COPY; in do_mmap()
1183 vma->vm_region = NULL; in do_mmap()
1184 vma->vm_start = 0; in do_mmap()
1185 vma->vm_end = 0; in do_mmap()
1186 pregion->vm_usage--; in do_mmap()
1191 fput(region->vm_file); in do_mmap()
1192 kmem_cache_free(vm_region_jar, region); in do_mmap()
1193 region = pregion; in do_mmap()
1199 * - this is the hook for quasi-memory character devices to in do_mmap()
1203 addr = file->f_op->get_unmapped_area(file, addr, len, in do_mmap()
1207 if (ret != -ENOSYS) in do_mmap()
1213 ret = -ENODEV; in do_mmap()
1219 vma->vm_start = region->vm_start = addr; in do_mmap()
1220 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1225 vma->vm_region = region; in do_mmap()
1228 * - the region is filled in if NOMMU_MAP_DIRECT is still set in do_mmap()
1230 if (file && vma->vm_flags & VM_SHARED) in do_mmap()
1233 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1236 add_nommu_region(region); in do_mmap()
1239 if (!vma->vm_file && in do_mmap()
1242 memset((void *)region->vm_start, 0, in do_mmap()
1243 region->vm_end - region->vm_start); in do_mmap()
1246 result = vma->vm_start; in do_mmap()
1248 current->mm->total_vm += len >> PAGE_SHIFT; in do_mmap()
1251 add_vma_to_mm(current->mm, vma); in do_mmap()
1253 /* we flush the region from the icache only when the first executable in do_mmap()
1255 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1256 flush_icache_user_range(region->vm_start, region->vm_end); in do_mmap()
1257 region->vm_icache_flushed = true; in do_mmap()
1267 if (region->vm_file) in do_mmap()
1268 fput(region->vm_file); in do_mmap()
1269 kmem_cache_free(vm_region_jar, region); in do_mmap()
1270 if (vma->vm_file) in do_mmap()
1271 fput(vma->vm_file); in do_mmap()
1278 ret = -EINVAL; in do_mmap()
1282 kmem_cache_free(vm_region_jar, region); in do_mmap()
1284 len, current->pid); in do_mmap()
1286 return -ENOMEM; in do_mmap()
1289 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n", in do_mmap()
1290 len, current->pid); in do_mmap()
1292 return -ENOMEM; in do_mmap()
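The region-sharing walk above is what lets two MAP_SHARED mappings of the same file coexist without an MMU: if the new request is a subset of an existing shared region, that region's vm_usage is bumped and the new vma simply points into it. A rough userspace illustration; /tmp/shared-demo is a hypothetical path, and on a nommu system the file would need to live on a filesystem that supports direct shared mappings (e.g. ramfs):

    /* Map the same file MAP_SHARED twice and compare the two views.
     * Without an MMU the second mapping lands inside the first region
     * (typically at the identical address); with an MMU the addresses
     * differ but both still alias the same page cache pages. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/shared-demo", O_RDWR | O_CREAT, 0600);
            char *a, *b;

            if (fd < 0 || ftruncate(fd, 4096) < 0)
                    return 1;
            a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (a == MAP_FAILED || b == MAP_FAILED)
                    return 1;
            a[0] = 'x';
            printf("a=%p b=%p b[0]=%c\n", (void *)a, (void *)b, b[0]);
            return 0;
    }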
1300 unsigned long retval = -EBADF; in ksys_mmap_pgoff()
1341 return -EFAULT; in SYSCALL_DEFINE1()
1343 return -EINVAL; in SYSCALL_DEFINE1()
1358 struct vm_region *region; in split_vma() local
1362 * only a single usage on the region) */ in split_vma()
1363 if (vma->vm_file) in split_vma()
1364 return -ENOMEM; in split_vma()
1366 if (mm->map_count >= sysctl_max_map_count) in split_vma()
1367 return -ENOMEM; in split_vma()
1369 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); in split_vma()
1370 if (!region) in split_vma()
1371 return -ENOMEM; in split_vma()
1375 kmem_cache_free(vm_region_jar, region); in split_vma()
1376 return -ENOMEM; in split_vma()
1380 *region = *vma->vm_region; in split_vma()
1381 new->vm_region = region; in split_vma()
1383 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1386 region->vm_top = region->vm_end = new->vm_end = addr; in split_vma()
1388 region->vm_start = new->vm_start = addr; in split_vma()
1389 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
1392 if (new->vm_ops && new->vm_ops->open) in split_vma()
1393 new->vm_ops->open(new); in split_vma()
1397 delete_nommu_region(vma->vm_region); in split_vma()
1399 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1400 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1402 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1403 vma->vm_region->vm_top = addr; in split_vma()
1405 add_nommu_region(vma->vm_region); in split_vma()
1406 add_nommu_region(new->vm_region); in split_vma()
1421 struct vm_region *region; in shrink_vma() local
1426 if (from > vma->vm_start) in shrink_vma()
1427 vma->vm_end = from; in shrink_vma()
1429 vma->vm_start = to; in shrink_vma()
1432 /* cut the backing region down to size */ in shrink_vma()
1433 region = vma->vm_region; in shrink_vma()
1434 BUG_ON(region->vm_usage != 1); in shrink_vma()
1437 delete_nommu_region(region); in shrink_vma()
1438 if (from > region->vm_start) { in shrink_vma()
1439 to = region->vm_top; in shrink_vma()
1440 region->vm_top = region->vm_end = from; in shrink_vma()
1442 region->vm_start = to; in shrink_vma()
1444 add_nommu_region(region); in shrink_vma()
1453 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1464 return -EINVAL; in do_munmap()
1473 pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n", in do_munmap()
1474 current->pid, current->comm, in do_munmap()
1475 start, start + len - 1); in do_munmap()
1478 return -EINVAL; in do_munmap()
1481 /* we're allowed to split an anonymous VMA but not a file-backed one */ in do_munmap()
1482 if (vma->vm_file) { in do_munmap()
1484 if (start > vma->vm_start) in do_munmap()
1485 return -EINVAL; in do_munmap()
1486 if (end == vma->vm_end) in do_munmap()
1488 vma = vma->vm_next; in do_munmap()
1490 return -EINVAL; in do_munmap()
1493 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1495 if (start < vma->vm_start || end > vma->vm_end) in do_munmap()
1496 return -EINVAL; in do_munmap()
1498 return -EINVAL; in do_munmap()
1499 if (end != vma->vm_end && offset_in_page(end)) in do_munmap()
1500 return -EINVAL; in do_munmap()
1501 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
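These checks are why unmapping is more rigid without an MMU: an anonymous vma can be split or shrunk, but a file-backed mapping has to be unmapped with exactly the bounds it was mapped with. A hedged userspace probe (the use of /proc/self/exe is just a convenient readable file):

    /* Try to unmap only the first page of a two-page private file mapping.
     * With an MMU this succeeds; on nommu, do_munmap() above is expected
     * to refuse it with -EINVAL because the vma is file-backed. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/proc/self/exe", O_RDONLY);
            char *p;

            if (fd < 0)
                    return 1;
            p = mmap(NULL, 2 * 4096, PROT_READ, MAP_PRIVATE, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            if (munmap(p, 4096) < 0)
                    printf("partial munmap refused: %s\n", strerror(errno));
            else
                    printf("partial munmap succeeded\n");
            return 0;
    }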
1518 struct mm_struct *mm = current->mm; in vm_munmap()
1543 mm->total_vm = 0; in exit_mmap()
1545 while ((vma = mm->mmap)) { in exit_mmap()
1546 mm->mmap = vma->vm_next; in exit_mmap()
1555 return -ENOMEM; in vm_brk()
1563 * as long as it stays within the region allocated by do_mmap_private() and the
1578 return (unsigned long) -EINVAL; in do_mremap()
1581 return -EINVAL; in do_mremap()
1584 return (unsigned long) -EINVAL; in do_mremap()
1586 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1588 return (unsigned long) -EINVAL; in do_mremap()
1590 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1591 return (unsigned long) -EFAULT; in do_mremap()
1593 if (vma->vm_flags & VM_MAYSHARE) in do_mremap()
1594 return (unsigned long) -EPERM; in do_mremap()
1596 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1597 return (unsigned long) -ENOMEM; in do_mremap()
1599 /* all checks complete - do it */ in do_mremap()
1600 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1601 return vma->vm_start; in do_mremap()
1610 mmap_write_lock(current->mm); in SYSCALL_DEFINE5()
1612 mmap_write_unlock(current->mm); in SYSCALL_DEFINE5()
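Under nommu, then, mremap() can only resize a private mapping in place: moves and MREMAP_FIXED are rejected, shareable mappings get -EPERM, and growth is capped by the backing region. A small userspace sketch of the operation that is safe everywhere, an in-place shrink (4 KiB pages assumed):

    /* Shrink an anonymous private mapping in place with mremap().
     * No MREMAP_MAYMOVE is passed: on nommu a move would be refused. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t pg = 4096;
            void *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            if (mremap(p, 4 * pg, 2 * pg, 0) == MAP_FAILED)   /* in-place shrink */
                    return 1;
            printf("mapping at %p shrunk to two pages\n", p);
            return 0;
    }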
1626 return -EINVAL; in remap_pfn_range()
1628 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1636 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1638 pfn += vma->vm_pgoff; in vm_iomap_memory()
1639 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
1646 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1648 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1649 return -EINVAL; in remap_vmalloc_range()
1651 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1652 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
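remap_vmalloc_range() is the kernel-internal half of a common driver pattern: a buffer allocated with vmalloc_user() is exposed to userspace from a character device's .mmap handler. The nommu variant above simply rewrites vm_start to point straight at the buffer and insists the vma carries VM_USERMAP. A compile-oriented, hypothetical sketch of the driver side (my_mmap, my_buf and MY_BUF_SIZE are invented for illustration, not taken from nommu.c):

    /* Hypothetical driver-side sketch: hand a vmalloc_user() buffer to
     * userspace from a chardev .mmap handler. */
    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/module.h>
    #include <linux/vmalloc.h>

    #define MY_BUF_SIZE PAGE_SIZE          /* invented for the example */

    static void *my_buf;                   /* set up elsewhere via vmalloc_user(MY_BUF_SIZE) */

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            if (vma->vm_end - vma->vm_start > MY_BUF_SIZE)
                    return -EINVAL;
            return remap_vmalloc_range(vma, my_buf, 0);
    }

    static const struct file_operations my_fops = {
            .owner = THIS_MODULE,
            .mmap  = my_mmap,
    };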
1661 return -ENOMEM; in arch_get_unmapped_area()
1691 if (addr + len >= vma->vm_end) in __access_remote_vm()
1692 len = vma->vm_end - addr; in __access_remote_vm()
1695 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
1698 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
1713 * access_remote_vm - access another process' address space
1730 * - source/target buffer must be kernel space
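access_remote_vm() is what services ptrace peeks/pokes and /proc/<pid>/mem; the nommu version above just copies within the flat address space, honouring VM_MAYREAD/VM_MAYWRITE. A small userspace way to exercise that path is to read your own memory back through /proc/self/mem (the variable names are arbitrary):

    /* Read a local variable back through /proc/self/mem, which the kernel
     * services via access_remote_vm(). */
    #define _FILE_OFFSET_BITS 64
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int secret = 42;
            int copy = 0;
            int fd = open("/proc/self/mem", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (pread(fd, &copy, sizeof(copy), (off_t)(uintptr_t)&secret) != sizeof(copy))
                    return 1;
            printf("read back %d\n", copy);
            close(fd);
            return 0;
    }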
1752 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1766 struct vm_region *region; in nommu_shrink_inode_mappings() local
1771 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in nommu_shrink_inode_mappings()
1774 i_mmap_lock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1777 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
1778 /* found one - only interested if it's shared out of the page in nommu_shrink_inode_mappings()
1780 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
1781 i_mmap_unlock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1783 return -ETXTBSY; /* not quite true, but near enough */ in nommu_shrink_inode_mappings()
1787 /* reduce any regions that overlap the dead zone - if in existence, in nommu_shrink_inode_mappings()
1793 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
1794 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
1797 region = vma->vm_region; in nommu_shrink_inode_mappings()
1798 r_size = region->vm_top - region->vm_start; in nommu_shrink_inode_mappings()
1799 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; in nommu_shrink_inode_mappings()
1802 region->vm_top -= r_top - newsize; in nommu_shrink_inode_mappings()
1803 if (region->vm_end > region->vm_top) in nommu_shrink_inode_mappings()
1804 region->vm_end = region->vm_top; in nommu_shrink_inode_mappings()
1808 i_mmap_unlock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1816 * This is intended to prevent a user from starting a single memory hogging
1820 * The default value is min(3% of free memory, 128MB)
1827 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); in init_user_reserve()
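A worked example of the min(3% of free memory, 128MB) default above: with 1 GiB free, 3% is roughly 31 MiB, so the user reserve is about 31 MiB; with 8 GiB free, 3% would be about 245 MiB, so the 128 MiB cap applies instead. init_admin_reserve() below follows the same pattern with a much smaller cap, since it only has to leave room for an administrator to log in and kill the offending process.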
1838 * to log in and kill a memory hogging process.
1848 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); in init_admin_reserve()