Lines matching refs: vma
(Each entry gives the source line number, the matching source line, and the enclosing function; a trailing "argument" or "local" notes how the matched symbol is used there.)

75 static bool vma_had_uncowed_parents(struct vm_area_struct *vma)  in vma_had_uncowed_parents()  argument
81 return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain); in vma_had_uncowed_parents()
86 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev; in is_mergeable_vma() local
88 if (!mpol_equal(vmg->policy, vma_policy(vma))) in is_mergeable_vma()
98 if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
100 if (vma->vm_file != vmg->file) in is_mergeable_vma()
102 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx)) in is_mergeable_vma()
104 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name)) in is_mergeable_vma()
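The is_mergeable_vma() fragments above spell out the per-VMA compatibility test: same memory policy, identical vm_flags apart from VM_SOFTDIRTY, the same backing file, and a matching userfaultfd context and anon VMA name. Below is a minimal user-space model of that predicate; the toy_* types, the flag value and the pointer-identity comparisons are illustrative stand-ins (the kernel uses mpol_equal() and anon_vma_name_eq() for the deep comparisons), not the real definitions.

/* Toy model of the is_mergeable_vma() checks listed above.  Pointer
 * identity stands in for the kernel's mpol_equal()/anon_vma_name_eq()
 * comparisons, and the VM_SOFTDIRTY bit value is made up. */
#include <stdbool.h>

#define TOY_VM_SOFTDIRTY 0x08000000UL	/* ignored when comparing flags */

struct toy_vma {
	unsigned long vm_flags;
	const void *vm_file;	/* backing file, NULL if anonymous */
	const void *policy;	/* NUMA policy */
	const void *anon_name;	/* anon VMA name */
};

static bool toy_is_mergeable(const struct toy_vma *vma, unsigned long flags,
			     const void *file, const void *policy,
			     const void *anon_name)
{
	if (vma->policy != policy)
		return false;
	/* soft-dirty state alone must not prevent a merge */
	if ((vma->vm_flags ^ flags) & ~TOY_VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != file)
		return false;
	if (vma->anon_name != anon_name)
		return false;
	return true;
}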
141 struct vm_area_struct *vma, in init_multi_vma_prep() argument
148 vp->vma = vma; in init_multi_vma_prep()
149 vp->anon_vma = vma->anon_vma; in init_multi_vma_prep()
172 vp->file = vma->vm_file; in init_multi_vma_prep()
174 vp->mapping = vma->vm_file->f_mapping; in init_multi_vma_prep()
225 static void __vma_link_file(struct vm_area_struct *vma, in __vma_link_file() argument
228 if (vma_is_shared_maywrite(vma)) in __vma_link_file()
232 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
239 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
242 if (vma_is_shared_maywrite(vma)) in __remove_shared_vm_struct()
246 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
265 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_pre_update_vma() argument
269 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
274 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_post_update_vma() argument
278 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
289 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); in vma_prepare()
310 anon_vma_interval_tree_pre_update_vma(vp->vma); in vma_prepare()
317 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); in vma_prepare()
340 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); in vma_complete()
359 anon_vma_interval_tree_post_update_vma(vp->vma); in vma_complete()
369 uprobe_mmap(vp->vma); in vma_complete()
385 anon_vma_merge(vp->vma, vp->remove); in vma_complete()
389 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); in vma_complete()
411 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) in init_vma_prep() argument
413 init_multi_vma_prep(vp, vma, NULL); in init_vma_prep()
461 void remove_vma(struct vm_area_struct *vma) in remove_vma() argument
464 vma_close(vma); in remove_vma()
465 if (vma->vm_file) in remove_vma()
466 fput(vma->vm_file); in remove_vma()
467 mpol_put(vma_policy(vma)); in remove_vma()
468 vm_area_free(vma); in remove_vma()
476 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, in unmap_region() argument
479 struct mm_struct *mm = vma->vm_mm; in unmap_region()
484 unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, in unmap_region()
486 mas_set(mas, vma->vm_end); in unmap_region()
487 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
499 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, in __split_vma() argument
506 WARN_ON(vma->vm_start >= addr); in __split_vma()
507 WARN_ON(vma->vm_end <= addr); in __split_vma()
509 if (vma->vm_ops && vma->vm_ops->may_split) { in __split_vma()
510 err = vma->vm_ops->may_split(vma, addr); in __split_vma()
515 new = vm_area_dup(vma); in __split_vma()
523 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
531 err = vma_dup_policy(vma, new); in __split_vma()
535 err = anon_vma_clone(new, vma); in __split_vma()
545 vma_start_write(vma); in __split_vma()
548 init_vma_prep(&vp, vma); in __split_vma()
556 vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL); in __split_vma()
557 if (is_vm_hugetlb_page(vma)) in __split_vma()
558 hugetlb_split(vma, addr); in __split_vma()
561 vma->vm_start = addr; in __split_vma()
562 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; in __split_vma()
564 vma->vm_end = addr; in __split_vma()
568 vma_complete(&vp, vmi, vma->vm_mm); in __split_vma()
569 validate_mm(vma->vm_mm); in __split_vma()
592 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, in split_vma() argument
595 if (vma->vm_mm->map_count >= sysctl_max_map_count) in split_vma()
598 return __split_vma(vmi, vma, addr, new_below); in split_vma()
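__split_vma() above has to keep vm_pgoff consistent across the two halves: whichever half starts at the split address gets its page offset advanced by (addr - old_start) >> PAGE_SHIFT. The stand-alone program below reproduces that arithmetic for the new_below == 0 case; the struct and the 4 KiB page size are assumptions for illustration only.

/* Demonstrates the vm_pgoff bookkeeping performed by __split_vma() above
 * (new_below == 0 case).  TOY_PAGE_SHIFT assumes 4 KiB pages; the struct
 * is a toy, not struct vm_area_struct. */
#include <assert.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12

struct toy_range {
	unsigned long start, end;	/* virtual addresses */
	unsigned long pgoff;		/* backing offset of 'start', in pages */
};

static void toy_split(struct toy_range *old, struct toy_range *lo,
		      unsigned long addr)
{
	lo->start = old->start;
	lo->end = addr;
	lo->pgoff = old->pgoff;

	/* the upper half now starts 'addr - old start' bytes further in */
	old->pgoff += (addr - old->start) >> TOY_PAGE_SHIFT;
	old->start = addr;
}

int main(void)
{
	struct toy_range vma = { 0x10000, 0x16000, 8 };	/* 6 pages at page 8 */
	struct toy_range lower;

	toy_split(&vma, &lower, 0x12000);		/* cut after 2 pages */

	assert(lower.pgoff == 8 && lower.end == 0x12000);
	assert(vma.pgoff == 10 && vma.start == 0x12000);
	printf("lower pgoff %lu, upper pgoff %lu\n", lower.pgoff, vma.pgoff);
	return 0;
}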
648 struct vm_area_struct *vma; in validate_mm() local
652 for_each_vma(vmi, vma) { in validate_mm()
654 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
662 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) in validate_mm()
665 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) in validate_mm()
671 dump_vma(vma); in validate_mm()
672 pr_emerg("tree range: %px start %lx end %lx\n", vma, in validate_mm()
680 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
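validate_mm() above iterates the whole address space with the VMA iterator and cross-checks each VMA against the tree. The sketch below shows that walk pattern in isolation; it is kernel-side code and assumes a caller that may sleep and keeps mm alive, so treat it as an illustration of the iterator API visible in the listing rather than a drop-in helper.

/* Minimal sketch of the VMA-iterator walk used by validate_mm() above.
 * Assumes 'mm' is pinned by the caller and that sleeping is allowed. */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/printk.h>

static void dump_vma_ranges(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);		/* keep the tree stable during the walk */
	for_each_vma(vmi, vma)
		pr_info("vma %px: %lx-%lx flags %lx\n",
			vma, vma->vm_start, vma->vm_end, vma->vm_flags);
	mmap_read_unlock(mm);
}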
732 struct vm_area_struct *vma; in commit_merge() local
737 vma = vmg->middle; in commit_merge()
740 vma = vmg->target; in commit_merge()
745 init_multi_vma_prep(&vp, vma, vmg); in commit_merge()
753 if (vma_iter_prealloc(vmg->vmi, vma)) in commit_merge()
761 vma_adjust_trans_huge(vma, vmg->start, vmg->end, in commit_merge()
763 vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); in commit_merge()
767 vma_complete(&vp, vmg->vmi, vma->vm_mm); in commit_merge()
773 static bool can_merge_remove_vma(struct vm_area_struct *vma) in can_merge_remove_vma() argument
775 return !vma->vm_ops || !vma->vm_ops->close; in can_merge_remove_vma()
1185 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, in vma_shrink() argument
1190 WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); in vma_shrink()
1192 if (vma->vm_start < start) in vma_shrink()
1193 vma_iter_config(vmi, vma->vm_start, start); in vma_shrink()
1195 vma_iter_config(vmi, end, vma->vm_end); in vma_shrink()
1200 vma_start_write(vma); in vma_shrink()
1202 init_vma_prep(&vp, vma); in vma_shrink()
1204 vma_adjust_trans_huge(vma, start, end, NULL); in vma_shrink()
1207 vma_set_range(vma, start, end, pgoff); in vma_shrink()
1208 vma_complete(&vp, vmi, vma->vm_mm); in vma_shrink()
1209 validate_mm(vma->vm_mm); in vma_shrink()
1226 tlb_gather_mmu(&tlb, vms->vma->vm_mm); in vms_clear_ptes()
1227 update_hiwater_rss(vms->vma->vm_mm); in vms_clear_ptes()
1228 unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, in vms_clear_ptes()
1233 free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, in vms_clear_ptes()
1242 struct vm_area_struct *vma; in vms_clean_up_area() local
1249 mas_for_each(mas_detach, vma, ULONG_MAX) in vms_clean_up_area()
1250 vma_close(vma); in vms_clean_up_area()
1265 struct vm_area_struct *vma; in vms_complete_munmap_vmas() local
1292 mas_for_each(mas_detach, vma, ULONG_MAX) in vms_complete_munmap_vmas()
1293 remove_vma(vma); in vms_complete_munmap_vmas()
1311 struct vm_area_struct *vma; in reattach_vmas() local
1314 mas_for_each(mas_detach, vma, ULONG_MAX) in reattach_vmas()
1315 vma_mark_attached(vma); in reattach_vmas()
1340 if (vms->start > vms->vma->vm_start) { in vms_gather_munmap_vmas()
1347 if (vms->end < vms->vma->vm_end && in vms_gather_munmap_vmas()
1348 vms->vma->vm_mm->map_count >= sysctl_max_map_count) { in vms_gather_munmap_vmas()
1354 if (vma_is_sealed(vms->vma)) { in vms_gather_munmap_vmas()
1359 error = __split_vma(vms->vmi, vms->vma, vms->start, 1); in vms_gather_munmap_vmas()
1479 struct vma_iterator *vmi, struct vm_area_struct *vma, in init_vma_munmap() argument
1484 vms->vma = vma; in init_vma_munmap()
1485 if (vma) { in init_vma_munmap()
1515 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, in do_vmi_align_munmap() argument
1526 init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock); in do_vmi_align_munmap()
1567 struct vm_area_struct *vma; in do_vmi_munmap() local
1577 vma = vma_find(vmi, end); in do_vmi_munmap()
1578 if (!vma) { in do_vmi_munmap()
1584 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); in do_vmi_munmap()
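do_vmi_munmap() locates the first affected VMA and hands off to do_vmi_align_munmap(), which (via vms_gather_munmap_vmas() above) splits VMAs that straddle the boundaries before detaching the range. The user-space consequence is easy to observe: unmapping the interior of a mapping turns one VMA into two. A small Linux-specific demonstration, assuming nothing beyond mmap/munmap and /proc:

/* Unmapping the middle page of a three-page anonymous mapping exercises
 * the partial-unmap path above: the kernel splits the VMA and two entries
 * remain visible in /proc/self/maps. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	printf("mapped %p-%p\n", (void *)p, (void *)(p + 3 * psz));

	if (munmap(p + psz, psz))	/* punch out the middle page */
		return 1;

	system("cat /proc/self/maps");	/* the range now shows up as two VMAs */
	return 0;
}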
1602 struct vm_area_struct *vma = vmg->middle; in vma_modify() local
1619 (vma->vm_start != start || vma->vm_end != end)); in vma_modify()
1622 if (vma->vm_start < start) { in vma_modify()
1623 int err = split_vma(vmg->vmi, vma, start, 1); in vma_modify()
1630 if (vma->vm_end > end) { in vma_modify()
1631 int err = split_vma(vmg->vmi, vma, end, 0); in vma_modify()
1637 return vma; in vma_modify()
1642 struct vm_area_struct *vma, unsigned long start, unsigned long end, in vma_modify_flags()
1645 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_flags()
1655 struct vm_area_struct *vma, in vma_modify_name() argument
1660 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_name()
1670 struct vm_area_struct *vma, in vma_modify_policy() argument
1674 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_policy()
1684 struct vm_area_struct *vma, in vma_modify_flags_uffd() argument
1690 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_flags_uffd()
1705 struct vm_area_struct *vma, in vma_merge_extend()
1708 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); in vma_merge_extend()
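vma_modify() and its wrappers above (flags, anon name, mempolicy, userfaultfd state) implement the usual pattern for changing an attribute on part of a mapping: split at the edges, apply the change, and let merging undo the splits once neighbours become compatible again. From user space the effect can be seen with mprotect() on an interior page:

/* User-space view of the split-and-remerge handled by vma_modify_flags()
 * above: protecting the middle page splits one VMA into three; restoring
 * the protection lets the kernel merge them back. */
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	mprotect(p + psz, psz, PROT_READ);		/* 1 VMA -> 3 VMAs */
	system("cat /proc/self/maps");

	mprotect(p + psz, psz, PROT_READ | PROT_WRITE);	/* merged back to 1 */
	system("cat /proc/self/maps");
	return 0;
}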
1738 struct vm_area_struct *vma) in unlink_file_vma_batch_add() argument
1740 if (vma->vm_file == NULL) in unlink_file_vma_batch_add()
1743 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) || in unlink_file_vma_batch_add()
1747 vb->vmas[vb->count] = vma; in unlink_file_vma_batch_add()
1761 void unlink_file_vma(struct vm_area_struct *vma) in unlink_file_vma() argument
1763 struct file *file = vma->vm_file; in unlink_file_vma()
1769 __remove_shared_vm_struct(vma, mapping); in unlink_file_vma()
1774 void vma_link_file(struct vm_area_struct *vma) in vma_link_file() argument
1776 struct file *file = vma->vm_file; in vma_link_file()
1782 __vma_link_file(vma, mapping); in vma_link_file()
1787 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) in vma_link() argument
1791 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in vma_link()
1792 if (vma_iter_prealloc(&vmi, vma)) in vma_link()
1795 vma_start_write(vma); in vma_link()
1796 vma_iter_store_new(&vmi, vma); in vma_link()
1797 vma_link_file(vma); in vma_link()
1811 struct vm_area_struct *vma = *vmap; in copy_vma() local
1812 unsigned long vma_start = vma->vm_start; in copy_vma()
1813 struct mm_struct *mm = vma->vm_mm; in copy_vma()
1817 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len); in copy_vma()
1823 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { in copy_vma()
1833 if (vma->vm_file) in copy_vma()
1864 *vmap = vma = new_vma; in copy_vma()
1866 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
1868 new_vma = vm_area_dup(vma); in copy_vma()
1872 if (vma_dup_policy(vma, new_vma)) in copy_vma()
1874 if (anon_vma_clone(new_vma, vma)) in copy_vma()
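copy_vma() above serves the mremap() move path: it first tries to merge at the destination and otherwise duplicates the VMA (policy and anon_vma chain included), reporting through need_rmap_locks whether the move must hold rmap locks (set when the new vm_pgoff does not exceed the old one, as the fragment above shows). A user-space trigger for that path:

/* A growing, movable mremap() is the user-space operation behind the
 * copy_vma() path above: when the mapping cannot grow in place it is
 * relocated, duplicating the VMA at the new address. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	char *q = mremap(p, psz, 16 * psz, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	printf("old %p new %p%s\n", (void *)p, (void *)q,
	       q == p ? " (grown in place)" : " (moved)");
	return 0;
}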
1967 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) in find_mergeable_anon_vma() argument
1971 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); in find_mergeable_anon_vma()
1976 anon_vma = reusable_anon_vma(next, vma, next); in find_mergeable_anon_vma()
1982 VM_BUG_ON_VMA(prev != vma, vma); in find_mergeable_anon_vma()
1986 anon_vma = reusable_anon_vma(prev, prev, vma); in find_mergeable_anon_vma()
2006 static bool vma_is_shared_writable(struct vm_area_struct *vma) in vma_is_shared_writable() argument
2008 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == in vma_is_shared_writable()
2012 static bool vma_fs_can_writeback(struct vm_area_struct *vma) in vma_fs_can_writeback() argument
2015 if (vma->vm_flags & VM_PFNMAP) in vma_fs_can_writeback()
2018 return vma->vm_file && vma->vm_file->f_mapping && in vma_fs_can_writeback()
2019 mapping_can_writeback(vma->vm_file->f_mapping); in vma_fs_can_writeback()
2026 bool vma_needs_dirty_tracking(struct vm_area_struct *vma) in vma_needs_dirty_tracking() argument
2029 if (!vma_is_shared_writable(vma)) in vma_needs_dirty_tracking()
2033 if (vm_ops_needs_writenotify(vma->vm_ops)) in vma_needs_dirty_tracking()
2040 return vma_fs_can_writeback(vma); in vma_needs_dirty_tracking()
2049 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) in vma_wants_writenotify() argument
2052 if (!vma_is_shared_writable(vma)) in vma_wants_writenotify()
2056 if (vm_ops_needs_writenotify(vma->vm_ops)) in vma_wants_writenotify()
2062 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) in vma_wants_writenotify()
2069 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) in vma_wants_writenotify()
2073 if (userfaultfd_wp(vma)) in vma_wants_writenotify()
2077 return vma_fs_can_writeback(vma); in vma_wants_writenotify()
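vma_is_shared_writable() above is the common gate for both vma_needs_dirty_tracking() and vma_wants_writenotify(): unless a mapping is both VM_WRITE and VM_SHARED there is nothing to notify or track. A toy model of that flag test, with made-up bit values:

/* Toy model of the vma_is_shared_writable() test above.  The bit values
 * are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_VM_WRITE	0x2UL
#define TOY_VM_SHARED	0x8UL

static bool toy_shared_writable(unsigned long vm_flags)
{
	return (vm_flags & (TOY_VM_WRITE | TOY_VM_SHARED)) ==
	       (TOY_VM_WRITE | TOY_VM_SHARED);
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_shared_writable(TOY_VM_WRITE),			/* 0 */
	       toy_shared_writable(TOY_VM_SHARED),			/* 0 */
	       toy_shared_writable(TOY_VM_WRITE | TOY_VM_SHARED));	/* 1 */
	return 0;
}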
2163 struct vm_area_struct *vma; in mm_take_all_locks() local
2177 for_each_vma(vmi, vma) { in mm_take_all_locks()
2180 vma_start_write(vma); in mm_take_all_locks()
2184 for_each_vma(vmi, vma) { in mm_take_all_locks()
2187 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
2188 is_vm_hugetlb_page(vma)) in mm_take_all_locks()
2189 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
2193 for_each_vma(vmi, vma) { in mm_take_all_locks()
2196 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
2197 !is_vm_hugetlb_page(vma)) in mm_take_all_locks()
2198 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
2202 for_each_vma(vmi, vma) { in mm_take_all_locks()
2205 if (vma->anon_vma) in mm_take_all_locks()
2206 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
2259 struct vm_area_struct *vma; in mm_drop_all_locks() local
2266 for_each_vma(vmi, vma) { in mm_drop_all_locks()
2267 if (vma->anon_vma) in mm_drop_all_locks()
2268 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
2270 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
2271 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()
2348 vms->vma = vma_find(vmi, map->end); in __mmap_prepare()
2349 init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf, in __mmap_prepare()
2353 if (vms->vma) { in __mmap_prepare()
2403 struct vm_area_struct *vma) in __mmap_new_file_vma() argument
2408 vma->vm_file = get_file(map->file); in __mmap_new_file_vma()
2413 error = mmap_file(vma->vm_file, vma); in __mmap_new_file_vma()
2415 fput(vma->vm_file); in __mmap_new_file_vma()
2416 vma->vm_file = NULL; in __mmap_new_file_vma()
2418 vma_iter_set(vmi, vma->vm_end); in __mmap_new_file_vma()
2420 unmap_region(&vmi->mas, vma, map->prev, map->next); in __mmap_new_file_vma()
2426 WARN_ON_ONCE(map->addr != vma->vm_start); in __mmap_new_file_vma()
2431 VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags && in __mmap_new_file_vma()
2433 (vma->vm_flags & VM_MAYWRITE)); in __mmap_new_file_vma()
2435 map->file = vma->vm_file; in __mmap_new_file_vma()
2436 map->vm_flags = vma->vm_flags; in __mmap_new_file_vma()
2454 struct vm_area_struct *vma; in __mmap_new_vma() local
2461 vma = vm_area_alloc(map->mm); in __mmap_new_vma()
2462 if (!vma) in __mmap_new_vma()
2466 vma_set_range(vma, map->addr, map->end, map->pgoff); in __mmap_new_vma()
2467 vm_flags_init(vma, map->vm_flags); in __mmap_new_vma()
2468 vma->vm_page_prot = map->page_prot; in __mmap_new_vma()
2470 if (vma_iter_prealloc(vmi, vma)) { in __mmap_new_vma()
2476 error = __mmap_new_file_vma(map, vma); in __mmap_new_vma()
2478 error = shmem_zero_setup(vma); in __mmap_new_vma()
2480 vma_set_anonymous(vma); in __mmap_new_vma()
2487 vm_flags_init(vma, map->vm_flags); in __mmap_new_vma()
2496 vma_start_write(vma); in __mmap_new_vma()
2497 vma_iter_store_new(vmi, vma); in __mmap_new_vma()
2499 vma_link_file(vma); in __mmap_new_vma()
2505 if (!vma_is_anonymous(vma)) in __mmap_new_vma()
2506 khugepaged_enter_vma(vma, map->vm_flags); in __mmap_new_vma()
2507 *vmap = vma; in __mmap_new_vma()
2513 vm_area_free(vma); in __mmap_new_vma()
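__mmap_new_vma() above takes one of three setup branches: a file-backed mapping goes through __mmap_new_file_vma() and the file's mmap hook, a shared anonymous mapping is given a shmem object by shmem_zero_setup(), and anything else is marked plain anonymous. The three branches line up with three familiar mmap() calls from user space; the file path used below is arbitrary.

/* The three mmap() variants that reach the three branches of
 * __mmap_new_vma() above. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */

	/* 1. file-backed: __mmap_new_file_vma() -> mmap_file() */
	void *a = (fd >= 0) ? mmap(NULL, psz, PROT_READ, MAP_PRIVATE, fd, 0)
			    : MAP_FAILED;

	/* 2. shared anonymous: shmem_zero_setup() */
	void *b = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	/* 3. private anonymous: vma_set_anonymous() */
	void *c = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	(void)a;
	return (b == MAP_FAILED || c == MAP_FAILED) ? 1 : 0;
}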
2524 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma) in __mmap_complete() argument
2527 vm_flags_t vm_flags = vma->vm_flags; in __mmap_complete()
2529 perf_event_mmap(vma); in __mmap_complete()
2534 vm_stat_account(mm, vma->vm_flags, map->pglen); in __mmap_complete()
2536 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || in __mmap_complete()
2537 is_vm_hugetlb_page(vma) || in __mmap_complete()
2538 vma == get_gate_vma(mm)) in __mmap_complete()
2539 vm_flags_clear(vma, VM_LOCKED_MASK); in __mmap_complete()
2544 if (vma->vm_file) in __mmap_complete()
2545 uprobe_mmap(vma); in __mmap_complete()
2554 vm_flags_set(vma, VM_SOFTDIRTY); in __mmap_complete()
2556 vma_set_page_prot(vma); in __mmap_complete()
2602 static void set_vma_user_defined_fields(struct vm_area_struct *vma, in set_vma_user_defined_fields() argument
2606 vma->vm_ops = map->vm_ops; in set_vma_user_defined_fields()
2607 vma->vm_private_data = map->vm_private_data; in set_vma_user_defined_fields()
2644 struct vm_area_struct *vma = NULL; in __mmap_region() local
2665 vma = vma_merge_new_range(&vmg); in __mmap_region()
2669 if (!vma) { in __mmap_region()
2670 error = __mmap_new_vma(&map, &vma); in __mmap_region()
2676 set_vma_user_defined_fields(vma, &map); in __mmap_region()
2678 __mmap_complete(&map, vma); in __mmap_region()
2762 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, in do_brk_flags() argument
2786 if (vma && vma->vm_end == addr) { in do_brk_flags()
2789 vmg.prev = vma; in do_brk_flags()
2799 if (vma) in do_brk_flags()
2802 vma = vm_area_alloc(mm); in do_brk_flags()
2803 if (!vma) in do_brk_flags()
2806 vma_set_anonymous(vma); in do_brk_flags()
2807 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); in do_brk_flags()
2808 vm_flags_init(vma, vm_flags); in do_brk_flags()
2809 vma->vm_page_prot = vm_get_page_prot(vm_flags); in do_brk_flags()
2810 vma_start_write(vma); in do_brk_flags()
2811 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) in do_brk_flags()
2817 perf_event_mmap(vma); in do_brk_flags()
2822 vm_flags_set(vma, VM_SOFTDIRTY); in do_brk_flags()
2826 vm_area_free(vma); in do_brk_flags()
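do_brk_flags() above backs the brk()/sbrk() interface: when the previous VMA ends exactly at the requested address it is offered to the merge path (vmg.prev above), and only if that fails is a fresh anonymous VMA allocated. A trivial user-space exercise of the path:

/* Growing the program break reaches do_brk_flags() above, which extends
 * or creates the anonymous brk VMA. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *before = sbrk(0);			/* current program break */

	if (sbrk(64 * 1024) == (void *)-1)	/* ask for 64 KiB more heap */
		return 1;

	printf("brk moved from %p to %p\n", before, sbrk(0));
	return 0;
}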
2946 static int acct_stack_growth(struct vm_area_struct *vma, in acct_stack_growth() argument
2949 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2953 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
2961 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) in acct_stack_growth()
2965 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
2966 vma->vm_end - size; in acct_stack_growth()
2967 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2985 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2987 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2991 VMA_ITERATOR(vmi, mm, vma->vm_start); in expand_upwards()
2993 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
3011 next = find_vma_intersection(mm, vma->vm_end, gap_addr); in expand_upwards()
3021 vma_iter_config(&vmi, vma->vm_start, address); in expand_upwards()
3022 if (vma_iter_prealloc(&vmi, vma)) in expand_upwards()
3026 if (unlikely(anon_vma_prepare(vma))) { in expand_upwards()
3032 vma_start_write(vma); in expand_upwards()
3034 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
3037 if (address > vma->vm_end) { in expand_upwards()
3040 size = address - vma->vm_start; in expand_upwards()
3041 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
3044 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
3045 error = acct_stack_growth(vma, size, grow); in expand_upwards()
3047 if (vma->vm_flags & VM_LOCKED) in expand_upwards()
3049 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
3050 anon_vma_interval_tree_pre_update_vma(vma); in expand_upwards()
3051 vma->vm_end = address; in expand_upwards()
3053 vma_iter_store_overwrite(&vmi, vma); in expand_upwards()
3054 anon_vma_interval_tree_post_update_vma(vma); in expand_upwards()
3056 perf_event_mmap(vma); in expand_upwards()
3060 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
3071 int expand_downwards(struct vm_area_struct *vma, unsigned long address) in expand_downwards() argument
3073 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
3076 VMA_ITERATOR(vmi, mm, vma->vm_start); in expand_downwards()
3078 if (!(vma->vm_flags & VM_GROWSDOWN)) in expand_downwards()
3098 vma_iter_next_range_limit(&vmi, vma->vm_start); in expand_downwards()
3100 vma_iter_config(&vmi, address, vma->vm_end); in expand_downwards()
3101 if (vma_iter_prealloc(&vmi, vma)) in expand_downwards()
3105 if (unlikely(anon_vma_prepare(vma))) { in expand_downwards()
3111 vma_start_write(vma); in expand_downwards()
3113 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
3116 if (address < vma->vm_start) { in expand_downwards()
3119 size = vma->vm_end - address; in expand_downwards()
3120 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
3123 if (grow <= vma->vm_pgoff) { in expand_downwards()
3124 error = acct_stack_growth(vma, size, grow); in expand_downwards()
3126 if (vma->vm_flags & VM_LOCKED) in expand_downwards()
3128 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
3129 anon_vma_interval_tree_pre_update_vma(vma); in expand_downwards()
3130 vma->vm_start = address; in expand_downwards()
3131 vma->vm_pgoff -= grow; in expand_downwards()
3133 vma_iter_store_overwrite(&vmi, vma); in expand_downwards()
3134 anon_vma_interval_tree_post_update_vma(vma); in expand_downwards()
3136 perf_event_mmap(vma); in expand_downwards()
3140 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
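Both stack-growth paths above guard their vm_pgoff arithmetic before committing: expand_upwards() refuses a size that would wrap vm_pgoff, and expand_downwards() refuses to grow by more pages than vm_pgoff can absorb. The stand-alone model below mirrors those two checks; the types and the 4 KiB page size are assumptions for illustration.

/* Toy model of the overflow guards in expand_upwards()/expand_downwards()
 * above.  Assumes 4 KiB pages; the struct is not the kernel's. */
#include <stdbool.h>

#define TOY_PAGE_SHIFT 12

struct toy_vma {
	unsigned long vm_start, vm_end;
	unsigned long vm_pgoff;		/* page offset of vm_start */
};

/* Growing up to 'address': adding the new size in pages to vm_pgoff must
 * not wrap around. */
static bool toy_can_grow_up(const struct toy_vma *vma, unsigned long address)
{
	unsigned long size = address - vma->vm_start;

	return vma->vm_pgoff + (size >> TOY_PAGE_SHIFT) >= vma->vm_pgoff;
}

/* Growing down to 'address': vm_pgoff is decremented by the number of new
 * pages, so the growth may not exceed it. */
static bool toy_can_grow_down(const struct toy_vma *vma, unsigned long address)
{
	unsigned long grow = (vma->vm_start - address) >> TOY_PAGE_SHIFT;

	return grow <= vma->vm_pgoff;
}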
3168 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
3170 unsigned long charged = vma_pages(vma); in insert_vm_struct()
3173 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) in insert_vm_struct()
3176 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
3192 if (vma_is_anonymous(vma)) { in insert_vm_struct()
3193 BUG_ON(vma->anon_vma); in insert_vm_struct()
3194 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
3197 if (vma_link(mm, vma)) { in insert_vm_struct()
3198 if (vma->vm_flags & VM_ACCOUNT) in insert_vm_struct()