Lines Matching defs:vma (mm/madvise.c)
80 struct vm_area_struct *vma;
110 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
112 if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
113 vma_assert_locked(vma);
115 return vma->anon_name;
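For orientation, the fragments at lines 110-115 reassemble into the lookup helper below. Only the listed lines are used; the explanatory comment is added here and is not part of the source.

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	/* The name may only be read under the mmap_lock or the per-VMA lock. */
	if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
		vma_assert_locked(vma);

	return vma->anon_name;
}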
119 static int replace_anon_vma_name(struct vm_area_struct *vma,
122 struct anon_vma_name *orig_name = anon_vma_name(vma);
125 vma->anon_name = NULL;
133 vma->anon_name = anon_vma_name_reuse(anon_name);
139 static int replace_anon_vma_name(struct vm_area_struct *vma,
149 * Update the vm_flags or anon_name on region of a vma, splitting it or merging
155 struct vm_area_struct *vma = madv_behavior->vma;
161 if (new_flags == vma->vm_flags && (!set_new_anon_name ||
162 anon_vma_name_eq(anon_vma_name(vma), anon_name)))
166 vma = vma_modify_name(&vmi, madv_behavior->prev, vma,
169 vma = vma_modify_flags(&vmi, madv_behavior->prev, vma,
172 if (IS_ERR(vma))
173 return PTR_ERR(vma);
175 madv_behavior->vma = vma;
178 vma_start_write(vma);
179 vm_flags_reset(vma, new_flags);
181 return replace_anon_vma_name(vma, anon_name);
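Lines 149-181 belong to madvise_update_vma(). Pieced together, the visible flow is roughly the sketch below; the argument lists of vma_modify_name()/vma_modify_flags() are abbreviated with "...", and the if/else framing around them is assumed rather than shown in the listing.

	struct vm_area_struct *vma = madv_behavior->vma;

	/* Nothing to do if neither the flags nor the anon name would change. */
	if (new_flags == vma->vm_flags && (!set_new_anon_name ||
			anon_vma_name_eq(anon_vma_name(vma), anon_name)))
		return 0;

	/* Split or merge so the VMA exactly covers the requested range. */
	if (set_new_anon_name)
		vma = vma_modify_name(&vmi, madv_behavior->prev, vma, ...);
	else
		vma = vma_modify_flags(&vmi, madv_behavior->prev, vma, ...);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	madv_behavior->vma = vma;

	/* Take the per-VMA write lock before touching vm_flags. */
	vma_start_write(vma);
	vm_flags_reset(vma, new_flags);

	return replace_anon_vma_name(vma, anon_name);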
190 struct vm_area_struct *vma = walk->private;
202 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
218 vma, addr, &splug);
236 static void shmem_swapin_range(struct vm_area_struct *vma,
240 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
241 pgoff_t end_index = linear_page_index(vma, end) - 1;
257 addr = vma->vm_start +
258 ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
263 vma, addr, &splug);
285 struct vm_area_struct *vma = madv_behavior->vma;
287 struct file *file = vma->vm_file;
294 walk_page_range_vma(vma, start, end, &swapin_walk_ops, vma);
300 shmem_swapin_range(vma, start, end, file->f_mapping);
316 * explicitly grab a reference because the vma (and hence the
317 * vma's reference to the file) can go away as soon as we drop
322 offset = (loff_t)(start - vma->vm_start)
323 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
331 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
333 if (!vma->vm_file)
342 file_inode(vma->vm_file)) ||
343 file_permission(vma->vm_file, MAY_WRITE) == 0;
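can_do_file_pageout() (lines 331-343) gates pageout of file-backed mappings. The middle of the return expression is not in the listing; the inode_owner_or_capable() line below is filled in from mainline memory and should be treated as an assumption.

static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
	if (!vma->vm_file)
		return false;
	/*
	 * Page out page cache only if the caller owns the inode (or is
	 * capable) or could open the file for writing; otherwise shared,
	 * non-exclusive mappings would leak reclaim state as a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}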
364 struct vm_area_struct *vma = walk->vma;
376 pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
377 !can_do_file_pageout(vma);
385 ptl = pmd_trans_huge_lock(pmd, vma);
423 pmdp_invalidate(vma, addr, pmd);
454 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
479 folio = vm_normal_folio(vma, addr, ptent);
534 clear_young_dirty_ptes(vma, addr, pte, nr,
580 struct vm_area_struct *vma = madv_behavior->vma;
587 tlb_start_vma(tlb, vma);
588 walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops,
590 tlb_end_vma(tlb, vma);
593 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
595 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
600 struct vm_area_struct *vma = madv_behavior->vma;
603 if (!can_madv_lru_vma(vma))
615 struct vm_area_struct *vma,
623 tlb_start_vma(tlb, vma);
624 walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops,
626 tlb_end_vma(tlb, vma);
632 struct vm_area_struct *vma = madv_behavior->vma;
634 if (!can_madv_lru_vma(vma))
643 if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
644 (vma->vm_flags & VM_MAYSHARE)))
649 madvise_pageout_page_range(&tlb, vma, &madv_behavior->range);
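From userspace, the cold/pageout paths above are reached through madvise(2). A minimal, self-contained example follows; it assumes Linux 5.4+ and libc headers that define MADV_COLD/MADV_PAGEOUT, and keeps error handling to perror().

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB of private anonymous memory */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xaa, len);			/* fault the pages in */

	if (madvise(p, len, MADV_COLD))		/* deactivate: madvise_cold() */
		perror("MADV_COLD");
	if (madvise(p, len, MADV_PAGEOUT))	/* reclaim: madvise_pageout() */
		perror("MADV_PAGEOUT");

	munmap(p, len);
	return 0;
}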
662 struct vm_area_struct *vma = walk->vma;
672 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
709 folio = vm_normal_folio(vma, addr, ptent);
772 clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
806 struct vm_area_struct *vma = madv_behavior->vma;
815 /* MADV_FREE works for only anon vma at the moment */
816 if (!vma_is_anonymous(vma))
819 range.start = max(vma->vm_start, start_addr);
820 if (range.start >= vma->vm_end)
822 range.end = min(vma->vm_end, end_addr);
823 if (range.end <= vma->vm_start)
832 tlb_start_vma(tlb, vma);
834 walk_page_range_vma(vma, range.start, range.end,
836 tlb_end_vma(tlb, vma);
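madvise_free_single_vma() (lines 806-836) first clamps the caller's range to the VMA. A sketch of just that clamping; the -EINVAL error value is taken from mainline and is an assumption here.

	/* MADV_FREE is only applied to anonymous VMAs. */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	/* Clamp [start_addr, end_addr) to this VMA; bail out if they don't overlap. */
	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;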
870 madv_behavior->tlb, madv_behavior->vma, range->start,
878 struct vm_area_struct *vma = madv_behavior->vma;
882 if (!is_vm_hugetlb_page(vma)) {
888 return !(vma->vm_flags & forbidden);
893 if (range->start & ~huge_page_mask(hstate_vma(vma)))
902 range->end = ALIGN_DOWN(range->end, huge_page_size(hstate_vma(vma)));
919 if (!userfaultfd_remove(madv_behavior->vma, range->start, range->end)) {
920 struct vm_area_struct *vma;
924 madv_behavior->vma = vma = vma_lookup(mm, range->start);
925 if (!vma)
928 * Potential end adjustment for hugetlb vma is OK as
929 * the check below keeps end within vma.
933 if (range->end > vma->vm_end) {
935 * Don't fail if end > vma->vm_end. If the old
936 * vma was split while the mmap_lock was
940 * adjacent next vma that we'll walk
943 * end-vma->vm_end range, but the manager can
946 range->end = vma->vm_end;
952 * the adjustment for hugetlb vma above may have rounded
1017 struct vm_area_struct *vma = madv_behavior->vma;
1023 if (vma->vm_flags & VM_LOCKED)
1026 f = vma->vm_file;
1032 if (!vma_is_shared_maywrite(vma))
1035 offset = (loff_t)(start - vma->vm_start)
1036 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
1040 * explicitly grab a reference because the vma (and hence the
1041 * vma's reference to the file) can go away as soon as we drop
1045 if (userfaultfd_remove(vma, start, end)) {
1057 static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked)
1069 return !(vma->vm_flags & disallowed);
1135 struct vm_area_struct *vma = madv_behavior->vma;
1140 if (!is_valid_guard_vma(vma, /* allow_locked = */false))
1150 err = anon_vma_prepare(vma);
1170 err = walk_page_range_mm(vma->vm_mm, range->start, range->end,
1187 zap_page_range_single(vma, range->start,
1231 update_mmu_cache(walk->vma, addr, pte);
1246 struct vm_area_struct *vma = madv_behavior->vma;
1253 if (!is_valid_guard_vma(vma, /* allow_locked = */true))
1256 return walk_page_range_vma(vma, range->start, range->end,
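The guard-region paths above (is_valid_guard_vma(), the install walk and the remove walk) are driven from userspace with MADV_GUARD_INSTALL/MADV_GUARD_REMOVE, merged in Linux 6.13. A small example; the #ifndef fallbacks use the values from asm-generic/mman-common.h in case the libc headers predate the feature.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MADV_GUARD_INSTALL		/* older libc headers may lack these */
#define MADV_GUARD_INSTALL 102		/* values from asm-generic/mman-common.h */
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 16 * page;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Turn the first page into a guard region: touching it raises SIGSEGV. */
	if (madvise(p, page, MADV_GUARD_INSTALL))
		perror("MADV_GUARD_INSTALL");

	/* ... use [p + page, p + len) as the usable area ... */

	if (madvise(p, page, MADV_GUARD_REMOVE))
		perror("MADV_GUARD_REMOVE");
	munmap(p, len);
	return 0;
}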
1287 struct vm_area_struct *vma = madv_behavior->vma;
1290 if (!vma_is_sealed(vma))
1309 if (!vma_is_anonymous(vma))
1313 if ((vma->vm_flags & VM_WRITE) &&
1314 arch_vma_access_permitted(vma, /* write= */ true,
1329 * Apply an madvise behavior to a region of a vma. madvise_update_vma
1336 struct vm_area_struct *vma = madv_behavior->vma;
1337 vm_flags_t new_flags = vma->vm_flags;
1358 return madvise_collapse(vma, range->start, range->end,
1386 if (vma->vm_file || new_flags & VM_SHARED)
1399 if ((!is_vm_hugetlb_page(vma) && (new_flags & VM_SPECIAL)) ||
1406 error = ksm_madvise(vma, range->start, range->end,
1413 error = hugepage_madvise(vma, &new_flags, behavior);
1419 if (vma->vm_file && !vma_is_anon_shmem(vma))
1586 struct vm_area_struct *vma;
1588 vma = lock_vma_under_rcu(mm, madv_behavior->range.start);
1589 if (!vma)
1595 if (madv_behavior->range.end > vma->vm_end || current->mm != mm ||
1596 userfaultfd_armed(vma)) {
1597 vma_end_read(vma);
1600 madv_behavior->vma = vma;
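try_vma_read_lock() (lines 1586-1600) is the per-VMA-lock fast path: lock just the one VMA under RCU, and give up when the request is not a single-VMA, non-userfaultfd, current-mm case. Reassembled from the listed lines; the false-return fallback (retaking the mmap_lock) is assumed, not shown.

	vma = lock_vma_under_rcu(mm, madv_behavior->range.start);
	if (!vma)
		return false;			/* assumed: caller falls back to mmap_lock */

	/* Only a single VMA of the current mm, with no userfaultfd, is handled here. */
	if (madv_behavior->range.end > vma->vm_end || current->mm != mm ||
	    userfaultfd_armed(vma)) {
		vma_end_read(vma);
		return false;			/* assumed fallback */
	}

	madv_behavior->vma = vma;
	return true;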
1612 * cover the overlap between the current vma and the original range. Any
1627 struct vm_area_struct *prev, *vma;
1636 vma_end_read(madv_behavior->vma);
1640 vma = find_vma_prev(mm, range->start, &prev);
1641 if (vma && range->start > vma->vm_start)
1642 prev = vma;
1646 if (!vma)
1649 /* Here start < (last_end|vma->vm_end). */
1650 if (range->start < vma->vm_start) {
1658 range->start = vma->vm_start;
1663 /* Here vma->vm_start <= range->start < (last_end|vma->vm_end) */
1664 range->end = min(vma->vm_end, last_end);
1666 /* Here vma->vm_start <= range->start < range->end <= (last_end|vma->vm_end). */
1668 madv_behavior->vma = vma;
1675 vma = NULL;
1678 vma = madv_behavior->vma;
1679 prev = vma;
1682 if (vma && range->end < vma->vm_end)
1683 range->end = vma->vm_end;
1687 vma = find_vma(mm, vma ? vma->vm_end : range->end);
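Finally, the walk loop (lines 1612 onward) clamps the request to one VMA at a time and advances with find_vma(). A minimal sketch of only the steps visible in the fragments; loop termination, error handling and the relock/lock-drop cases are omitted and the framing is assumed.

	vma = find_vma_prev(mm, range->start, &prev);
	if (vma && range->start > vma->vm_start)
		prev = vma;

	while (vma) {
		if (range->start < vma->vm_start)	/* hole before this VMA */
			range->start = vma->vm_start;

		range->end = min(vma->vm_end, last_end);	/* clamp to this VMA */
		madv_behavior->vma = vma;
		/* ... apply the behavior to [range->start, range->end) ... */

		range->start = range->end;
		vma = find_vma(mm, vma->vm_end);	/* advance to the next VMA */
	}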
1695 * Any behaviour which results in changes to the vma->vm_flags needs to