Lines matching dst_vma in the kernel's userfaultfd fill/move code (mm/userfaultfd.c) - parameter/variable definitions and uses; the leading number on each entry is the line number in that file.

24 bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
26 /* Make sure that the dst range is fully within dst_vma. */
27 if (dst_end > dst_vma->vm_end)
35 if (!dst_vma->vm_userfaultfd_ctx.ctx)
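
Pieced together, the fragments at lines 24-35 show that destination validation is just two checks: the requested range must end inside the VMA, and the VMA must actually be registered with a userfaultfd context. A minimal sketch of that shape (the return statements are assumed, they are not part of the listing):

static bool validate_dst_vma(struct vm_area_struct *dst_vma,
                             unsigned long dst_end)
{
        /* Make sure that the dst range is fully within dst_vma. */
        if (dst_end > dst_vma->vm_end)
                return false;

        /* The VMA must be registered with a userfaultfd context. */
        if (!dst_vma->vm_userfaultfd_ctx.ctx)
                return false;

        return true;
}
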
103 struct vm_area_struct *dst_vma;
105 dst_vma = uffd_lock_vma(dst_mm, dst_start);
106 if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
107 return dst_vma;
109 vma_end_read(dst_vma);
124 struct vm_area_struct *dst_vma;
127 dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
128 if (IS_ERR(dst_vma))
131 if (validate_dst_vma(dst_vma, dst_start + len))
132 return dst_vma;
134 dst_vma = ERR_PTR(-ENOENT);
137 return dst_vma;
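
The two blocks above are the destination lookup/lock paths: lines 103-109 are a per-VMA-lock fast path (presumably under CONFIG_PER_VMA_LOCK) that drops the VMA read lock again when validation fails, while lines 124-137 are the mmap_lock fallback that turns a failed validation into ERR_PTR(-ENOENT). A rough sketch of both, with the surrounding function bodies assumed:

/* Fast path: per-VMA read lock (lines 103-109). */
dst_vma = uffd_lock_vma(dst_mm, dst_start);
if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
        return dst_vma;
vma_end_read(dst_vma);                  /* range did not validate */
return ERR_PTR(-ENOENT);

/* Fallback: under mmap_read_lock() (lines 124-137). */
mmap_read_lock(dst_mm);
dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
if (!IS_ERR(dst_vma) && !validate_dst_vma(dst_vma, dst_start + len))
        dst_vma = ERR_PTR(-ENOENT);
if (IS_ERR(dst_vma))
        mmap_read_unlock(dst_mm);       /* on success the lock stays held */
return dst_vma;
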
147 static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
153 if (!dst_vma->vm_file)
156 inode = dst_vma->vm_file->f_inode;
157 offset = linear_page_index(dst_vma, dst_addr);
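
Lines 147-157 belong to the helper that keeps file-backed fills from mapping past EOF: anonymous VMAs pass trivially, otherwise the target's page index is compared against the inode size. A plausible reconstruction (the final i_size comparison is assumed from context, it is not in the listing):

static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
                                 unsigned long dst_addr)
{
        struct inode *inode;
        pgoff_t offset, max_off;

        if (!dst_vma->vm_file)
                return false;           /* anonymous: nothing to exceed */

        inode = dst_vma->vm_file->f_inode;
        offset = linear_page_index(dst_vma, dst_addr);
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        return offset >= max_off;
}
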
163 * Install PTEs, to map dst_addr (within dst_vma) to page.
169 struct vm_area_struct *dst_vma,
174 struct mm_struct *dst_mm = dst_vma->vm_mm;
176 bool writable = dst_vma->vm_flags & VM_WRITE;
177 bool vm_shared = dst_vma->vm_flags & VM_SHARED;
182 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
187 _dst_pte = pte_mkwrite(_dst_pte, dst_vma);
196 if (mfill_file_over_size(dst_vma, dst_addr)) {
214 folio_add_file_rmap_pte(folio, page, dst_vma);
216 folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE);
217 folio_add_lru_vma(folio, dst_vma);
229 update_mmu_cache(dst_vma, dst_addr, dst_pte);
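
mfill_atomic_install_pte() (lines 163-229) is where every fill variant converges: the PTE is built from dst_vma->vm_page_prot, made writable only when the VMA allows it (with extra restrictions for private mappings of page-cache pages), refused if it would land past EOF of a file-backed VMA, hooked into the reverse map (file rmap for pages already in the page cache, new-anon rmap plus LRU insertion otherwise), and finally published with update_mmu_cache(). A heavily condensed sketch of that ordering; the dirty/uffd-wp bits, the page-table lock and the error unwinding are all elided:

        pte_t _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
        bool writable = dst_vma->vm_flags & VM_WRITE;

        if (writable)
                _dst_pte = pte_mkwrite(_dst_pte, dst_vma);

        if (mfill_file_over_size(dst_vma, dst_addr))
                return -EFAULT;                 /* would map beyond i_size */

        if (folio_mapping(folio))               /* already in the page cache */
                folio_add_file_rmap_pte(folio, page, dst_vma);
        else {
                folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE);
                folio_add_lru_vma(folio, dst_vma);
        }

        set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
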
238 struct vm_area_struct *dst_vma,
250 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
299 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
302 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
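
The allocate-charge-install sequence at lines 238-302 is the kernel side of resolving a missing anonymous page via UFFDIO_COPY. A minimal userspace sketch of driving it; resolve_with_copy() is an illustrative helper name, and uffd is assumed to be a userfaultfd descriptor already registered for the destination range in UFFDIO_REGISTER_MODE_MISSING:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stddef.h>

/* Resolve one missing-page fault at page-aligned 'dst' by copying from 'src'. */
static long resolve_with_copy(int uffd, void *dst, const void *src, size_t page_size)
{
        struct uffdio_copy copy = {
                .dst = (unsigned long)dst,
                .src = (unsigned long)src,
                .len = page_size,
                .mode = 0,      /* UFFDIO_COPY_MODE_WP installs it write-protected */
        };

        if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
                return -1;
        return copy.copy;       /* bytes actually copied */
}
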
314 struct vm_area_struct *dst_vma,
320 folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
324 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
334 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
346 struct vm_area_struct *dst_vma,
353 if (mm_forbids_zeropage(dst_vma->vm_mm))
354 return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
357 dst_vma->vm_page_prot));
359 dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
362 if (mfill_file_over_size(dst_vma, dst_addr)) {
369 set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
371 update_mmu_cache(dst_vma, dst_addr, dst_pte);
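
Lines 314-371 back UFFDIO_ZEROPAGE: the common case maps the shared zero page read-only (the pte_mkspecial/my_zero_pfn construction whose tail shows at line 357), and only when mm_forbids_zeropage() rejects that does the kernel fall back to allocating a zero-filled folio (lines 314-334). The matching userspace call is tiny; a sketch with an illustrative helper name, assuming a page-aligned range registered for missing faults:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stddef.h>

static long resolve_with_zeropage(int uffd, void *dst, size_t len)
{
        struct uffdio_zeropage zp = {
                .range = { .start = (unsigned long)dst, .len = len },
                .mode  = 0,     /* or UFFDIO_ZEROPAGE_MODE_DONTWAKE to defer waking */
        };

        if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
                return -1;
        return zp.zeropage;     /* bytes zero-mapped */
}
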
381 struct vm_area_struct *dst_vma,
385 struct inode *inode = file_inode(dst_vma->vm_file);
386 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
408 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
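
mfill_atomic_pte_continue() (lines 381-408) brings in no new data at all: it looks up the folio already sitting in the page cache at that file offset (file_inode() plus linear_page_index()) and merely installs the missing PTE. That is the backend of UFFDIO_CONTINUE, used to resolve minor faults on ranges registered with UFFDIO_REGISTER_MODE_MINOR. A userspace sketch (illustrative helper name):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stddef.h>

static long resolve_minor_fault(int uffd, void *dst, size_t len)
{
        struct uffdio_continue cont = {
                .range = { .start = (unsigned long)dst, .len = len },
                .mode  = 0,     /* UFFDIO_CONTINUE_MODE_DONTWAKE also exists */
        };

        if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
                return -1;
        return cont.mapped;     /* bytes now mapped */
}
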
425 struct vm_area_struct *dst_vma,
430 struct mm_struct *dst_mm = dst_vma->vm_mm;
440 if (mfill_file_over_size(dst_vma, dst_addr)) {
453 update_mmu_cache(dst_vma, dst_addr, dst_pte);
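
The poison variant (lines 425-453) installs a marker PTE instead of a page, so any later access to the range raises SIGBUS; userspace reaches it through UFFDIO_POISON, typically during VM live migration to reproduce hardware-poisoned pages on the destination. Sketch (illustrative helper name):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stddef.h>

static long poison_range(int uffd, void *dst, size_t len)
{
        struct uffdio_poison poison = {
                .range = { .start = (unsigned long)dst, .len = len },
                .mode  = 0,     /* UFFDIO_POISON_MODE_DONTWAKE is the only flag */
        };

        if (ioctl(uffd, UFFDIO_POISON, &poison) == -1)
                return -1;
        return poison.updated;  /* bytes marked poisoned */
}
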
490 struct vm_area_struct *dst_vma,
496 struct mm_struct *dst_mm = dst_vma->vm_mm;
515 uffd_mfill_unlock(dst_vma);
523 vma_hpagesize = vma_kernel_pagesize(dst_vma);
534 * On routine entry dst_vma is set. If we had to drop mmap_lock and
535 * retry, dst_vma will be set to NULL and we must lookup again.
537 if (!dst_vma) {
538 dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
539 if (IS_ERR(dst_vma)) {
540 err = PTR_ERR(dst_vma);
545 if (!is_vm_hugetlb_page(dst_vma))
549 if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
572 idx = linear_page_index(dst_vma, dst_addr);
573 mapping = dst_vma->vm_file->f_mapping;
576 hugetlb_vma_lock_read(dst_vma);
579 dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
581 hugetlb_vma_unlock_read(dst_vma);
589 hugetlb_vma_unlock_read(dst_vma);
594 err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
597 hugetlb_vma_unlock_read(dst_vma);
604 uffd_mfill_unlock(dst_vma);
614 dst_vma = NULL;
634 uffd_mfill_unlock(dst_vma);
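
Lines 490-634 are the hugetlb flavour of the fill loop: the range is processed one huge page at a time under hugetlb_vma_lock_read(), and when the routine has to drop the locks it sets dst_vma back to NULL and re-resolves it, exactly as the comment at lines 534-535 says. From userspace the same ioctls apply, but dst and len must be multiples of the huge page size. A sketch of preparing such a destination, assuming 2 MiB huge pages are configured and map_hugetlb_dst() is an illustrative helper:

#include <sys/mman.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

#define HPAGE_SIZE      (2UL * 1024 * 1024)     /* assumed default huge page size */

static void *map_hugetlb_dst(int uffd, size_t len)      /* len: multiple of HPAGE_SIZE */
{
        struct uffdio_register reg;
        void *dst = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (dst == MAP_FAILED)
                return NULL;

        reg.range.start = (unsigned long)dst;
        reg.range.len = len;
        reg.mode = UFFDIO_REGISTER_MODE_MISSING;
        if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
                munmap(dst, len);
                return NULL;
        }

        /* UFFDIO_COPY into this range must be HPAGE_SIZE-aligned and -sized. */
        return dst;
}
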
646 struct vm_area_struct *dst_vma,
654 struct vm_area_struct *dst_vma,
663 return mfill_atomic_pte_continue(dst_pmd, dst_vma,
666 return mfill_atomic_pte_poison(dst_pmd, dst_vma,
680 if (!(dst_vma->vm_flags & VM_SHARED)) {
682 err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
687 dst_vma, dst_addr);
689 err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
704 struct vm_area_struct *dst_vma;
730 dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
731 if (IS_ERR(dst_vma)) {
732 err = PTR_ERR(dst_vma);
751 if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
752 dst_vma->vm_flags & VM_SHARED))
756 * validate 'mode' now that we know the dst_vma: don't allow
759 if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
765 if (is_vm_hugetlb_page(dst_vma))
766 return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
769 if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
771 if (!vma_is_shmem(dst_vma) &&
812 err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
820 uffd_mfill_unlock(dst_vma);
851 uffd_mfill_unlock(dst_vma);
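
mfill_atomic() (lines 704-851) is the driver for all of the fill variants: it locks and validates dst_vma once, rejects flag/VMA combinations that make no sense (lines 751-771), then walks the range page by page through mfill_atomic_pte(), dropping and re-taking the locks whenever the source page has to be faulted in. When it has to give up early, for example because the mapping is changing underneath it, the ioctl reports a short byte count and userspace is expected to retry. A sketch of that retry loop around UFFDIO_COPY (copy_range() is an illustrative helper):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stddef.h>
#include <errno.h>

/* Keep issuing UFFDIO_COPY until the whole range is filled or a hard error hits. */
static int copy_range(int uffd, char *dst, const char *src, size_t len)
{
        size_t done = 0;

        while (done < len) {
                struct uffdio_copy copy = {
                        .dst = (unsigned long)(dst + done),
                        .src = (unsigned long)(src + done),
                        .len = len - done,
                        .mode = 0,
                };

                if (ioctl(uffd, UFFDIO_COPY, &copy) == 0) {
                        done += copy.copy;
                        continue;
                }
                if (copy.copy > 0)              /* partial progress: retry the rest */
                        done += copy.copy;
                else if (errno != EAGAIN)       /* bare EAGAIN: simply retry */
                        return -1;
        }
        return 0;
}
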
900 long uffd_wp_range(struct vm_area_struct *dst_vma,
907 VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
920 if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
922 tlb_gather_mmu(&tlb, dst_vma->vm_mm);
923 ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
935 struct vm_area_struct *dst_vma;
962 for_each_vma_range(vmi, dst_vma, end) {
964 if (!userfaultfd_wp(dst_vma)) {
969 if (is_vm_hugetlb_page(dst_vma)) {
971 page_mask = vma_kernel_pagesize(dst_vma) - 1;
976 _start = max(dst_vma->vm_start, start);
977 _end = min(dst_vma->vm_end, end);
979 err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
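
uffd_wp_range() (lines 900-923) funnels write-protect updates through change_protection() under a gathered TLB flush, and the for_each_vma_range() walk above it (lines 935-979) clamps the request to each overlapping VMA and insists that every one of them is registered in write-protect mode. Userspace toggles the protection in both directions with UFFDIO_WRITEPROTECT; a sketch (illustrative helper name):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stddef.h>
#include <stdbool.h>

static int set_uffd_wp(int uffd, void *addr, size_t len, bool enable)
{
        struct uffdio_writeprotect wp = {
                .range = { .start = (unsigned long)addr, .len = len },
                .mode  = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
        };

        /* mode == 0 removes the userfaultfd write protection again. */
        return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}
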
1030 struct vm_area_struct *dst_vma,
1063 folio_move_anon_rmap(src_folio, dst_vma);
1064 src_folio->index = linear_page_index(dst_vma, dst_addr);
1066 orig_dst_pte = folio_mk_pte(src_folio, dst_vma->vm_page_prot);
1073 orig_dst_pte = pte_mkwrite(orig_dst_pte, dst_vma);
1081 static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
1109 * index and mapping to align with the dst_vma, where a swap-in may
1113 folio_move_anon_rmap(src_folio, dst_vma);
1114 src_folio->index = linear_page_index(dst_vma, dst_addr);
1147 struct vm_area_struct *dst_vma,
1165 dst_vma->vm_page_prot));
1180 struct vm_area_struct *dst_vma,
1272 err = move_zeropage_pte(mm, dst_vma, src_vma,
1378 err = move_present_pte(mm, dst_vma, src_vma,
1442 err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte,
1493 struct vm_area_struct *dst_vma)
1496 if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
1497 pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
1501 if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
1512 if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
1515 /* Ensure dst_vma is registered in uffd we are operating on */
1516 if (!dst_vma->vm_userfaultfd_ctx.ctx ||
1517 dst_vma->vm_userfaultfd_ctx.ctx != ctx)
1521 if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
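
Lines 1493-1521 list the conditions under which a page may be moved between two VMAs: matching access bits and page protections, matching mlock state, both VMAs "move compatible", the destination registered in the very userfaultfd context issuing the call, and both VMAs anonymous. Condensed into one sketch (the return values and any checks not visible in the listing are assumed):

static int validate_move_areas(struct userfaultfd_ctx *ctx,
                               struct vm_area_struct *src_vma,
                               struct vm_area_struct *dst_vma)
{
        /* Access bits and page protections must match. */
        if ((src_vma->vm_flags & VM_ACCESS_FLAGS) !=
            (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
            pgprot_val(src_vma->vm_page_prot) !=
            pgprot_val(dst_vma->vm_page_prot))
                return -EINVAL;

        /* mlock state must match, and both sides must be movable at all. */
        if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
                return -EINVAL;
        if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
                return -EINVAL;

        /* dst must be registered in the uffd context doing the move ... */
        if (!dst_vma->vm_userfaultfd_ctx.ctx ||
            dst_vma->vm_userfaultfd_ctx.ctx != ctx)
                return -EINVAL;

        /* ... and only anonymous memory can be moved. */
        if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
                return -EINVAL;

        return 0;
}
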
1542 /* Skip finding src_vma if src_start is in dst_vma */
1570 * Skip finding src_vma if src_start is in dst_vma. This also ensures
1583 * vma_start_read(dst_vma)
1588 * vma_start_write(dst_vma)
1621 static void uffd_move_unlock(struct vm_area_struct *dst_vma,
1625 if (src_vma != dst_vma)
1626 vma_end_read(dst_vma);
1646 static void uffd_move_unlock(struct vm_area_struct *dst_vma,
1650 mmap_read_unlock(dst_vma->vm_mm);
1734 struct vm_area_struct *src_vma, *dst_vma;
1749 err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
1769 if (dst_vma->vm_flags & VM_SHARED)
1771 if (dst_start + len > dst_vma->vm_end)
1774 err = validate_move_areas(ctx, src_vma, dst_vma);
1843 dst_pmdval, dst_vma, src_vma,
1864 dst_vma, src_vma,
1892 uffd_move_unlock(dst_vma, src_vma);
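
Everything from line 1542 down is the driver for UFFDIO_MOVE: lock the source and destination VMAs (which may be the same VMA, hence the asymmetric unlock at lines 1621-1650), reject VM_SHARED destinations and ranges that spill out of dst_vma, run validate_move_areas(), then move PTEs or whole PMDs one at a time without copying the data. A userspace sketch, assuming the destination is registered for missing faults on this uffd, both areas are private anonymous memory, and move_range() is an illustrative helper:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stddef.h>

static long move_range(int uffd, void *dst, void *src, size_t len)
{
        struct uffdio_move mv = {
                .dst = (unsigned long)dst,
                .src = (unsigned long)src,
                .len = len,
                /* ALLOW_SRC_HOLES treats unpopulated source pages as success. */
                .mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES,
        };

        if (ioctl(uffd, UFFDIO_MOVE, &mv) == -1)
                return -1;
        return mv.move;         /* bytes actually moved */
}
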