/linux/mm/
userfaultfd.c
      24  bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)  [in validate_dst_vma(), argument]
      27  if (dst_end > dst_vma->vm_end)  [in validate_dst_vma()]
      35  if (!dst_vma->vm_userfaultfd_ctx.ctx)  [in validate_dst_vma()]
     103  struct vm_area_struct *dst_vma;  [in uffd_mfill_lock(), local]
     105  dst_vma = uffd_lock_vma(dst_mm, dst_start);  [in uffd_mfill_lock()]
     106  if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))  [in uffd_mfill_lock()]
     107  return dst_vma;  [in uffd_mfill_lock()]
     109  vma_end_read(dst_vma);  [in uffd_mfill_lock()]
     124  struct vm_area_struct *dst_vma;  [in uffd_mfill_lock(), local]
     127  dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);  [in uffd_mfill_lock()]
     [all …]
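The validate_dst_vma() hits above show the two checks applied to a candidate destination VMA before a userfaultfd fill: the requested range must end inside the VMA, and the VMA must still carry a userfaultfd context. Below is a minimal user-space sketch of that range-and-registration check; struct vma_stub and its fields are illustrative assumptions, not the kernel's struct vm_area_struct.

#include <stdbool.h>

/*
 * Simplified stand-ins for the kernel structures; the field names are
 * illustrative only and do not match struct vm_area_struct.
 */
struct uffd_ctx_stub { int registered; };

struct vma_stub {
        unsigned long vm_start;
        unsigned long vm_end;
        struct uffd_ctx_stub *uffd_ctx;   /* plays the role of vm_userfaultfd_ctx.ctx */
};

/*
 * Same shape as the validate_dst_vma() hits: the destination range must
 * end inside the VMA, and the VMA must be registered with a userfaultfd.
 */
static bool validate_dst_vma_stub(const struct vma_stub *dst_vma,
                                  unsigned long dst_end)
{
        if (dst_end > dst_vma->vm_end)
                return false;
        if (!dst_vma->uffd_ctx)
                return false;
        return true;
}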
memory.c
     925  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,  [in copy_nonpresent_pte(), argument]
     928  vm_flags_t vm_flags = dst_vma->vm_flags;  [in copy_nonpresent_pte()]
     990  folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);  [in copy_nonpresent_pte()]
    1020  pte_marker marker = copy_pte_marker(entry, dst_vma);  [in copy_nonpresent_pte()]
    1027  if (!userfaultfd_wp(dst_vma))  [in copy_nonpresent_pte()]
    1046  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  [in copy_present_page(), argument]
    1067  folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);  [in copy_present_page()]
    1068  folio_add_lru_vma(new_folio, dst_vma);  [in copy_present_page()]
    1072  pte = folio_mk_pte(new_folio, dst_vma->vm_page_prot);  [in copy_present_page()]
    1073  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);  [in copy_present_page()]
    [all …]
hugetlb.c
    5566  struct vm_area_struct *dst_vma,  [in copy_hugetlb_page_range(), argument]
    5605  dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);  [in copy_hugetlb_page_range()]
    5630  if (!userfaultfd_wp(dst_vma))  [in copy_hugetlb_page_range()]
    5649  if (!userfaultfd_wp(dst_vma))  [in copy_hugetlb_page_range()]
    5654  pte_to_swp_entry(entry), dst_vma);  [in copy_hugetlb_page_range()]
    5683  new_folio = alloc_hugetlb_folio(dst_vma, addr, false);  [in copy_hugetlb_page_range()]
    5690  addr, dst_vma);  [in copy_hugetlb_page_range()]
    5703  restore_reserve_on_error(h, dst_vma, addr,  [in copy_hugetlb_page_range()]
    5709  hugetlb_install_folio(dst_vma, dst_pte, addr,  [in copy_hugetlb_page_range()]
    5728  if (!userfaultfd_wp(dst_vma))  [in copy_hugetlb_page_range()]
    [all …]
huge_memory.c
    1660  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)  [in copy_huge_pmd(), argument]
    1689  if (!vma_is_anonymous(dst_vma))  [in copy_huge_pmd()]
    1721  if (!userfaultfd_wp(dst_vma))  [in copy_huge_pmd()]
    1753  if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {  [in copy_huge_pmd()]
    1767  if (!userfaultfd_wp(dst_vma))  [in copy_huge_pmd()]
    2531  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  [in move_pages_huge_pmd(), argument]
    2548  vma_assert_locked(dst_vma);  [in move_pages_huge_pmd()]
    2631  folio_move_anon_rmap(src_folio, dst_vma);  [in move_pages_huge_pmd()]
    2632  src_folio->index = linear_page_index(dst_vma, dst_addr);  [in move_pages_huge_pmd()]
    2634  _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);  [in move_pages_huge_pmd()]
    [all …]
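The move_pages_huge_pmd() hits rebase a source folio onto the destination VMA: the anon rmap is moved, the folio's index is recomputed with linear_page_index(dst_vma, dst_addr), and a new PMD is built from dst_vma->vm_page_prot. Below is a minimal sketch of the index arithmetic behind linear_page_index(), ignoring the hugetlb special case and assuming a 4 KiB page size and a simplified VMA stand-in.

#define PAGE_SHIFT_STUB 12   /* assumption: 4 KiB pages */

struct vma_index_stub {
        unsigned long vm_start;   /* first address covered by the VMA */
        unsigned long vm_pgoff;   /* page offset of vm_start within the backing object */
};

/*
 * Page index that 'addr' has within the VMA's backing object; this is the
 * kind of value the hit at line 2632 stores into src_folio->index.
 */
static unsigned long linear_page_index_stub(const struct vma_index_stub *vma,
                                            unsigned long addr)
{
        return vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT_STUB);
}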
shmem.c
    3154  struct vm_area_struct *dst_vma,  [in shmem_mfill_atomic_pte(), argument]
    3160  struct inode *inode = file_inode(dst_vma->vm_file);  [in shmem_mfill_atomic_pte()]
    3164  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);  [in shmem_mfill_atomic_pte()]
    3242  ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);  [in shmem_mfill_atomic_pte()]
    3249  ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,  [in shmem_mfill_atomic_pte()]
/linux/include/linux/
rmap.h
     559  struct page *page, int nr_pages, struct vm_area_struct *dst_vma,  [in __folio_dup_file_rmap(), argument]
     578  folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);  [in __folio_dup_file_rmap()]
     583  folio_inc_large_mapcount(folio, dst_vma);  [in __folio_dup_file_rmap()]
     602  struct page *page, int nr_pages, struct vm_area_struct *dst_vma)  [in folio_dup_file_rmap_ptes(), argument]
     604  __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);  [in folio_dup_file_rmap_ptes()]
     608  struct page *page, struct vm_area_struct *dst_vma)  [in folio_dup_file_rmap_pte(), argument]
     610  __folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);  [in folio_dup_file_rmap_pte()]
     624  struct page *page, struct vm_area_struct *dst_vma)  [in folio_dup_file_rmap_pmd(), argument]
     627  __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PTE);  [in folio_dup_file_rmap_pmd()]
     634  struct page *page, int nr_pages, struct vm_area_struct *dst_vma,  [in __folio_try_dup_anon_rmap(), argument]
     [all …]
shmem_fs.h
     201  struct vm_area_struct *dst_vma,
     207  #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \  [argument]
userfaultfd_k.h
     115  struct vm_area_struct *dst_vma,
     140  struct vm_area_struct *dst_vma,
mm_inline.h
     544  swp_entry_t entry, struct vm_area_struct *dst_vma)  [in copy_pte_marker(), argument]
     551  if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))  [in copy_pte_marker()]
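The copy_pte_marker() hit shows the destination-side filtering of PTE markers: the uffd-wp marker bit is carried over only if the destination VMA is still registered for userfaultfd write protection. Below is a minimal sketch of that check, assuming a made-up marker bit and a boolean in place of userfaultfd_wp(dst_vma); other marker bits are handled separately in the kernel and simply pass through here.

#include <stdbool.h>

#define MARKER_UFFD_WP_STUB  (1u << 0)   /* illustrative bit, not the kernel value */

/*
 * Keep the uffd-wp marker only when the destination VMA is registered
 * for write protection; everything else is passed through unchanged.
 */
static unsigned int copy_pte_marker_stub(unsigned int src_marker,
                                         bool dst_vma_uffd_wp)
{
        unsigned int dst_marker = src_marker & ~MARKER_UFFD_WP_STUB;

        if ((src_marker & MARKER_UFFD_WP_STUB) && dst_vma_uffd_wp)
                dst_marker |= MARKER_UFFD_WP_STUB;

        return dst_marker;
}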
hugetlb.h
     146  struct vm_area_struct *dst_vma,
     332  struct vm_area_struct *dst_vma,  [in copy_hugetlb_page_range(), argument]
     395  struct vm_area_struct *dst_vma,  [in hugetlb_mfill_atomic_pte(), argument]
huge_mm.h
      13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
mm.h
    2466  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);