
Searched refs: src_vma (Results 1 – 9 of 9) sorted by relevance

/linux/mm/
userfaultfd.c
1031 struct vm_area_struct *src_vma, in move_present_pte() argument
1055 orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte); in move_present_pte()
1148 struct vm_area_struct *src_vma, in move_zeropage_pte() argument
1166 ptep_clear_flush(src_vma, src_addr, src_pte); in move_zeropage_pte()
1181 struct vm_area_struct *src_vma, in move_pages_pte() argument
1199 flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE); in move_pages_pte()
1272 err = move_zeropage_pte(mm, dst_vma, src_vma, in move_pages_pte()
1299 folio = vm_normal_folio(src_vma, src_addr, orig_src_pte); in move_pages_pte()
1378 err = move_present_pte(mm, dst_vma, src_vma, in move_pages_pte()
1492 struct vm_area_struct *src_vma, in validate_move_areas() argument
1622 uffd_move_unlock(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma) uffd_move_unlock() argument
1647 uffd_move_unlock(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma) uffd_move_unlock() argument
1734 struct vm_area_struct *src_vma, *dst_vma; move_pages() local
[all...]
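
The hits above are the kernel side of UFFDIO_MOVE: move_pages_pte() clears the present PTE in src_vma (ptep_clear_flush()) and installs it under dst_vma, so the page is remapped rather than copied. A minimal userspace sketch of the ioctl that drives this path, assuming Linux 6.8+ with UFFD_FEATURE_MOVE; error handling mostly trimmed:

        /*
         * Move one page between two anonymous mappings with UFFDIO_MOVE,
         * the ioctl served by move_pages()/move_pages_pte() above.
         */
        #include <fcntl.h>
        #include <linux/userfaultfd.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        int main(void)
        {
                long page = sysconf(_SC_PAGESIZE);
                int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);

                struct uffdio_api api = { .api = UFFD_API, .features = UFFD_FEATURE_MOVE };
                if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
                        return 1;

                char *src = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                char *dst = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                strcpy(src, "hello");                   /* fault the source page in */

                /* The destination range must be registered with the uffd. */
                struct uffdio_register reg = {
                        .range = { .start = (unsigned long)dst, .len = page },
                        .mode  = UFFDIO_REGISTER_MODE_MISSING,
                };
                if (ioctl(uffd, UFFDIO_REGISTER, &reg))
                        return 1;

                /* Remap src's page at dst: the kernel clears the source PTE
                 * (ptep_clear_flush()) and installs it at the destination. */
                struct uffdio_move mv = {
                        .src = (unsigned long)src,
                        .dst = (unsigned long)dst,
                        .len = page,
                };
                if (ioctl(uffd, UFFDIO_MOVE, &mv))
                        perror("UFFDIO_MOVE");
                else
                        printf("moved: %s\n", dst);     /* "hello", without a copy */
                return 0;
        }

On success the data is readable at dst and the source page is gone; with UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES, an unpopulated source page is treated as a no-op instead of failing with ENOENT.
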
memory.c
797 struct vm_area_struct *src_vma, unsigned long addr, int *rss) in copy_nonpresent_pte() argument
861 folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma); in copy_nonpresent_pte()
886 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); in copy_nonpresent_pte()
887 if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte)) in copy_nonpresent_pte()
917 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, in copy_present_page() argument
933 if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma)) in copy_present_page()
953 struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, in __copy_present_ptes() argument
956 struct mm_struct *src_mm = src_vma->vm_mm; in __copy_present_ptes()
959 if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) { in __copy_present_ptes()
965 if (src_vma->vm_flags & VM_SHARED) in __copy_present_ptes()
983 copy_present_ptes(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pte_t * dst_pte,pte_t * src_pte,pte_t pte,unsigned long addr,int max_nr,int * rss,struct folio ** prealloc) copy_present_ptes() argument
1078 copy_pte_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pmd_t * dst_pmd,pmd_t * src_pmd,unsigned long addr,unsigned long end) copy_pte_range() argument
1233 copy_pmd_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pud_t * dst_pud,pud_t * src_pud,unsigned long addr,unsigned long end) copy_pmd_range() argument
1269 copy_pud_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,p4d_t * dst_p4d,p4d_t * src_p4d,unsigned long addr,unsigned long end) copy_pud_range() argument
1306 copy_p4d_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pgd_t * dst_pgd,pgd_t * src_pgd,unsigned long addr,unsigned long end) copy_p4d_range() argument
1335 vma_needs_copy(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma) vma_needs_copy() argument
1362 copy_page_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma) copy_page_range() argument
[all...]
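
These hits are the fork-time copy walk: copy_page_range() descends p4d/pud/pmd/pte, and for a private (COW) mapping __copy_present_ptes() write-protects the PTE in both parent and child instead of copying the page. The effect is observable from userspace; a small demo using nothing beyond standard fork/mmap:

        /*
         * What the write-protection in __copy_present_ptes() buys userspace:
         * after fork(), parent and child share the page read-only; the first
         * write triggers a COW fault and each side keeps its own copy.
         */
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <sys/wait.h>
        #include <unistd.h>

        int main(void)
        {
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                strcpy(p, "parent data");

                if (fork() == 0) {                      /* child: write forces COW */
                        strcpy(p, "child data");
                        printf("child sees:  %s\n", p);
                        _exit(0);
                }
                wait(NULL);
                printf("parent sees: %s\n", p);         /* still "parent data" */
                return 0;
        }
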
hugetlb.c
5553 struct vm_area_struct *src_vma) in copy_hugetlb_page_range() argument
5558 bool cow = is_cow_mapping(src_vma->vm_flags); in copy_hugetlb_page_range()
5559 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range()
5568 src_vma->vm_start, in copy_hugetlb_page_range()
5569 src_vma->vm_end); in copy_hugetlb_page_range()
5571 vma_assert_write_locked(src_vma); in copy_hugetlb_page_range()
5580 hugetlb_vma_lock_read(src_vma); in copy_hugetlb_page_range()
5584 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
5586 src_pte = hugetlb_walk(src_vma, addr, sz); in copy_hugetlb_page_range()
[all...]
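
copy_hugetlb_page_range() is the same fork-time walk for hugetlb VMAs, stepping by the hstate's huge page size (sz) rather than PAGE_SIZE. A sketch that exercises it, assuming 2 MiB hugepages have been reserved (e.g. sysctl vm.nr_hugepages=1):

        /*
         * A private MAP_HUGETLB mapping sends fork() through
         * copy_hugetlb_page_range() above.
         */
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <sys/wait.h>
        #include <unistd.h>

        #define LEN (2UL << 20)                         /* one 2 MiB huge page */

        int main(void)
        {
                char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
                if (p == MAP_FAILED) {
                        perror("mmap(MAP_HUGETLB)");    /* no hugepages reserved? */
                        return 1;
                }
                strcpy(p, "huge");
                if (fork() == 0) {      /* child's tables filled by the copy walk */
                        printf("child: %s\n", p);
                        _exit(0);
                }
                wait(NULL);
                return 0;
        }
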
huge_memory.c
1668 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) in copy_huge_pmd() argument
1691 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); in copy_huge_pmd()
1760 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) { in copy_huge_pmd()
1766 __split_huge_pmd(src_vma, src_pmd, addr, false); in copy_huge_pmd()
2531 * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
2538 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, in move_pages_huge_pmd() argument
2554 vma_assert_locked(src_vma); in move_pages_huge_pmd()
2588 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE); in move_pages_huge_pmd()
2630 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); in move_pages_huge_pmd()
2645 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); in move_pages_huge_pmd()
[all...]
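
copy_huge_pmd() duplicates a THP mapping one PMD entry at a time at fork, and move_pages_huge_pmd() is the UFFDIO_MOVE path for a whole huge page (pmdp_huge_clear_flush() plus re-install, mirroring the PTE case above). A best-effort userspace sketch of the fork side; MADV_HUGEPAGE only requests THP, so the huge-PMD path is not guaranteed:

        /*
         * A THP-backed anonymous range is copied at fork() by copy_huge_pmd()
         * above: one write-protected PMD entry instead of 512 PTEs.
         */
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <sys/wait.h>
        #include <unistd.h>

        #define LEN (2UL << 20)

        int main(void)
        {
                char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                madvise(p, LEN, MADV_HUGEPAGE); /* hint: back this range with a THP */
                memset(p, 'x', LEN);            /* fault the range in */

                if (fork() == 0) {
                        p[0] = 'y';     /* COW fault on the shared (huge) mapping */
                        _exit(0);
                }
                wait(NULL);
                printf("parent still sees: %c\n", p[0]);        /* 'x' */
                return 0;
        }
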
/linux/include/linux/
rmap.h
643 struct vm_area_struct *src_vma, enum rmap_level level) in __folio_try_dup_anon_rmap() argument
660 unlikely(folio_needs_cow_for_dma(src_vma, folio)); in __folio_try_dup_anon_rmap()
711 * @src_vma: The vm area from which the mappings are duplicated
730 struct vm_area_struct *src_vma) in folio_try_dup_anon_rmap_ptes() argument
733 src_vma, RMAP_LEVEL_PTE); in folio_try_dup_anon_rmap_ptes()
738 struct vm_area_struct *src_vma) in folio_try_dup_anon_rmap_pte() argument
740 return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma, in folio_try_dup_anon_rmap_pte()
750 * @src_vma: The vm area from which the mapping is duplicated
769 struct vm_area_struct *src_vma) in folio_try_dup_anon_rmap_pmd() argument
773 src_vma, RMAP_LEVEL_PMD); in folio_try_dup_anon_rmap_pmd()
[all...]
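
__folio_try_dup_anon_rmap() is what makes the PTE/PMD copies above conditional: it refuses to duplicate the mapping of an anon folio that may be DMA-pinned in a COW mapping (folio_needs_cow_for_dma()), forcing the caller to copy the page instead of sharing it. A condensed sketch of the caller pattern from mm/memory.c, illustrative only and not buildable outside the kernel tree:

        folio_get(folio);
        if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
                /* Folio may be DMA-pinned, so it cannot become COW-shared:
                 * drop the reference and copy a fresh page for the child. */
                folio_put(folio);
                return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
                                         addr, rss, prealloc, page);
        }
        /* Duplicated: fork continues with write-protected, shared PTEs. */
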
userfaultfd_k.h
141 struct vm_area_struct *src_vma,

huge_mm.h
13 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);

hugetlb.h
333 struct vm_area_struct *src_vma) in copy_hugetlb_page_range() argument

mm.h
2373 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);