Lines matching refs: vma
107 if (!userfaultfd_wp(vmf->vma)) in vmf_orig_pte_uffd_wp()
390 struct vm_area_struct *vma = unmap->first; in free_pgtables() local
403 unsigned long addr = vma->vm_start; in free_pgtables()
413 vma_start_write(vma); in free_pgtables()
414 unlink_anon_vmas(vma); in free_pgtables()
417 unlink_file_vma_batch_add(&vb, vma); in free_pgtables()
422 while (next && next->vm_start <= vma->vm_end + PMD_SIZE) { in free_pgtables()
423 vma = next; in free_pgtables()
426 vma_start_write(vma); in free_pgtables()
427 unlink_anon_vmas(vma); in free_pgtables()
428 unlink_file_vma_batch_add(&vb, vma); in free_pgtables()
432 free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start, in free_pgtables()
434 vma = next; in free_pgtables()
435 } while (vma); in free_pgtables()
599 static void print_bad_page_map(struct vm_area_struct *vma, in print_bad_page_map() argument
609 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_page_map()
610 index = linear_page_index(vma, addr); in print_bad_page_map()
614 __print_bad_page_map_pgtable(vma->vm_mm, addr); in print_bad_page_map()
618 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_page_map()
620 vma->vm_file, in print_bad_page_map()
621 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_page_map()
622 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_page_map()
623 vma->vm_file ? vma->vm_file->f_op->mmap_prepare : NULL, in print_bad_page_map()
628 #define print_bad_pte(vma, addr, pte, page) \ argument
629 print_bad_page_map(vma, addr, pte_val(pte), page, PGTABLE_LEVEL_PTE)
696 static inline struct page *__vm_normal_page(struct vm_area_struct *vma, in __vm_normal_page() argument
703 if (vma->vm_ops && vma->vm_ops->find_normal_page) in __vm_normal_page()
704 return vma->vm_ops->find_normal_page(vma, addr); in __vm_normal_page()
706 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in __vm_normal_page()
711 print_bad_page_map(vma, addr, entry, NULL, level); in __vm_normal_page()
719 if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) { in __vm_normal_page()
720 if (vma->vm_flags & VM_MIXEDMAP) { in __vm_normal_page()
725 unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT; in __vm_normal_page()
728 if (pfn == vma->vm_pgoff + off) in __vm_normal_page()
730 if (!is_cow_mapping(vma->vm_flags)) in __vm_normal_page()
741 print_bad_page_map(vma, addr, entry, NULL, level); in __vm_normal_page()
764 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
767 return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte), in vm_normal_page()
783 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, in vm_normal_folio() argument
786 struct page *page = vm_normal_page(vma, addr, pte); in vm_normal_folio()
806 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
809 return __vm_normal_page(vma, addr, pmd_pfn(pmd), pmd_special(pmd), in vm_normal_page_pmd()
825 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, in vm_normal_folio_pmd() argument
828 struct page *page = vm_normal_page_pmd(vma, addr, pmd); in vm_normal_folio_pmd()
847 struct page *vm_normal_page_pud(struct vm_area_struct *vma, in vm_normal_page_pud() argument
850 return __vm_normal_page(vma, addr, pud_pfn(pud), pud_special(pud), in vm_normal_page_pud()
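The vm_normal_page() family above is the canonical route from a raw page
table entry to its struct page. A minimal sketch of a typical caller under
the usual PTE-lock discipline; the helper name walk_one_pte() is
hypothetical, while every API it calls appears in or alongside this file:

	/* Hypothetical helper: map and lock the PTE, then resolve the
	 * backing page. vm_normal_page() returns NULL for "special"
	 * mappings (e.g. the zero page or raw PFN mappings). */
	static struct page *walk_one_pte(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmd)
	{
		spinlock_t *ptl;
		pte_t *ptep, pte;
		struct page *page = NULL;

		ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
		if (!ptep)
			return NULL;
		pte = ptep_get(ptep);
		if (pte_present(pte))
			page = vm_normal_page(vma, addr, pte);
		pte_unmap_unlock(ptep, ptl);
		return page;
	}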
881 static void restore_exclusive_pte(struct vm_area_struct *vma, in restore_exclusive_pte() argument
889 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in restore_exclusive_pte()
896 if ((vma->vm_flags & VM_WRITE) && in restore_exclusive_pte()
897 can_change_pte_writable(vma, address, pte)) { in restore_exclusive_pte()
900 pte = pte_mkwrite(pte, vma); in restore_exclusive_pte()
902 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
908 update_mmu_cache(vma, address, ptep); in restore_exclusive_pte()
915 static int try_restore_exclusive_pte(struct vm_area_struct *vma, in try_restore_exclusive_pte() argument
923 restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte); in try_restore_exclusive_pte()
1199 struct vm_area_struct *vma, unsigned long addr, bool need_zero) in folio_prealloc() argument
1204 new_folio = vma_alloc_zeroed_movable_folio(vma, addr); in folio_prealloc()
1206 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr); in folio_prealloc()
1604 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, in zap_install_uffd_wp_if_needed() argument
1614 if (vma_is_anonymous(vma)) in zap_install_uffd_wp_if_needed()
1622 if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval)) in zap_install_uffd_wp_if_needed()
1634 struct vm_area_struct *vma, struct folio *folio, in zap_present_folio_ptes() argument
1651 if (pte_young(ptent) && likely(vma_has_recency(vma))) in zap_present_folio_ptes()
1660 arch_check_zapped_pte(vma, ptent); in zap_present_folio_ptes()
1662 if (unlikely(userfaultfd_pte_wp(vma, ptent))) in zap_present_folio_ptes()
1663 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, in zap_present_folio_ptes()
1667 folio_remove_rmap_ptes(folio, page, nr, vma); in zap_present_folio_ptes()
1670 print_bad_pte(vma, addr, ptent, page); in zap_present_folio_ptes()
1685 struct vm_area_struct *vma, pte_t *pte, pte_t ptent, in zap_present_ptes() argument
1695 page = vm_normal_page(vma, addr, ptent); in zap_present_ptes()
1699 arch_check_zapped_pte(vma, ptent); in zap_present_ptes()
1701 if (userfaultfd_pte_wp(vma, ptent)) in zap_present_ptes()
1702 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, in zap_present_ptes()
1720 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, in zap_present_ptes()
1725 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr, in zap_present_ptes()
1731 struct vm_area_struct *vma, pte_t *pte, pte_t ptent, in zap_nonpresent_ptes() argument
1753 WARN_ON_ONCE(!vma_is_anonymous(vma)); in zap_nonpresent_ptes()
1755 folio_remove_rmap_pte(folio, page, vma); in zap_nonpresent_ptes()
1776 if (!vma_is_anonymous(vma) && !zap_drop_markers(details)) in zap_nonpresent_ptes()
1795 clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm); in zap_nonpresent_ptes()
1796 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); in zap_nonpresent_ptes()
1802 struct vm_area_struct *vma, pte_t *pte, in do_zap_pte_range() argument
1827 nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr, in do_zap_pte_range()
1831 nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr, in do_zap_pte_range()
1896 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1929 nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss, in zap_pte_range()
1957 tlb_flush_rmaps(tlb, vma); in zap_pte_range()
1988 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
2000 __split_huge_pmd(vma, pmd, addr, false); in zap_pmd_range()
2001 else if (zap_huge_pmd(tlb, vma, pmd, addr)) { in zap_pmd_range()
2021 addr = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
2030 struct vm_area_struct *vma, p4d_t *p4d, in zap_pud_range() argument
2042 split_huge_pud(vma, pud, addr); in zap_pud_range()
2043 else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
2049 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
2058 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
2070 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
2077 struct vm_area_struct *vma, in unmap_page_range() argument
2085 tlb_start_vma(tlb, vma); in unmap_page_range()
2086 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
2091 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
2093 tlb_end_vma(tlb, vma); in unmap_page_range()
2098 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
2101 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
2104 if (start >= vma->vm_end) in unmap_single_vma()
2106 end = min(vma->vm_end, end_addr); in unmap_single_vma()
2107 if (end <= vma->vm_start) in unmap_single_vma()
2110 if (vma->vm_file) in unmap_single_vma()
2111 uprobe_munmap(vma, start, end); in unmap_single_vma()
2114 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
2126 if (vma->vm_file) { in unmap_single_vma()
2129 __unmap_hugepage_range(tlb, vma, start, end, in unmap_single_vma()
2133 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
2155 struct vm_area_struct *vma; in unmap_vmas() local
2163 vma = unmap->first; in unmap_vmas()
2164 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in unmap_vmas()
2170 hugetlb_zap_begin(vma, &start, &end); in unmap_vmas()
2171 unmap_single_vma(tlb, vma, start, end, &details); in unmap_vmas()
2172 hugetlb_zap_end(vma, &details); in unmap_vmas()
2173 vma = mas_find(unmap->mas, unmap->tree_end - 1); in unmap_vmas()
2174 } while (vma); in unmap_vmas()
2190 struct vm_area_struct *vma, unsigned long address, in zap_page_range_single_batched() argument
2196 VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm); in zap_page_range_single_batched()
2198 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in zap_page_range_single_batched()
2200 hugetlb_zap_begin(vma, &range.start, &range.end); in zap_page_range_single_batched()
2201 update_hiwater_rss(vma->vm_mm); in zap_page_range_single_batched()
2207 unmap_single_vma(tlb, vma, address, end, details); in zap_page_range_single_batched()
2209 if (is_vm_hugetlb_page(vma)) { in zap_page_range_single_batched()
2215 hugetlb_zap_end(vma, details); in zap_page_range_single_batched()
2216 tlb_gather_mmu(tlb, vma->vm_mm); in zap_page_range_single_batched()
2229 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
2234 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
2235 zap_page_range_single_batched(&tlb, vma, address, size, details); in zap_page_range_single()
2250 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
2253 if (!range_in_vma(vma, address, address + size) || in zap_vma_ptes()
2254 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
2257 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
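zap_vma_ptes() above is the driver-facing wrapper: it refuses anything that
is not a VM_PFNMAP range fully inside the VMA, then delegates to
zap_page_range_single(). A sketch of the usual teardown call from a driver;
my_unmap_bo() and my_size are hypothetical:

	/* Drop every PTE the driver previously inserted into this
	 * VM_PFNMAP VMA; the range must lie entirely inside it. */
	static void my_unmap_bo(struct vm_area_struct *vma,
				unsigned long my_size)
	{
		zap_vma_ptes(vma, vma->vm_start, my_size);
	}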
2293 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma) in vm_mixed_zeropage_allowed() argument
2295 VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP); in vm_mixed_zeropage_allowed()
2302 if (mm_forbids_zeropage(vma->vm_mm)) in vm_mixed_zeropage_allowed()
2305 if (is_cow_mapping(vma->vm_flags)) in vm_mixed_zeropage_allowed()
2308 if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) in vm_mixed_zeropage_allowed()
2319 return vma->vm_ops && vma->vm_ops->pfn_mkwrite && in vm_mixed_zeropage_allowed()
2320 (vma_is_fsdax(vma) || vma->vm_flags & VM_IO); in vm_mixed_zeropage_allowed()
2323 static int validate_page_before_insert(struct vm_area_struct *vma, in validate_page_before_insert() argument
2331 if (!vm_mixed_zeropage_allowed(vma)) in validate_page_before_insert()
2341 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_into_pte_locked() argument
2357 pteval = maybe_mkwrite(pteval, vma); in insert_page_into_pte_locked()
2359 if (ptep_set_access_flags(vma, addr, pte, pteval, 1)) in insert_page_into_pte_locked()
2360 update_mmu_cache(vma, addr, pte); in insert_page_into_pte_locked()
2373 pteval = maybe_mkwrite(pte_mkdirty(pteval), vma); in insert_page_into_pte_locked()
2375 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); in insert_page_into_pte_locked()
2376 folio_add_file_rmap_pte(folio, page, vma); in insert_page_into_pte_locked()
2378 set_pte_at(vma->vm_mm, addr, pte, pteval); in insert_page_into_pte_locked()
2382 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
2389 retval = validate_page_before_insert(vma, page); in insert_page()
2393 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
2396 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot, in insert_page()
2403 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_in_batch_locked() argument
2408 err = validate_page_before_insert(vma, page); in insert_page_in_batch_locked()
2411 return insert_page_into_pte_locked(vma, pte, addr, page, prot, false); in insert_page_in_batch_locked()
2417 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, in insert_pages() argument
2423 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
2452 int err = insert_page_in_batch_locked(vma, pte, in insert_pages()
2490 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
2495 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
2497 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_pages()
2498 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
2499 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_pages()
2500 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_pages()
2503 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
2537 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
2540 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
2542 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
2543 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2544 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
2545 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_page()
2547 return insert_page(vma, addr, page, vma->vm_page_prot, false); in vm_insert_page()
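As the checks in vm_insert_page() show, the target mapping must not be
VM_PFNMAP, and the first insertion sets VM_MIXEDMAP on the VMA, which is
why the mmap lock must be held for writing there. A hedged sketch of the
common pattern in a driver's ->mmap handler; my_get_page() is hypothetical:

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct page *page = my_get_page(file); /* hypothetical */

		if (vma_pages(vma) != 1)
			return -EINVAL;
		/* Maps the page at vm_start with vma->vm_page_prot. */
		return vm_insert_page(vma, vma->vm_start, page);
	}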
2564 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, in __vm_map_pages() argument
2567 unsigned long count = vma_pages(vma); in __vm_map_pages()
2568 unsigned long uaddr = vma->vm_start; in __vm_map_pages()
2578 return vm_insert_pages(vma, uaddr, pages + offset, &count); in __vm_map_pages()
2599 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
2602 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); in vm_map_pages()
2619 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
2622 return __vm_map_pages(vma, pages, num, 0); in vm_map_pages_zero()
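vm_map_pages() layers vm_insert_pages() under the vm_pgoff convention (the
_zero variant ignores vm_pgoff entirely), which suits drivers that expose a
pre-allocated page array via mmap. A sketch under the assumption of a
hypothetical struct my_buf holding that array:

	static int my_mmap_buf(struct file *file, struct vm_area_struct *vma)
	{
		struct my_buf *b = file->private_data; /* hypothetical */

		/* Fails cleanly if the VMA is larger than the array. */
		return vm_map_pages(vma, b->pages, b->npages);
	}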
2626 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
2629 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2654 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2655 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
2656 update_mmu_cache(vma, addr, pte); in insert_pfn()
2666 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2670 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
2710 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
2719 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_prot()
2720 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_prot()
2722 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_prot()
2723 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_prot()
2725 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2733 return insert_pfn(vma, addr, pfn, pgprot, false); in vmf_insert_pfn_prot()
2757 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
2760 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
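vmf_insert_pfn() is built to be called from a ->fault handler on a
VM_PFNMAP mapping and returns a VM_FAULT_* code directly. A minimal
sketch, with my_pfn_at() standing in for the driver's offset-to-PFN
translation:

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		unsigned long pfn = my_pfn_at(vmf); /* hypothetical */

		/* On success this returns VM_FAULT_NOPAGE, so the core
		 * fault path does not expect a struct page back. */
		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
	}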
2764 static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn, in vm_mixed_ok() argument
2768 (mkwrite || !vm_mixed_zeropage_allowed(vma))) in vm_mixed_ok()
2771 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
2778 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, in __vm_insert_mixed() argument
2781 pgprot_t pgprot = vma->vm_page_prot; in __vm_insert_mixed()
2784 if (!vm_mixed_ok(vma, pfn, mkwrite)) in __vm_insert_mixed()
2787 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2811 err = insert_page(vma, addr, page, pgprot, mkwrite); in __vm_insert_mixed()
2813 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
2827 pgprot_t pgprot = vmf->vma->vm_page_prot; in vmf_insert_page_mkwrite()
2831 if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end) in vmf_insert_page_mkwrite()
2834 err = insert_page(vmf->vma, addr, page, pgprot, write); in vmf_insert_page_mkwrite()
2844 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
2847 return __vm_insert_mixed(vma, addr, pfn, false); in vmf_insert_mixed()
2856 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, in vmf_insert_mixed_mkwrite() argument
2859 return __vm_insert_mixed(vma, addr, pfn, true); in vmf_insert_mixed_mkwrite()
2979 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_internal() argument
2985 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_internal()
2991 VM_WARN_ON_ONCE(!vma_test_all_flags_mask(vma, VMA_REMAP_FLAGS)); in remap_pfn_range_internal()
2996 flush_cache_range(vma, addr, end); in remap_pfn_range_internal()
3012 static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_notrack() argument
3015 int error = remap_pfn_range_internal(vma, addr, pfn, size, prot); in remap_pfn_range_notrack()
3025 zap_page_range_single(vma, addr, size, NULL); in remap_pfn_range_notrack()
3058 static int remap_pfn_range_track(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_track() argument
3075 if (addr == vma->vm_start && addr + size == vma->vm_end) { in remap_pfn_range_track()
3076 if (vma->pfnmap_track_ctx) in remap_pfn_range_track()
3085 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); in remap_pfn_range_track()
3090 vma->pfnmap_track_ctx = ctx; in remap_pfn_range_track()
3095 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in do_remap_pfn_range() argument
3098 return remap_pfn_range_track(vma, addr, pfn, size, prot); in do_remap_pfn_range()
3101 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in do_remap_pfn_range() argument
3104 return remap_pfn_range_notrack(vma, addr, pfn, size, prot); in do_remap_pfn_range()
3120 static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_prepare_vma() argument
3126 err = get_remap_pgoff(is_cow_mapping(vma->vm_flags), addr, end, in remap_pfn_range_prepare_vma()
3127 vma->vm_start, vma->vm_end, pfn, &vma->vm_pgoff); in remap_pfn_range_prepare_vma()
3131 vma_set_flags_mask(vma, VMA_REMAP_FLAGS); in remap_pfn_range_prepare_vma()
3147 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
3152 err = remap_pfn_range_prepare_vma(vma, addr, pfn, size); in remap_pfn_range()
3156 return do_remap_pfn_range(vma, addr, pfn, size, prot); in remap_pfn_range()
3160 int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_complete() argument
3163 return do_remap_pfn_range(vma, addr, pfn, size, prot); in remap_pfn_range_complete()
3181 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
3200 if (vma->vm_pgoff > pages) in vm_iomap_memory()
3202 pfn += vma->vm_pgoff; in vm_iomap_memory()
3203 pages -= vma->vm_pgoff; in vm_iomap_memory()
3206 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
3211 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
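vm_iomap_memory() is the convenience wrapper over io_remap_pfn_range(): it
validates vm_pgoff against the region and maps the whole VMA eagerly at
mmap time instead of on fault. The textbook use from a driver's ->mmap,
with my_phys and my_len as placeholder resource bounds:

	static int my_mmap_io(struct file *file, struct vm_area_struct *vma)
	{
		phys_addr_t my_phys = 0xfd000000; /* hypothetical BAR base */
		unsigned long my_len = SZ_1M;     /* hypothetical size */

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return vm_iomap_memory(vma, my_phys, my_len);
	}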
3466 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user() local
3467 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
3471 if (copy_mc_user_highpage(dst, src, addr, vma)) in __wp_page_copy_user()
3501 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3507 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
3508 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
3526 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3558 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) in __get_fault_gfp_mask() argument
3560 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
3585 if (vmf->vma->vm_file && in do_page_mkwrite()
3586 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
3589 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3613 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
3617 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
3631 file_update_time(vma->vm_file); in fault_dirty_shared_page()
3667 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3684 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3686 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
3687 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3688 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3700 struct vm_area_struct *vma = vmf->vma; in vmf_can_call_fault() local
3702 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) in vmf_can_call_fault()
3704 vma_end_read(vma); in vmf_can_call_fault()
3725 struct vm_area_struct *vma = vmf->vma; in __vmf_anon_prepare() local
3728 if (likely(vma->anon_vma)) in __vmf_anon_prepare()
3731 if (!mmap_read_trylock(vma->vm_mm)) in __vmf_anon_prepare()
3734 if (__anon_vma_prepare(vma)) in __vmf_anon_prepare()
3737 mmap_read_unlock(vma->vm_mm); in __vmf_anon_prepare()
3761 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
3762 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3780 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); in wp_page_copy()
3827 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3828 entry = folio_mk_pte(new_folio, vma->vm_page_prot); in wp_page_copy()
3836 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
3846 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3847 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); in wp_page_copy()
3848 folio_add_lru_vma(new_folio, vma); in wp_page_copy()
3851 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3875 folio_remove_rmap_pte(old_folio, vmf->page, vma); in wp_page_copy()
3883 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3928 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3929 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3938 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3952 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
3954 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3963 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3975 struct vm_area_struct *vma = vmf->vma; in wp_page_shared() local
3980 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
4014 struct vm_area_struct *vma) in __wp_can_reuse_large_anon_folio() argument
4065 VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id && in __wp_can_reuse_large_anon_folio()
4066 folio_mm_id(folio, 1) != vma->vm_mm->mm_id); in __wp_can_reuse_large_anon_folio()
4080 struct vm_area_struct *vma) in __wp_can_reuse_large_anon_folio() argument
4087 struct vm_area_struct *vma) in wp_can_reuse_anon_folio() argument
4090 return __wp_can_reuse_large_anon_folio(folio, vma); in wp_can_reuse_anon_folio()
4122 folio_move_anon_rmap(folio, vma); in wp_can_reuse_anon_folio()
4153 struct vm_area_struct *vma = vmf->vma; in do_wp_page() local
4158 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
4159 if (!userfaultfd_wp_async(vma)) { in do_wp_page()
4171 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_wp_page()
4183 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
4184 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
4185 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
4188 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
4197 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in do_wp_page()
4220 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { in do_wp_page()
4244 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
4248 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
4256 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
4259 vma_interval_tree_foreach(vma, root, first_index, last_index) { in unmap_mapping_range_tree()
4260 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
4261 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
4265 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
4266 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
4267 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
4378 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry() local
4399 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
4403 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
4406 restore_exclusive_pte(vma, folio, vmf->page, vmf->address, in remove_device_exclusive_entry()
4425 struct vm_area_struct *vma, in should_try_to_free_swap() argument
4439 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || in should_try_to_free_swap()
4454 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
4467 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
4474 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
4490 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
4525 struct vm_area_struct *vma = vmf->vma; in __alloc_swap_folio() local
4529 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); in __alloc_swap_folio()
4534 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in __alloc_swap_folio()
4603 struct vm_area_struct *vma = vmf->vma; in alloc_swap_folio() local
4617 if (unlikely(userfaultfd_armed(vma))) in alloc_swap_folio()
4633 orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT, in alloc_swap_folio()
4635 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_swap_folio()
4642 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in alloc_swap_folio()
4662 gfp = vma_thp_gfp_mask(vma); in alloc_swap_folio()
4665 folio = vma_alloc_folio(gfp, order, vma, addr); in alloc_swap_folio()
4667 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in alloc_swap_folio()
4708 struct vm_area_struct *vma = vmf->vma; in do_swap_page() local
4728 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
4739 vma_end_read(vma); in do_swap_page()
4745 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4774 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
4787 swap_update_readahead(folio, vma, vmf->address); in do_swap_page()
4810 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4821 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
4854 folio = ksm_might_need_to_copy(folio, vma, vmf->address); in do_swap_page()
4881 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
4903 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) in do_swap_page()
4905 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) in do_swap_page()
5002 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_swap_page()
5003 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); in do_swap_page()
5004 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
5018 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) && in do_swap_page()
5019 !pte_needs_soft_dirty_wp(vma, pte)) { in do_swap_page()
5020 pte = pte_mkwrite(pte, vma); in do_swap_page()
5029 flush_icache_pages(vma, page, nr_pages); in do_swap_page()
5034 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); in do_swap_page()
5035 folio_add_lru_vma(folio, vma); in do_swap_page()
5044 folio_add_new_anon_rmap(folio, vma, address, rmap_flags); in do_swap_page()
5048 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, in do_swap_page()
5055 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); in do_swap_page()
5056 arch_do_swap_page_nr(vma->vm_mm, vma, address, in do_swap_page()
5064 if (should_try_to_free_swap(si, folio, vma, nr_pages, vmf->flags)) in do_swap_page()
5089 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); in do_swap_page()
5129 struct vm_area_struct *vma = vmf->vma; in alloc_anon_folio() local
5142 if (unlikely(userfaultfd_armed(vma))) in alloc_anon_folio()
5150 orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT, in alloc_anon_folio()
5152 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_anon_folio()
5180 gfp = vma_thp_gfp_mask(vma); in alloc_anon_folio()
5183 folio = vma_alloc_folio(gfp, order, vma, addr); in alloc_anon_folio()
5185 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in alloc_anon_folio()
5209 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); in alloc_anon_folio()
5219 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page() local
5227 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
5234 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
5239 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
5241 vma->vm_page_prot)); in do_anonymous_page()
5242 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
5247 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
5250 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
5254 if (userfaultfd_missing(vma)) { in do_anonymous_page()
5282 entry = folio_mk_pte(folio, vma->vm_page_prot); in do_anonymous_page()
5284 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
5285 entry = pte_mkwrite(pte_mkdirty(entry), vma); in do_anonymous_page()
5287 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
5291 update_mmu_tlb(vma, addr, vmf->pte); in do_anonymous_page()
5294 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
5298 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
5303 if (userfaultfd_missing(vma)) { in do_anonymous_page()
5310 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_anonymous_page()
5312 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in do_anonymous_page()
5313 folio_add_lru_vma(folio, vma); in do_anonymous_page()
5317 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); in do_anonymous_page()
5320 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
5339 struct vm_area_struct *vma = vmf->vma; in __do_fault() local
5359 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
5364 ret = vma->vm_ops->fault(vmf); in __do_fault()
5396 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte() local
5398 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
5403 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
5409 struct vm_area_struct *vma = vmf->vma; in do_set_pmd() local
5422 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags, in do_set_pmd()
5426 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) in do_set_pmd()
5447 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
5452 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
5456 flush_icache_pages(vma, page, HPAGE_PMD_NR); in do_set_pmd()
5458 entry = folio_mk_pmd(folio, vma->vm_page_prot); in do_set_pmd()
5460 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_set_pmd()
5462 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); in do_set_pmd()
5463 folio_add_file_rmap_pmd(folio, page, vma); in do_set_pmd()
5471 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
5473 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
5500 struct vm_area_struct *vma = vmf->vma; in set_pte_range() local
5505 flush_icache_pages(vma, page, nr); in set_pte_range()
5506 entry = mk_pte(page, vma->vm_page_prot); in set_pte_range()
5514 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in set_pte_range()
5520 if (write && !(vma->vm_flags & VM_SHARED)) { in set_pte_range()
5522 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in set_pte_range()
5523 folio_add_lru_vma(folio, vma); in set_pte_range()
5525 folio_add_file_rmap_ptes(folio, page, nr, vma); in set_pte_range()
5527 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
5530 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
5558 struct vm_area_struct *vma = vmf->vma; in finish_fault() local
5563 !(vma->vm_flags & VM_SHARED); in finish_fault()
5582 if (!(vma->vm_flags & VM_SHARED)) { in finish_fault()
5583 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
5588 if (!needs_fallback && vma->vm_file) { in finish_fault()
5589 struct address_space *mapping = vma->vm_file->f_mapping; in finish_fault()
5613 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
5614 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
5621 if (unlikely(userfaultfd_armed(vma)) || unlikely(needs_fallback)) { in finish_fault()
5626 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in finish_fault()
5635 vma_off + (nr_pages - idx) > vma_pages(vma) || in finish_fault()
5646 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
5653 update_mmu_tlb(vma, addr, vmf->pte); in finish_fault()
5665 add_mm_counter(vma->vm_mm, type, nr_pages); in finish_fault()
5738 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
5748 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
5751 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
5757 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
5769 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
5772 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
5813 struct vm_area_struct *vma = vmf->vma; in do_cow_fault() local
5823 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
5835 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { in do_cow_fault()
5855 struct vm_area_struct *vma = vmf->vma; in do_shared_fault() local
5873 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
5905 struct vm_area_struct *vma = vmf->vma; in do_fault() local
5906 struct mm_struct *vm_mm = vma->vm_mm; in do_fault()
5912 if (!vma->vm_ops->fault) { in do_fault()
5913 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
5934 else if (!(vma->vm_flags & VM_SHARED)) in do_fault()
5951 struct vm_area_struct *vma = vmf->vma; in numa_migrate_check() local
5968 if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) in numa_migrate_check()
5980 vma_set_access_pid_bit(vma); in numa_migrate_check()
5994 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_single_mapping() argument
6000 old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); in numa_rebuild_single_mapping()
6001 pte = pte_modify(old_pte, vma->vm_page_prot); in numa_rebuild_single_mapping()
6004 pte = pte_mkwrite(pte, vma); in numa_rebuild_single_mapping()
6005 ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); in numa_rebuild_single_mapping()
6006 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); in numa_rebuild_single_mapping()
6009 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_large_mapping() argument
6020 start = max3(addr_start, pt_start, vma->vm_start); in numa_rebuild_large_mapping()
6022 vma->vm_end); in numa_rebuild_large_mapping()
6037 ptent = pte_modify(ptent, vma->vm_page_prot); in numa_rebuild_large_mapping()
6040 can_change_pte_writable(vma, addr, ptent)) in numa_rebuild_large_mapping()
6044 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); in numa_rebuild_large_mapping()
6050 struct vm_area_struct *vma = vmf->vma; in do_numa_page() local
6054 bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); in do_numa_page()
6073 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
6081 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
6084 folio = vm_normal_folio(vma, vmf->address, pte); in do_numa_page()
6095 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { in do_numa_page()
6113 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
6127 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, in do_numa_page()
6130 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, in do_numa_page()
6141 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd() local
6142 if (vma_is_anonymous(vma)) in create_huge_pmd()
6144 if (vma->vm_ops->huge_fault) in create_huge_pmd()
6145 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
6152 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd() local
6156 if (vma_is_anonymous(vma)) { in wp_huge_pmd()
6158 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { in wp_huge_pmd()
6159 if (userfaultfd_wp_async(vmf->vma)) in wp_huge_pmd()
6166 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pmd()
6167 if (vma->vm_ops->huge_fault) { in wp_huge_pmd()
6168 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
6176 __split_huge_pmd(vma, vmf->pmd, vmf->address, false); in wp_huge_pmd()
6185 struct vm_area_struct *vma = vmf->vma; in create_huge_pud() local
6187 if (vma_is_anonymous(vma)) in create_huge_pud()
6189 if (vma->vm_ops->huge_fault) in create_huge_pud()
6190 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
6199 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud() local
6203 if (vma_is_anonymous(vma)) in wp_huge_pud()
6205 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pud()
6206 if (vma->vm_ops->huge_fault) { in wp_huge_pud()
6207 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
6214 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
6251 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in fix_spurious_fault()
6254 flush_tlb_fix_spurious_fault_pmd(vmf->vma, vmf->address, in fix_spurious_fault()
6302 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
6322 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
6328 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
6338 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
6340 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
6355 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, in __handle_mm_fault() argument
6359 .vma = vma, in __handle_mm_fault()
6363 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
6364 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()
6366 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
6367 vm_flags_t vm_flags = vma->vm_flags; in __handle_mm_fault()
6382 thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PUD_ORDER)) { in __handle_mm_fault()
6416 thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) { in __handle_mm_fault()
6437 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
6527 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
6530 current->in_lru_fault = vma_has_recency(vma); in lru_gen_enter_fault()
6538 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
6547 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, in sanitize_fault_flags() argument
6557 if (!is_cow_mapping(vma->vm_flags)) in sanitize_fault_flags()
6561 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) in sanitize_fault_flags()
6564 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && in sanitize_fault_flags()
6565 !is_cow_mapping(vma->vm_flags))) in sanitize_fault_flags()
6589 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
6593 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault()
6599 ret = sanitize_fault_flags(vma, &flags); in handle_mm_fault()
6603 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, in handle_mm_fault()
6610 is_droppable = !!(vma->vm_flags & VM_DROPPABLE); in handle_mm_fault()
6619 lru_gen_enter_fault(vma); in handle_mm_fault()
6621 if (unlikely(is_vm_hugetlb_page(vma))) in handle_mm_fault()
6622 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
6624 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
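handle_mm_fault() is the entry point arch fault handlers reach with the
mmap lock (or a per-VMA lock) held. A compressed sketch of the usual
arch-side retry pattern; the VMA lookup and retry label are elided:

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_RETRY) {
		/* The mmap lock was dropped inside handle_mm_fault();
		 * mark the retry and fault the address in again. */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}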
6742 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma) in pfnmap_lockdep_assert() argument
6745 struct file *file = vma->vm_file; in pfnmap_lockdep_assert()
6750 lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6752 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6789 struct vm_area_struct *vma = args->vma; in follow_pfnmap_start() local
6791 struct mm_struct *mm = vma->vm_mm; in follow_pfnmap_start()
6799 pfnmap_lockdep_assert(vma); in follow_pfnmap_start()
6801 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in follow_pfnmap_start()
6804 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfnmap_start()
6904 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
6913 struct follow_pfnmap_args args = { .vma = vma, .address = addr }; in generic_access_phys()
6979 struct vm_area_struct *vma = NULL; in __access_remote_vm() local
6981 gup_flags, &vma); in __access_remote_vm()
6985 vma = vma_lookup(mm, addr); in __access_remote_vm()
6986 if (!vma) { in __access_remote_vm()
6987 vma = expand_stack(mm, addr); in __access_remote_vm()
6990 if (!vma) in __access_remote_vm()
7003 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
7004 bytes = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
7018 copy_to_user_page(vma, page, addr, in __access_remote_vm()
7022 copy_from_user_page(vma, page, addr, in __access_remote_vm()
7106 struct vm_area_struct *vma = NULL; in __copy_remote_vm_str() local
7108 page = get_user_page_vma_remote(mm, addr, gup_flags, &vma); in __copy_remote_vm_str()
7143 copy_from_user_page(vma, page, addr, buf, maddr + (PAGE_SIZE - 1), 1); in __copy_remote_vm_str()
7203 struct vm_area_struct *vma; in print_vma_addr() local
7211 vma = vma_lookup(mm, ip); in print_vma_addr()
7212 if (vma && vma->vm_file) { in print_vma_addr()
7213 struct file *f = vma->vm_file; in print_vma_addr()
7214 ip -= vma->vm_start; in print_vma_addr()
7215 ip += vma->vm_pgoff << PAGE_SHIFT; in print_vma_addr()
7217 vma->vm_start, in print_vma_addr()
7218 vma->vm_end - vma->vm_start); in print_vma_addr()
7370 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
7384 addr + i*PAGE_SIZE, vma)) in copy_user_gigantic_page()
7393 struct vm_area_struct *vma; member
7402 if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma)) in copy_subpage()
7408 unsigned long addr_hint, struct vm_area_struct *vma) in copy_user_large_folio() argument
7414 .vma = vma, in copy_user_large_folio()
7418 return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages); in copy_user_large_folio()
7483 void vma_pgtable_walk_begin(struct vm_area_struct *vma) in vma_pgtable_walk_begin() argument
7485 if (is_vm_hugetlb_page(vma)) in vma_pgtable_walk_begin()
7486 hugetlb_vma_lock_read(vma); in vma_pgtable_walk_begin()
7489 void vma_pgtable_walk_end(struct vm_area_struct *vma) in vma_pgtable_walk_end() argument
7491 if (is_vm_hugetlb_page(vma)) in vma_pgtable_walk_end()
7492 hugetlb_vma_unlock_read(vma); in vma_pgtable_walk_end()
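vma_pgtable_walk_begin()/vma_pgtable_walk_end() exist so generic walkers
can bracket a page table walk and transparently take the extra hugetlb VMA
lock when one is needed. The intended pairing, with do_my_walk() as a
hypothetical walk body:

	vma_pgtable_walk_begin(vma);
	ret = do_my_walk(vma);	/* hypothetical walker */
	vma_pgtable_walk_end(vma);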