
2 // SPDX-License-Identifier: GPL-2.0-only
10 * demand-loading started 01.12.91 - seems it is high on the list of
11 * things wanted, and it should be easy to implement. - Linus
15 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
16 * pages started 02.12.91, seems to work. - Linus.
22 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
28 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
30 * 20.12.91 - Ok, making the swap-device changeable like the root.
34 * 05.04.94 - Multi-page memory management added for v1.1.
37 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
71 #include <linux/memory-tiers.h>
82 #include <trace/events/kmem.h>
91 #include "pgalloc-track.h"
96 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
112 * Return true if the original pte was a uffd-wp pte marker (so the pte was
113 * wr-protected).
117 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in vmf_orig_pte_uffd_wp()
120 return pte_marker_uffd_wp(vmf->orig_pte); in vmf_orig_pte_uffd_wp()
193 mm_dec_nr_ptes(tlb->mm); in free_pte_range()
221 if (end - 1 > ceiling - 1) in free_pmd_range()
227 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
255 if (end - 1 > ceiling - 1) in free_pud_range()
261 mm_dec_nr_puds(tlb->mm); in free_pud_range()
289 if (end - 1 > ceiling - 1) in free_p4d_range()
298 * This function frees user-level page tables of a process.
314 * Why all these "- 1"s? Because 0 represents both the bottom in free_pgd_range()
315 * of the address space and the top of it (using -1 for the in free_pgd_range()
319 * Comparisons need to use "end - 1" and "ceiling - 1" (though in free_pgd_range()
330 * bother to round floor or end up - the tests don't need that. in free_pgd_range()
344 if (end - 1 > ceiling - 1) in free_pgd_range()
345 end -= PMD_SIZE; in free_pgd_range()
346 if (addr > end - 1) in free_pgd_range()
353 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
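The "- 1" comparisons above exist because an end or ceiling of 0 means "top of the address space": subtracting 1 first turns 0 into ULONG_MAX, so the clamp and the emptiness test stay correct across the wrap. A standalone user-space illustration of that comparison (plain C, not kernel code; the addresses are made up):

#include <stdio.h>

/* Wrap-safe "is range end a strictly above range end b?" where an end of 0
 * stands for the very top of the address space. */
static int end_above(unsigned long a, unsigned long b)
{
        return a - 1 > b - 1;
}

int main(void)
{
        unsigned long ceiling = 0;                /* "top of the address space" */
        unsigned long end     = 0x7fffffffe000UL; /* an ordinary range end      */

        printf("end above ceiling? %d\n", end_above(end, ceiling));  /* 0 */
        printf("ceiling above end? %d\n", end_above(ceiling, end));  /* 1 */
        return 0;
}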
367 unsigned long addr = vma->vm_start; in free_pgtables()
374 next = mas_find(mas, ceiling - 1); in free_pgtables()
388 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
389 floor, next ? next->vm_start : ceiling); in free_pgtables()
394 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
397 next = mas_find(mas, ceiling - 1); in free_pgtables()
405 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
406 floor, next ? next->vm_start : ceiling); in free_pgtables()
426 * of a chain of data-dependent loads, meaning most CPUs (alpha in pmd_install()
428 * seen in-order. See the alpha page table accessors for the in pmd_install()
442 return -ENOMEM; in __pte_alloc()
454 return -ENOMEM; in __pte_alloc_kernel()
484 * is found. For example, we might have a PFN-mapped pte in
492 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
521 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
525 current->comm, in print_bad_pte()
530 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
532 vma->vm_file, in print_bad_pte()
533 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
534 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
535 mapping ? mapping->a_ops->read_folio : NULL); in print_bad_pte()
541 * vm_normal_page -- This function gets the "struct page" associated with a pte.
561 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
590 if (vma->vm_ops && vma->vm_ops->find_special_page) in vm_normal_page()
591 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
592 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
613 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
614 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
620 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
621 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
623 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
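In the VM_PFNMAP branch above, a page that was remapped by remap_pfn_range() is told apart from a COWed page by arithmetic alone: a still-remapped pte has pfn == vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT), exactly the identity quoted in the kerneldoc. A standalone sketch of that test with made-up numbers (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* Invented values for one VMA created by remap_pfn_range(). */
        unsigned long vm_start = 0x7f0000000000UL;
        unsigned long vm_pgoff = 0x100000UL;           /* first remapped pfn  */
        unsigned long addr     = vm_start + 5 * 4096;  /* 6th page of the VMA */

        unsigned long off          = (addr - vm_start) >> PAGE_SHIFT;
        unsigned long remapped_pfn = vm_pgoff + off;   /* untouched mapping   */
        unsigned long cowed_pfn    = 0x2345UL;         /* freshly COWed page  */

        printf("pfn %#lx -> %s\n", remapped_pfn,
               remapped_pfn == vm_pgoff + off ? "special (no struct page handling)"
                                              : "normal page");
        printf("pfn %#lx -> %s\n", cowed_pfn,
               cowed_pfn == vm_pgoff + off ? "special" : "normal (COWed) page");
        return 0;
}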
663 * in a direct-access (dax) mapping, so let's just replicate the in vm_normal_page_pmd()
666 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
667 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
673 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
674 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
676 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
717 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in restore_exclusive_pte()
743 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
746 * No need to invalidate - it was non-present before. However in restore_exclusive_pte()
769 return -EBUSY; in try_restore_exclusive_pte()
783 unsigned long vm_flags = dst_vma->vm_flags; in copy_nonpresent_pte()
792 return -EIO; in copy_nonpresent_pte()
795 if (unlikely(list_empty(&dst_mm->mmlist))) { in copy_nonpresent_pte()
797 if (list_empty(&dst_mm->mmlist)) in copy_nonpresent_pte()
798 list_add(&dst_mm->mmlist, in copy_nonpresent_pte()
799 &src_mm->mmlist); in copy_nonpresent_pte()
848 * We do not preserve soft-dirty information, because so in copy_nonpresent_pte()
870 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); in copy_nonpresent_pte()
872 return -EBUSY; in copy_nonpresent_pte()
873 return -ENOENT; in copy_nonpresent_pte()
893 * and re-use the pte the traditional way.
895 * And if we need a pre-allocated page but don't yet have
910 return -EAGAIN; in copy_present_page()
917 copy_user_highpage(&new_folio->page, page, addr, src_vma); in copy_present_page()
924 pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot); in copy_present_page()
927 /* Uffd-wp needs to be delivered to dest pte as well */ in copy_present_page()
929 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
934 * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
942 struct mm_struct *src_mm = src_vma->vm_mm; in copy_present_pte()
943 unsigned long vm_flags = src_vma->vm_flags; in copy_present_pte()
993 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_pte()
1025 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pte_range()
1026 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pte_range()
1044 * protected by mmap_lock-less collapse skipping areas with anon_vma in copy_pte_range()
1050 ret = -ENOMEM; in copy_pte_range()
1066 * We are holding two locks at this point - either of them in copy_pte_range()
1085 if (ret == -EIO) { in copy_pte_range()
1088 } else if (ret == -EBUSY) { in copy_pte_range()
1099 WARN_ON_ONCE(ret != -ENOENT); in copy_pte_range()
1105 * If we need a pre-allocated page for this pte, drop the in copy_pte_range()
1108 if (unlikely(ret == -EAGAIN)) in copy_pte_range()
1112 * pre-alloc page cannot be reused by next time so as in copy_pte_range()
1129 if (ret == -EIO) { in copy_pte_range()
1132 ret = -ENOMEM; in copy_pte_range()
1136 } else if (ret == -EBUSY) { in copy_pte_range()
1138 } else if (ret == -EAGAIN) { in copy_pte_range()
1141 return -ENOMEM; in copy_pte_range()
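The return codes above form a small protocol between the copy helpers and the copy_pte_range() loop: -EAGAIN asks the loop to drop its locks, preallocate a destination page (which may sleep) and restart from the same address, while -EIO, -EBUSY and -ENOMEM are resolved or propagated after the loop. A deliberately simplified, standalone model of just the -EAGAIN leg; every helper name here is hypothetical, not a kernel API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins that only exercise the retry shape. */
static int copy_one_entry(void **prealloc)
{
        if (!*prealloc)
                return -EAGAIN;   /* "I need a preallocated page first" */
        return 0;                 /* copy done */
}

static void *alloc_prealloc_page(void)
{
        return malloc(4096);      /* the kernel analogue may sleep */
}

static int copy_range_model(void)
{
        void *prealloc = NULL;
        int ret;
again:
        ret = copy_one_entry(&prealloc);
        if (ret == -EAGAIN) {
                /* Locks would be dropped here before the allocation. */
                prealloc = alloc_prealloc_page();
                if (!prealloc)
                        return -ENOMEM;
                goto again;
        }
        free(prealloc);
        return ret;
}

int main(void)
{
        printf("copy_range_model() = %d\n", copy_range_model());
        return 0;
}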
1162 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pmd_range()
1163 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pmd_range()
1169 return -ENOMEM; in copy_pmd_range()
1176 VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma); in copy_pmd_range()
1179 if (err == -ENOMEM) in copy_pmd_range()
1180 return -ENOMEM; in copy_pmd_range()
1189 return -ENOMEM; in copy_pmd_range()
1199 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pud_range()
1200 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pud_range()
1206 return -ENOMEM; in copy_pud_range()
1213 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma); in copy_pud_range()
1216 if (err == -ENOMEM) in copy_pud_range()
1217 return -ENOMEM; in copy_pud_range()
1226 return -ENOMEM; in copy_pud_range()
1236 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_p4d_range()
1242 return -ENOMEM; in copy_p4d_range()
1250 return -ENOMEM; in copy_p4d_range()
1264 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's in vma_needs_copy()
1265 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable in vma_needs_copy()
1266 * contains uffd-wp protection information, that's something we can't in vma_needs_copy()
1272 if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vma_needs_copy()
1275 if (src_vma->anon_vma) in vma_needs_copy()
1292 unsigned long addr = src_vma->vm_start; in copy_page_range()
1293 unsigned long end = src_vma->vm_end; in copy_page_range()
1294 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_page_range()
1295 struct mm_struct *src_mm = src_vma->vm_mm; in copy_page_range()
1306 if (unlikely(src_vma->vm_flags & VM_PFNMAP)) { in copy_page_range()
1322 is_cow = is_cow_mapping(src_vma->vm_flags); in copy_page_range()
1336 raw_write_seqcount_begin(&src_mm->write_protect_seq); in copy_page_range()
1349 ret = -ENOMEM; in copy_page_range()
1355 raw_write_seqcount_end(&src_mm->write_protect_seq); in copy_page_range()
1369 return details->even_cows; in should_zap_cows()
1383 /* Otherwise we should only zap non-anon pages */ in should_zap_page()
1392 return details->zap_flags & ZAP_FLAG_DROP_MARKER; in zap_drop_file_uffd_wp()
1396 * This function makes sure that we'll replace the none pte with an uffd-wp
1419 struct mm_struct *mm = tlb->mm; in zap_pte_range()
1453 tlb->fullmm); in zap_pte_range()
1476 rss[mm_counter(page)]--; in zap_pte_range()
1500 * consider uffd-wp bit when zap. For more information, in zap_pte_range()
1504 rss[mm_counter(page)]--; in zap_pte_range()
1512 rss[MM_SWAPENTS]--; in zap_pte_range()
1519 rss[mm_counter(page)]--; in zap_pte_range()
1537 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1575 if (next - addr != HPAGE_PMD_SIZE) in zap_pmd_range()
1582 } else if (details && details->single_folio && in zap_pmd_range()
1583 folio_test_pmd_mappable(details->single_folio) && in zap_pmd_range()
1584 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { in zap_pmd_range()
1585 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in zap_pmd_range()
1599 pmd--; in zap_pmd_range()
1617 if (next - addr != HPAGE_PUD_SIZE) { in zap_pud_range()
1618 mmap_assert_locked(tlb->mm); in zap_pud_range()
1663 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1679 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1682 if (start >= vma->vm_end) in unmap_single_vma()
1684 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1685 if (end <= vma->vm_start) in unmap_single_vma()
1688 if (vma->vm_file) in unmap_single_vma()
1691 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1697 * It is undesirable to test vma->vm_file as it in unmap_single_vma()
1698 * should be non-null for valid hugetlb area. in unmap_single_vma()
1701 * hugetlbfs ->mmap method fails, in unmap_single_vma()
1702 * mmap_region() nullifies vma->vm_file in unmap_single_vma()
1707 if (vma->vm_file) { in unmap_single_vma()
1709 details->zap_flags : 0; in unmap_single_vma()
1719 * unmap_vmas - unmap a range of memory covered by a list of vma's
1736 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1747 /* Careful - we need to zap private pages too! */ in unmap_vmas()
1751 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in unmap_vmas()
1761 vma = mas_find(mas, tree_end - 1); in unmap_vmas()
1767 * zap_page_range_single - remove user pages in a given range
1783 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in zap_page_range_single()
1786 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
1787 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1790 * unmap 'address-end' not 'range.start-range.end' as range in zap_page_range_single()
1800 * zap_vma_ptes - remove ptes mapping the vma
1814 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1859 return -EINVAL; in validate_page_before_insert()
1870 return -EBUSY; in insert_page_into_pte_locked()
1873 inc_mm_counter(vma->vm_mm, mm_counter_file(page)); in insert_page_into_pte_locked()
1875 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); in insert_page_into_pte_locked()
1896 retval = -ENOMEM; in insert_page()
1897 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
1912 return -EINVAL; in insert_page_in_batch_locked()
1928 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
1934 ret = -EFAULT; in insert_pages()
1940 remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); in insert_pages()
1943 ret = -ENOMEM; in insert_pages()
1953 ret = -EFAULT; in insert_pages()
1962 remaining_pages_total -= pte_idx; in insert_pages()
1969 pages_to_write_in_pmd -= batch_size; in insert_pages()
1970 remaining_pages_total -= batch_size; in insert_pages()
1981 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1998 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; in vm_insert_pages()
2000 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
2001 return -EFAULT; in vm_insert_pages()
2002 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_pages()
2003 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
2004 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_pages()
2008 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
2013 * vm_insert_page - insert single page into user vma
2034 * Usually this function is called from f_op->mmap() handler
2035 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2037 * function from other places, for example from page-fault handler.
2044 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
2045 return -EFAULT; in vm_insert_page()
2047 return -EINVAL; in vm_insert_page()
2048 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
2049 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2050 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
2053 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
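As the kerneldoc notes, vm_insert_page() is normally called from an f_op->mmap() handler while mmap_lock is held for write, which is what allows it to set VM_MIXEDMAP on the vma. A minimal sketch of such a handler; the driver ("mydrv"), its use of file->private_data for a page allocated at open time, and the single-page mapping policy are all assumptions:

#include <linux/fs.h>
#include <linux/mm.h>

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct page *page = file->private_data;  /* assumed: set up at open() */

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        /* mmap_lock is held for write here, so vm_insert_page() may still
         * turn the vma into a VM_MIXEDMAP mapping if it is not one yet. */
        return vm_insert_page(vma, vma->vm_start, page);
}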
2058 * __vm_map_pages - maps range of kernel pages into user vma
2072 unsigned long uaddr = vma->vm_start; in __vm_map_pages()
2077 return -ENXIO; in __vm_map_pages()
2080 if (count > num - offset) in __vm_map_pages()
2081 return -ENXIO; in __vm_map_pages()
2094 * vm_map_pages - maps range of kernel pages starts with non zero offset
2114 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); in vm_map_pages()
2119 * vm_map_pages_zero - map range of kernel pages starts with zero offset
2141 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2193 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2200 * to override pgprot on a per-page basis.
2207 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2208 * caching- and encryption bits different than those of @vma->vm_page_prot,
2209 * because the caching- or encryption mode may not be known at mmap() time.
2211 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2214 * functions that don't touch caching- or encryption bits, using pte_modify()
2217 * Also when new page-table entries are created, this is only done using the
2218 * fault() callback, and never using the value of vma->vm_page_prot,
2219 * except for page-table entries that point to anonymous pages as the result
2234 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_prot()
2235 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_prot()
2237 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_prot()
2238 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_prot()
2240 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2254 * vmf_insert_pfn - insert single pfn into user vma
2262 * This function should only be called from a vm_ops->fault handler, and
2276 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
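vmf_insert_pfn() is meant for a vm_ops->fault handler of a VM_PFNMAP mapping, with the pfn usually derived from the faulting offset into a device's physical window. A minimal sketch; the "mydev" structure, its phys_base field and the use of vm_private_data are assumptions, not a real driver:

#include <linux/mm.h>

struct mydev {
        phys_addr_t phys_base;    /* assumed: start of the device window */
};

static vm_fault_t mydev_fault(struct vm_fault *vmf)
{
        struct mydev *dev = vmf->vma->vm_private_data;
        unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;

        /* Uses vma->vm_page_prot; the mmap handler is assumed to have
         * marked the vma VM_PFNMAP, as the BUG_ON()s above require. */
        return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct mydev_vm_ops = {
        .fault = mydev_fault,
};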
2283 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
2297 pgprot_t pgprot = vma->vm_page_prot; in __vm_insert_mixed()
2302 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2332 if (err == -ENOMEM) in __vm_insert_mixed()
2334 if (err < 0 && err != -EBUSY) in __vm_insert_mixed()
2362 * in null mappings (currently treated as "copy-on-access")
2374 return -ENOMEM; in remap_pte_range()
2379 err = -EACCES; in remap_pte_range()
2398 pfn -= addr >> PAGE_SHIFT; in remap_pmd_range()
2401 return -ENOMEM; in remap_pmd_range()
2421 pfn -= addr >> PAGE_SHIFT; in remap_pud_range()
2424 return -ENOMEM; in remap_pud_range()
2443 pfn -= addr >> PAGE_SHIFT; in remap_p4d_range()
2446 return -ENOMEM; in remap_p4d_range()
2459 * must have pre-validated the caching bits of the pgprot_t.
2467 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_notrack()
2471 return -EINVAL; in remap_pfn_range_notrack()
2486 * There's a horrible special case to handle copy-on-write in remap_pfn_range_notrack()
2488 * un-COW'ed pages by matching them up with "vma->vm_pgoff". in remap_pfn_range_notrack()
2491 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range_notrack()
2492 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range_notrack()
2493 return -EINVAL; in remap_pfn_range_notrack()
2494 vma->vm_pgoff = pfn; in remap_pfn_range_notrack()
2500 pfn -= addr >> PAGE_SHIFT; in remap_pfn_range_notrack()
2515 * remap_pfn_range - remap kernel memory to userspace
2533 return -EINVAL; in remap_pfn_range()
2543 * vm_iomap_memory - remap memory to userspace
2552 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2553 * whatever write-combining details or similar.
2563 return -EINVAL; in vm_iomap_memory()
2565 * You *really* shouldn't map things that aren't page-aligned, in vm_iomap_memory()
2573 return -EINVAL; in vm_iomap_memory()
2576 if (vma->vm_pgoff > pages) in vm_iomap_memory()
2577 return -EINVAL; in vm_iomap_memory()
2578 pfn += vma->vm_pgoff; in vm_iomap_memory()
2579 pages -= vma->vm_pgoff; in vm_iomap_memory()
2582 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2584 return -EINVAL; in vm_iomap_memory()
2587 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
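vm_iomap_memory() wraps the checks shown above (page alignment of the window, vm_pgoff within it, requested length fitting the VMA) around remap_pfn_range(), so a driver's ->mmap handler only passes the physical window; adjusting vm_page_prot first is the tweak the comment mentions. A minimal sketch with a hypothetical mmio_start/mmio_len pair:

#include <linux/fs.h>
#include <linux/mm.h>

struct myiodrv {
        phys_addr_t   mmio_start;   /* assumed: base of the MMIO region */
        unsigned long mmio_len;     /* assumed: size of the MMIO region */
};

static int myiodrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct myiodrv *drv = file->private_data;

        /* Optional caching tweak before mapping, per the kerneldoc above. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return vm_iomap_memory(vma, drv->mmio_start, drv->mmio_len);
}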
2605 return -ENOMEM; in apply_to_pte_range()
2611 return -EINVAL; in apply_to_pte_range()
2648 return -ENOMEM; in apply_to_pmd_range()
2657 return -EINVAL; in apply_to_pmd_range()
2684 return -ENOMEM; in apply_to_pud_range()
2693 return -EINVAL; in apply_to_pud_range()
2720 return -ENOMEM; in apply_to_p4d_range()
2729 return -EINVAL; in apply_to_p4d_range()
2755 return -EINVAL; in __apply_to_page_range()
2763 return -EINVAL; in __apply_to_page_range()
2808 * read non-atomically. Before making any commitment, on those architectures
2819 spin_lock(vmf->ptl); in pte_unmap_same()
2820 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); in pte_unmap_same()
2821 spin_unlock(vmf->ptl); in pte_unmap_same()
2824 pte_unmap(vmf->pte); in pte_unmap_same()
2825 vmf->pte = NULL; in pte_unmap_same()
2832 * -EHWPOISON: copy failed due to hwpoison in source page
2833 * -EAGAIN: copy failed (some other reason)
2841 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user()
2842 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
2843 unsigned long addr = vmf->address; in __wp_page_copy_user()
2848 return -EHWPOISON; in __wp_page_copy_user()
2855 * a "struct page" for it. We do a best-effort copy by in __wp_page_copy_user()
2857 * fails, we just zero-fill it. Live with it. in __wp_page_copy_user()
2867 vmf->pte = NULL; in __wp_page_copy_user()
2868 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { in __wp_page_copy_user()
2871 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
2872 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
2877 if (vmf->pte) in __wp_page_copy_user()
2878 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2879 ret = -EAGAIN; in __wp_page_copy_user()
2883 entry = pte_mkyoung(vmf->orig_pte); in __wp_page_copy_user()
2884 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
2885 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
2895 if (vmf->pte) in __wp_page_copy_user()
2898 /* Re-validate under PTL if the page is still mapped */ in __wp_page_copy_user()
2899 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
2900 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
2902 if (vmf->pte) in __wp_page_copy_user()
2903 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2904 ret = -EAGAIN; in __wp_page_copy_user()
2915 * use-case in __wp_page_copy_user()
2926 if (vmf->pte) in __wp_page_copy_user()
2927 pte_unmap_unlock(vmf->pte, vmf->ptl); in __wp_page_copy_user()
2937 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
2940 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; in __get_fault_gfp_mask()
2958 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
2960 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
2962 if (vmf->vma->vm_file && in do_page_mkwrite()
2963 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2966 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2968 vmf->flags = old_flags; in do_page_mkwrite()
2973 if (!folio->mapping) { in do_page_mkwrite()
2990 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
2992 struct folio *folio = page_folio(vmf->page); in fault_dirty_shared_page()
2994 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
2999 * Take a local copy of the address_space - folio.mapping may be zeroed in fault_dirty_shared_page()
3001 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s in fault_dirty_shared_page()
3008 file_update_time(vma->vm_file); in fault_dirty_shared_page()
3039 * any related book-keeping.
3042 __releases(vmf->ptl) in wp_page_reuse()
3044 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
3047 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); in wp_page_reuse()
3051 !PageAnonExclusive(vmf->page)); in wp_page_reuse()
3057 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); in wp_page_reuse()
3060 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3061 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
3063 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3064 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3065 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
3071 * vm_ops that have a ->map_pages have been audited and don't need
3076 struct vm_area_struct *vma = vmf->vma; in vmf_can_call_fault()
3078 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) in vmf_can_call_fault()
3086 struct vm_area_struct *vma = vmf->vma; in vmf_anon_prepare()
3088 if (likely(vma->anon_vma)) in vmf_anon_prepare()
3090 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in vmf_anon_prepare()
3108 * - Allocate a page, copy the content of the old page to the new one.
3109 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3110 * - Take the PTL. If the pte changed, bail out and release the allocated page
3111 * - If the pte is still the way we remember it, update the page table and all
3112 * relevant references. This includes dropping the reference the page-table
3114 * - In any case, unlock the PTL and drop the reference we took to the old page.
3118 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_page_copy()
3119 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
3120 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3131 if (vmf->page) in wp_page_copy()
3132 old_folio = page_folio(vmf->page); in wp_page_copy()
3137 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); in wp_page_copy()
3138 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); in wp_page_copy()
3145 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); in wp_page_copy()
3149 * it's fine. If not, userspace would re-fault on in wp_page_copy()
3152 * The -EHWPOISON case will not be retried. in wp_page_copy()
3159 return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0; in wp_page_copy()
3161 kmsan_copy_page_meta(&new_folio->page, vmf->page); in wp_page_copy()
3167 vmf->address & PAGE_MASK, in wp_page_copy()
3168 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
3172 * Re-check the pte - we dropped the lock in wp_page_copy()
3174 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
3175 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in wp_page_copy()
3178 dec_mm_counter(mm, mm_counter_file(&old_folio->page)); in wp_page_copy()
3182 ksm_might_unmap_zero_page(mm, vmf->orig_pte); in wp_page_copy()
3185 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3186 entry = mk_pte(&new_folio->page, vma->vm_page_prot); in wp_page_copy()
3189 if (pte_soft_dirty(vmf->orig_pte)) in wp_page_copy()
3191 if (pte_uffd_wp(vmf->orig_pte)) in wp_page_copy()
3204 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3205 folio_add_new_anon_rmap(new_folio, vma, vmf->address); in wp_page_copy()
3213 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
3214 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3238 folio_remove_rmap_pte(old_folio, vmf->page, vma); in wp_page_copy()
3244 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3245 } else if (vmf->pte) { in wp_page_copy()
3246 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3247 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3256 free_swap_cache(&old_folio->page); in wp_page_copy()
3273 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3277 * @folio: the folio of vmf->page
3280 * shared mapping due to PTE being read-only once the mapped page is prepared.
3291 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3292 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3293 &vmf->ptl); in finish_mkwrite_fault()
3294 if (!vmf->pte) in finish_mkwrite_fault()
3300 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { in finish_mkwrite_fault()
3301 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3302 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
3315 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
3317 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3320 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
3325 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
3326 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3336 __releases(vmf->ptl) in wp_page_shared()
3338 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
3343 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
3346 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
3420 * shared-page counter for the old page.
3423 * done by the caller (the low-level page fault routine in most cases).
3431 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3436 __releases(vmf->ptl) in do_wp_page()
3438 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in do_wp_page()
3439 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
3444 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
3446 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3452 * etc.) because we're only removing the uffd-wp bit, in do_wp_page()
3455 pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); in do_wp_page()
3457 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_wp_page()
3462 vmf->orig_pte = pte; in do_wp_page()
3466 * Userfaultfd write-protect can defer flushes. Ensure the TLB in do_wp_page()
3469 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3470 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3471 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3474 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3476 if (vmf->page) in do_wp_page()
3477 folio = page_folio(vmf->page); in do_wp_page()
3483 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in do_wp_page()
3489 * Just mark the pages writable and/or call ops->pfn_mkwrite. in do_wp_page()
3491 if (!vmf->page) in do_wp_page()
3504 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { in do_wp_page()
3505 if (!PageAnonExclusive(vmf->page)) in do_wp_page()
3506 SetPageAnonExclusive(vmf->page); in do_wp_page()
3508 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3520 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3532 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
3544 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
3545 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
3550 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3551 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3557 * unmap_mapping_folio() - Unmap single folio from processes.
3569 struct address_space *mapping = folio->mapping; in unmap_mapping_folio()
3576 first_index = folio->index; in unmap_mapping_folio()
3577 last_index = folio_next_index(folio) - 1; in unmap_mapping_folio()
3584 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) in unmap_mapping_folio()
3585 unmap_mapping_range_tree(&mapping->i_mmap, first_index, in unmap_mapping_folio()
3591 * unmap_mapping_pages() - Unmap pages from processes.
3607 pgoff_t last_index = start + nr - 1; in unmap_mapping_pages()
3614 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) in unmap_mapping_pages()
3615 unmap_mapping_range_tree(&mapping->i_mmap, first_index, in unmap_mapping_pages()
3622 * unmap_mapping_range - unmap the portion of all mmaps in the specified
3642 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT; in unmap_mapping_range()
3647 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; in unmap_mapping_range()
3649 hlen = ULONG_MAX - hba + 1; in unmap_mapping_range()
3661 struct folio *folio = page_folio(vmf->page); in remove_device_exclusive_entry()
3662 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry()
3668 * the PTL so a racing thread can remove the device-exclusive in remove_device_exclusive_entry()
3671 * been re-allocated after being freed all we do is lock and in remove_device_exclusive_entry()
3683 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3684 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); in remove_device_exclusive_entry()
3687 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3688 &vmf->ptl); in remove_device_exclusive_entry()
3689 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in remove_device_exclusive_entry()
3690 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); in remove_device_exclusive_entry()
3692 if (vmf->pte) in remove_device_exclusive_entry()
3693 pte_unmap_unlock(vmf->pte, vmf->ptl); in remove_device_exclusive_entry()
3707 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || in should_try_to_free_swap()
3722 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3723 vmf->address, &vmf->ptl); in pte_marker_clear()
3724 if (!vmf->pte) in pte_marker_clear()
3727 * Be careful so that we will only recover a special uffd-wp pte into a in pte_marker_clear()
3734 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) in pte_marker_clear()
3735 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
3736 pte_unmap_unlock(vmf->pte, vmf->ptl); in pte_marker_clear()
3742 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
3749 * This is actually a page-missing access, but with uffd-wp special pte
3750 * installed. It means this pte was wr-protected before being unmapped.
3756 * got unregistered - we can simply clear them. in pte_marker_handle_uffd_wp()
3758 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
3766 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); in handle_pte_marker()
3776 /* Higher priority than uffd-wp when data corrupted */ in handle_pte_marker()
3788 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3797 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
3812 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
3815 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3816 vmf->address); in do_swap_page()
3818 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
3821 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in do_swap_page()
3831 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
3832 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3833 vmf->address, &vmf->ptl); in do_swap_page()
3834 if (unlikely(!vmf->pte || in do_swap_page()
3835 !pte_same(ptep_get(vmf->pte), in do_swap_page()
3836 vmf->orig_pte))) in do_swap_page()
3843 get_page(vmf->page); in do_swap_page()
3844 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
3845 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
3846 put_page(vmf->page); in do_swap_page()
3852 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3863 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
3869 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && in do_swap_page()
3887 vma, vmf->address, false); in do_swap_page()
3888 page = &folio->page; in do_swap_page()
3894 vma->vm_mm, GFP_KERNEL, in do_swap_page()
3908 folio->swap = entry; in do_swap_page()
3910 folio->private = NULL; in do_swap_page()
3925 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3926 vmf->address, &vmf->ptl); in do_swap_page()
3927 if (likely(vmf->pte && in do_swap_page()
3928 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
3936 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
3964 * page->index of !PageKSM() pages would be nonlinear inside the in do_swap_page()
3965 * anon VMA -- PageKSM() is lost on actual swapout. in do_swap_page()
3967 folio = ksm_might_need_to_copy(folio, vma, vmf->address); in do_swap_page()
3972 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { in do_swap_page()
3986 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && in do_swap_page()
3996 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3997 &vmf->ptl); in do_swap_page()
3998 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
4022 exclusive = pte_swp_exclusive(vmf->orig_pte); in do_swap_page()
4026 * swapcache -> certainly exclusive. in do_swap_page()
4030 data_race(si->flags & SWP_STABLE_WRITES)) { in do_swap_page()
4032 * This is tricky: not all swap backends support in do_swap_page()
4040 * For these problematic swap backends, simply drop the in do_swap_page()
4066 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
4069 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
4070 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
4071 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
4081 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
4083 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
4088 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
4090 if (pte_swp_uffd_wp(vmf->orig_pte)) in do_swap_page()
4092 vmf->orig_pte = pte; in do_swap_page()
4096 folio_add_new_anon_rmap(folio, vma, vmf->address); in do_swap_page()
4099 folio_add_anon_rmap_pte(folio, page, vma, vmf->address, in do_swap_page()
4105 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
4106 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
4122 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
4129 /* No need to invalidate - it was non-present before */ in do_swap_page()
4130 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_swap_page()
4132 if (vmf->pte) in do_swap_page()
4133 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4142 if (vmf->pte) in do_swap_page()
4143 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4174 struct vm_area_struct *vma = vmf->vma; in alloc_anon_folio()
4183 * If uffd is active for the vma we need per-page fault fidelity to in alloc_anon_folio()
4194 orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true, in alloc_anon_folio()
4195 BIT(PMD_ORDER) - 1); in alloc_anon_folio()
4196 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_anon_folio()
4201 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); in alloc_anon_folio()
4203 return ERR_PTR(-EAGAIN); in alloc_anon_folio()
4212 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_anon_folio()
4223 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_anon_folio()
4226 clear_huge_page(&folio->page, vmf->address, 1 << order); in alloc_anon_folio()
4234 return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address); in alloc_anon_folio()
4238 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4245 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
4246 unsigned long addr = vmf->address; in do_anonymous_page()
4253 /* File mapping without ->vm_ops ? */ in do_anonymous_page()
4254 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
4261 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4264 /* Use the zero-page for reads */ in do_anonymous_page()
4265 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
4266 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4267 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
4268 vma->vm_page_prot)); in do_anonymous_page()
4269 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4270 vmf->address, &vmf->ptl); in do_anonymous_page()
4271 if (!vmf->pte) in do_anonymous_page()
4274 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4277 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4282 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4291 /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ in do_anonymous_page()
4299 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); in do_anonymous_page()
4301 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
4312 entry = mk_pte(&folio->page, vma->vm_page_prot); in do_anonymous_page()
4314 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
4317 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
4318 if (!vmf->pte) in do_anonymous_page()
4321 update_mmu_tlb(vma, addr, vmf->pte); in do_anonymous_page()
4323 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { in do_anonymous_page()
4325 update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i); in do_anonymous_page()
4329 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4335 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4340 folio_ref_add(folio, nr_pages - 1); in do_anonymous_page()
4341 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_anonymous_page()
4347 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); in do_anonymous_page()
4349 /* No need to invalidate - it was non-present before */ in do_anonymous_page()
4350 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
4352 if (vmf->pte) in do_anonymous_page()
4353 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4366 * released depending on flags and vma->vm_ops->fault() return value.
4371 struct vm_area_struct *vma = vmf->vma; in __do_fault()
4390 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
4391 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4392 if (!vmf->prealloc_pte) in __do_fault()
4396 ret = vma->vm_ops->fault(vmf); in __do_fault()
4401 folio = page_folio(vmf->page); in __do_fault()
4402 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
4405 if (page_mapped(vmf->page)) in __do_fault()
4408 if (mapping_evict_folio(folio->mapping, folio)) in __do_fault()
4413 vmf->page = NULL; in __do_fault()
4420 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); in __do_fault()
4428 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
4430 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4435 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4436 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
4442 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
4443 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
4444 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
4451 if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER) in do_set_pmd()
4467 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
4468 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4469 if (!vmf->prealloc_pte) in do_set_pmd()
4473 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4474 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
4479 entry = mk_huge_pmd(page, vma->vm_page_prot); in do_set_pmd()
4483 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
4492 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4494 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
4500 spin_unlock(vmf->ptl); in do_set_pmd()
4511 * set_pte_range - Set a range of PTEs to point to pages in a folio.
4521 struct vm_area_struct *vma = vmf->vma; in set_pte_range()
4523 bool write = vmf->flags & FAULT_FLAG_WRITE; in set_pte_range()
4524 bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE); in set_pte_range()
4528 entry = mk_pte(page, vma->vm_page_prot); in set_pte_range()
4539 /* copy-on-write page */ in set_pte_range()
4540 if (write && !(vma->vm_flags & VM_SHARED)) { in set_pte_range()
4541 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr); in set_pte_range()
4546 add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); in set_pte_range()
4549 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
4551 /* no need to invalidate: a not-present page won't be cached */ in set_pte_range()
4552 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
4557 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) in vmf_pte_changed()
4558 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); in vmf_pte_changed()
4560 return !pte_none(ptep_get(vmf->pte)); in vmf_pte_changed()
4564 * finish_fault - finish page fault once we have prepared the page to fault
4580 struct vm_area_struct *vma = vmf->vma; in finish_fault()
4585 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) in finish_fault()
4586 page = vmf->cow_page; in finish_fault()
4588 page = vmf->page; in finish_fault()
4594 if (!(vma->vm_flags & VM_SHARED)) { in finish_fault()
4595 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
4600 if (pmd_none(*vmf->pmd)) { in finish_fault()
4607 if (vmf->prealloc_pte) in finish_fault()
4608 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
4609 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
4613 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
4614 vmf->address, &vmf->ptl); in finish_fault()
4615 if (!vmf->pte) in finish_fault()
4618 /* Re-check under ptl */ in finish_fault()
4622 set_pte_range(vmf, folio, page, 1, vmf->address); in finish_fault()
4625 update_mmu_tlb(vma, vmf->address, vmf->pte); in finish_fault()
4629 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
4650 return -EINVAL; in fault_around_bytes_set()
4653 * The minimum value is 1 page, however this results in no fault-around in fault_around_bytes_set()
4677 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4678 * not ready to be mapped: not up-to-date, locked, etc.
4695 pgoff_t pte_off = pte_index(vmf->address); in do_fault_around()
4696 /* The page offset of vmf->address within the VMA. */ in do_fault_around()
4697 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
4703 pte_off - min(pte_off, vma_off)); in do_fault_around()
4707 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
4709 if (pmd_none(*vmf->pmd)) { in do_fault_around()
4710 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4711 if (!vmf->prealloc_pte) in do_fault_around()
4716 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
4717 vmf->pgoff + from_pte - pte_off, in do_fault_around()
4718 vmf->pgoff + to_pte - pte_off); in do_fault_around()
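do_fault_around() clamps its window so it never leaves the page table containing the faulting pte and never leaves the VMA, starting from an aligned block around pte_off. A simplified standalone model of that clamping (the numbers are invented and the formula is a reconstruction of the lines above, so treat it as illustrative only):

#include <stdio.h>

#define PTRS_PER_PTE 512UL

static unsigned long min2(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max2(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
        unsigned long pte_off  = 13;  /* pte_index(vmf->address)    */
        unsigned long vma_off  = 5;   /* vmf->pgoff - vma->vm_pgoff */
        unsigned long vma_pgs  = 40;  /* vma_pages(vma)             */
        unsigned long nr_pages = 16;  /* fault_around_pages()       */

        unsigned long from = max2(pte_off & ~(nr_pages - 1),         /* ALIGN_DOWN    */
                                  pte_off - min2(pte_off, vma_off)); /* VMA start     */
        unsigned long to   = min2(min2(from + nr_pages, PTRS_PER_PTE),
                                  pte_off + vma_pgs - vma_off) - 1;  /* table/VMA end */

        printf("map ptes [%lu, %lu] around the fault at %lu\n", from, to, pte_off);
        return 0;
}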
4724 /* Return true if we should do read fault-around, false otherwise */
4727 /* No ->map_pages? No way to fault around... */ in should_fault_around()
4728 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
4731 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
4744 * Let's call ->map_pages() first and use ->fault() as fallback in do_read_fault()
4763 folio = page_folio(vmf->page); in do_read_fault()
4772 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
4782 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
4786 vmf->cow_page = &folio->page; in do_cow_fault()
4794 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4798 unlock_page(vmf->page); in do_cow_fault()
4799 put_page(vmf->page); in do_cow_fault()
4810 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
4822 folio = page_folio(vmf->page); in do_shared_fault()
4828 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
4851 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4860 struct vm_area_struct *vma = vmf->vma; in do_fault()
4861 struct mm_struct *vm_mm = vma->vm_mm; in do_fault()
4867 if (!vma->vm_ops->fault) { in do_fault()
4868 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
4869 vmf->address, &vmf->ptl); in do_fault()
4870 if (unlikely(!vmf->pte)) in do_fault()
4880 if (unlikely(pte_none(ptep_get(vmf->pte)))) in do_fault()
4885 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
4887 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
4889 else if (!(vma->vm_flags & VM_SHARED)) in do_fault()
4895 if (vmf->prealloc_pte) { in do_fault()
4896 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4897 vmf->prealloc_pte = NULL; in do_fault()
4921 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
4935 spin_lock(vmf->ptl); in do_numa_page()
4936 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in do_numa_page()
4937 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4942 old_pte = ptep_get(vmf->pte); in do_numa_page()
4943 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
4951 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
4954 folio = vm_normal_folio(vma, vmf->address, pte); in do_numa_page()
4958 /* TODO: handle PTE-mapped THP */ in do_numa_page()
4977 if (folio_estimated_sharers(folio) > 1 && (vma->vm_flags & VM_SHARED)) in do_numa_page()
4987 last_cpupid = (-1 & LAST_CPUPID_MASK); in do_numa_page()
4990 target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags); in do_numa_page()
4995 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5004 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
5005 vmf->address, &vmf->ptl); in do_numa_page()
5006 if (unlikely(!vmf->pte)) in do_numa_page()
5008 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in do_numa_page()
5009 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5022 * non-accessible ptes, some can allow access by kernel mode. in do_numa_page()
5024 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
5025 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
5029 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
5030 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_numa_page()
5031 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5037 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd()
5040 if (vma->vm_ops->huge_fault) in create_huge_pmd()
5041 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
5048 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd()
5049 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_huge_pmd()
5054 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { in wp_huge_pmd()
5055 if (userfaultfd_wp_async(vmf->vma)) in wp_huge_pmd()
5062 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pmd()
5063 if (vma->vm_ops->huge_fault) { in wp_huge_pmd()
5064 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
5071 /* COW or write-notify handled on pte level: split pmd. */ in wp_huge_pmd()
5072 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
5081 struct vm_area_struct *vma = vmf->vma; in create_huge_pud()
5085 if (vma->vm_ops->huge_fault) in create_huge_pud()
5086 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
5095 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud()
5101 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pud()
5102 if (vma->vm_ops->huge_fault) { in wp_huge_pud()
5103 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
5109 /* COW or write-notify not handled on PUD level: split pud.*/ in wp_huge_pud()
5110 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
5124 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5134 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
5136 * Leave __pte_alloc() until later: because vm_ops->fault may in handle_pte_fault()
5141 vmf->pte = NULL; in handle_pte_fault()
5142 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
5150 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
5151 vmf->address, &vmf->ptl); in handle_pte_fault()
5152 if (unlikely(!vmf->pte)) in handle_pte_fault()
5154 vmf->orig_pte = ptep_get_lockless(vmf->pte); in handle_pte_fault()
5155 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
5157 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
5158 pte_unmap(vmf->pte); in handle_pte_fault()
5159 vmf->pte = NULL; in handle_pte_fault()
5163 if (!vmf->pte) in handle_pte_fault()
5166 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
5169 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
5172 spin_lock(vmf->ptl); in handle_pte_fault()
5173 entry = vmf->orig_pte; in handle_pte_fault()
5174 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { in handle_pte_fault()
5175 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
5178 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { in handle_pte_fault()
5181 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) in handle_pte_fault()
5185 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
5186 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
5187 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
5188 vmf->pte, 1); in handle_pte_fault()
5191 if (vmf->flags & FAULT_FLAG_TRIED) in handle_pte_fault()
5199 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
5200 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in handle_pte_fault()
5201 vmf->pte); in handle_pte_fault()
5204 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
5225 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5226 unsigned long vm_flags = vma->vm_flags; in __handle_mm_fault()
5309 * mm_account_fault - Do page fault accounting
5312 * of perf event counters, but we'll still do the per-task accounting to
5321 * still be in per-arch page fault handlers at the entry of page fault.
5358 current->maj_flt++; in mm_account_fault()
5360 current->min_flt++; in mm_account_fault()
5380 current->in_lru_fault = vma_has_recency(vma); in lru_gen_enter_fault()
5385 current->in_lru_fault = false; in lru_gen_exit_fault()
5405 * just treat it like an ordinary read-fault otherwise. in sanitize_fault_flags()
5407 if (!is_cow_mapping(vma->vm_flags)) in sanitize_fault_flags()
5410 /* Write faults on read-only mappings are impossible ... */ in sanitize_fault_flags()
5411 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) in sanitize_fault_flags()
5414 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && in sanitize_fault_flags()
5415 !is_cow_mapping(vma->vm_flags))) in sanitize_fault_flags()
5420 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of in sanitize_fault_flags()
5442 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault()
5459 * Enable the memcg OOM handling for faults triggered in user in handle_mm_fault()
5468 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
5516 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but in mmap_upgrade_trylock()
5561 if (likely(vma && (vma->vm_start <= addr))) in lock_mm_and_find_vma()
5568 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { in lock_mm_and_find_vma()
5579 * re-take it, and also look up the vma again, in lock_mm_and_find_vma()
5580 * re-checking it. in lock_mm_and_find_vma()
5589 if (vma->vm_start <= addr) in lock_mm_and_find_vma()
5591 if (!(vma->vm_flags & VM_GROWSDOWN)) in lock_mm_and_find_vma()
5617 MA_STATE(mas, &mm->mm_mt, address, address); in lock_vma_under_rcu()
5635 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) in lock_vma_under_rcu()
5639 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in lock_vma_under_rcu()
5643 if (vma->detached) { in lock_vma_under_rcu()
5665 * We've already handled the fast-path in-line.
5671 return -ENOMEM; in __p4d_alloc()
5673 spin_lock(&mm->page_table_lock); in __p4d_alloc()
5680 spin_unlock(&mm->page_table_lock); in __p4d_alloc()
5688 * We've already handled the fast-path in-line.
5694 return -ENOMEM; in __pud_alloc()
5696 spin_lock(&mm->page_table_lock); in __pud_alloc()
5703 spin_unlock(&mm->page_table_lock); in __pud_alloc()
5711 * We've already handled the fast-path in-line.
5718 return -ENOMEM; in __pmd_alloc()
5734 * follow_pte - look up PTE at a user virtual address
5750 * it is not a good general-purpose API.
5752 * Return: zero on success, -ve otherwise.
5788 return -EINVAL; in follow_pte()
5793 * follow_pfn - look up PFN at a user virtual address
5803 * Return: zero and the pfn at @pfn on success, -ve otherwise.
5808 int ret = -EINVAL; in follow_pfn()
5812 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
5815 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
5829 int ret = -EINVAL; in follow_phys()
5833 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
5836 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
5854 * generic_access_phys - generic implementation for iomem mmap access
5874 int ret = -EINVAL; in generic_access_phys()
5876 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in generic_access_phys()
5877 return -EINVAL; in generic_access_phys()
5880 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
5881 return -EINVAL; in generic_access_phys()
5889 return -EINVAL; in generic_access_phys()
5893 return -ENOMEM; in generic_access_phys()
5895 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
5954 return buf - old_buf; in __access_remote_vm()
5966 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
5967 bytes = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
5974 offset = addr & (PAGE_SIZE-1); in __access_remote_vm()
5975 if (bytes > PAGE_SIZE-offset) in __access_remote_vm()
5976 bytes = PAGE_SIZE-offset; in __access_remote_vm()
5989 len -= bytes; in __access_remote_vm()
5995 return buf - old_buf; in __access_remote_vm()
5999 * access_remote_vm - access another process' address space
6044 struct mm_struct *mm = current->mm; in print_vma_addr()
6054 if (vma && vma->vm_file) { in print_vma_addr()
6055 struct file *f = vma->vm_file; in print_vma_addr()
6064 vma->vm_start, in print_vma_addr()
6065 vma->vm_end - vma->vm_start); in print_vma_addr()
6079 if (current->mm) in __might_fault()
6080 might_lock_read(&current->mm->mmap_lock); in __might_fault()
6099 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); in process_huge_page()
6103 n = (addr_hint - addr) / PAGE_SIZE; in process_huge_page()
6109 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) { in process_huge_page()
6117 base = pages_per_huge_page - 2 * (pages_per_huge_page - n); in process_huge_page()
6118 l = pages_per_huge_page - n; in process_huge_page()
6128 * Process remaining subpages in left-right-left-right pattern in process_huge_page()
6133 int right_idx = base + 2 * l - 1 - i; in process_huge_page()
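The left_idx/right_idx pair above gives the "left-right towards the target" order: the subpages farthest from the faulting address are processed first and the target subpage last, keeping its cache lines hot when the fault returns. A standalone model that only prints the visiting order for an invented hint (the branch selection follows the lines above; the sizes are made up):

#include <stdio.h>

int main(void)
{
        int pages = 16;   /* pretend pages_per_huge_page == 16          */
        int n = 5;        /* subpage index of addr_hint within the page */
        int base, l, i;

        if (2 * n <= pages) {                    /* target in the first half  */
                base = 0;
                l = n;
                for (i = pages - 1; i >= 2 * n; i--)   /* far tail first      */
                        printf("%d ", i);
        } else {                                 /* target in the second half */
                base = pages - 2 * (pages - n);
                l = pages - n;
                for (i = 0; i < base; i++)             /* far head first      */
                        printf("%d ", i);
        }
        /* Remaining 2*l subpages converge on the target: the last index
         * printed is n itself. */
        for (i = 0; i < l; i++)
                printf("%d %d ", base + i, base + 2 * l - 1 - i);
        printf("\n");
        return 0;
}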
6174 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); in clear_huge_page()
6201 return -EHWPOISON; in copy_user_gigantic_page()
6217 if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx, in copy_subpage()
6218 addr, copy_arg->vma)) { in copy_subpage()
6219 memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0); in copy_subpage()
6220 return -EHWPOISON; in copy_subpage()
6230 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); in copy_user_large_folio()
6232 .dst = &dst->page, in copy_user_large_folio()
6233 .src = &src->page, in copy_user_large_folio()
6264 ret_val -= (PAGE_SIZE - rc); in copy_folio_from_user()
6282 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, in ptlock_cache_init()
6293 ptdesc->ptl = ptl; in ptlock_alloc()
6299 kmem_cache_free(page_ptl_cachep, ptdesc->ptl); in ptlock_free()