Lines matching refs: vma (in mm/mprotect.c)
43 static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte) in maybe_change_pte_writable() argument
45 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) in maybe_change_pte_writable()
53 if (pte_needs_soft_dirty_wp(vma, pte)) in maybe_change_pte_writable()
57 if (userfaultfd_pte_wp(vma, pte)) in maybe_change_pte_writable()
63 static bool can_change_private_pte_writable(struct vm_area_struct *vma, in can_change_private_pte_writable() argument
68 if (!maybe_change_pte_writable(vma, pte)) in can_change_private_pte_writable()
77 page = vm_normal_page(vma, addr, pte); in can_change_private_pte_writable()
81 static bool can_change_shared_pte_writable(struct vm_area_struct *vma, in can_change_shared_pte_writable() argument
84 if (!maybe_change_pte_writable(vma, pte)) in can_change_shared_pte_writable()
99 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, in can_change_pte_writable() argument
102 if (!(vma->vm_flags & VM_SHARED)) in can_change_pte_writable()
103 return can_change_private_pte_writable(vma, addr, pte); in can_change_pte_writable()
105 return can_change_shared_pte_writable(vma, pte); in can_change_pte_writable()
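
The first block of references (lines 43-105) is the writability-upgrade logic: a common gate that refuses the upgrade whenever a write fault still needs to be observed, followed by a split on VM_SHARED. A condensed sketch of that flow, reconstructed from the fragments above (any checks elided between the quoted lines are assumptions):

	static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte)
	{
		/* The VMA itself must permit writes. */
		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
			return false;
		/* Soft-dirty tracking still needs to observe a write fault. */
		if (pte_needs_soft_dirty_wp(vma, pte))
			return false;
		/* So does userfaultfd write-protection. */
		if (userfaultfd_pte_wp(vma, pte))
			return false;
		return true;
	}

	bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
				     pte_t pte)
	{
		/* Private mappings additionally inspect the page via vm_normal_page(). */
		if (!(vma->vm_flags & VM_SHARED))
			return can_change_private_pte_writable(vma, addr, pte);
		return can_change_shared_pte_writable(vma, pte);
	}
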
121 static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr, in prot_numa_skip() argument
140 if (is_cow_mapping(vma->vm_flags) && in prot_numa_skip()
178 static void prot_commit_flush_ptes(struct vm_area_struct *vma, unsigned long addr, in prot_commit_flush_ptes() argument
192 ptent = pte_mkwrite(ptent, vma); in prot_commit_flush_ptes()
194 modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes); in prot_commit_flush_ptes()
230 static void commit_anon_folio_batch(struct vm_area_struct *vma, in commit_anon_folio_batch() argument
242 prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, len, in commit_anon_folio_batch()
249 static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma, in set_write_prot_commit_flush_ptes() argument
255 if (vma->vm_flags & VM_SHARED) { in set_write_prot_commit_flush_ptes()
256 set_write = can_change_shared_pte_writable(vma, ptent); in set_write_prot_commit_flush_ptes()
257 prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes, in set_write_prot_commit_flush_ptes()
262 set_write = maybe_change_pte_writable(vma, ptent) && in set_write_prot_commit_flush_ptes()
265 prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes, in set_write_prot_commit_flush_ptes()
269 commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb); in set_write_prot_commit_flush_ptes()
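
The set_write_prot_commit_flush_ptes() references encode a three-way decision for a batch of PTEs. The branch shape below follows the quoted lines; the trailing arguments of prot_commit_flush_ptes() and the exact anon-folio condition are assumptions:

	if (vma->vm_flags & VM_SHARED) {
		/* Shared: one cheap per-PTE check decides for the whole batch. */
		set_write = can_change_shared_pte_writable(vma, ptent);
		prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
				       /* ... */ set_write, tlb);
	} else if (!(set_write = maybe_change_pte_writable(vma, ptent) &&
		     folio && folio_test_anon(folio))) {
		/* Private but not upgradable: commit without the write bit. */
		prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
				       /* ... */ set_write, tlb);
	} else {
		/* Private anon: decide per page whether it is exclusively owned. */
		commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte,
					ptent, nr_ptes, tlb);
	}
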
273 struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, in change_pte_range() argument
286 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
291 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
292 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
295 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
307 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
315 int ret = prot_numa_skip(vma, addr, oldpte, pte, in change_pte_range()
328 oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes); in change_pte_range()
351 set_write_prot_commit_flush_ptes(vma, folio, page, in change_pte_range()
354 prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent, in change_pte_range()
401 pte_clear(vma->vm_mm, addr, pte); in change_pte_range()
415 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
429 if (userfaultfd_wp_use_markers(vma)) { in change_pte_range()
436 set_pte_at(vma->vm_mm, addr, pte, in change_pte_range()
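
Taken together, the change_pte_range() references trace the per-PTE loop: map and lock the page table, then for each present entry start a prot-change transaction, possibly upgrade writability, and commit. A skeleton of that flow (the loop stepping, the MM_CP_TRY_CHANGE_WRITABLE test, and the skipped non-present/swap-entry handling are assumptions):

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(vma->vm_mm);
	do {
		oldpte = ptep_get(pte);
		if (pte_present(oldpte)) {
			/* Start: tear down the entries, returning the old value. */
			oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
			ptent = pte_modify(oldpte, newprot);
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent))
				set_write_prot_commit_flush_ptes(vma, folio, page,
					addr, pte, oldpte, ptent, nr_ptes,
					/* remaining arguments elided */ tlb);
			else
				prot_commit_flush_ptes(vma, addr, pte, oldpte,
					ptent, nr_ptes,
					/* remaining arguments elided */ tlb);
		}
	} while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
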
453 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) in pgtable_split_needed() argument
461 return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma); in pgtable_split_needed()
469 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) in pgtable_populate_needed() argument
476 return userfaultfd_wp_use_markers(vma); in pgtable_populate_needed()
485 #define change_pmd_prepare(vma, pmd, cp_flags) \ argument
488 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
489 if (pte_alloc(vma->vm_mm, pmd)) \
500 #define change_prepare(vma, high, low, addr, cp_flags) \ argument
503 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
504 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
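
Both helpers are statement-expression macros, and `low##_alloc` is token pasting: a call such as change_prepare(vma, pudp, pmd, addr, cp_flags) expands, roughly, to the following (the error plumbing around it is an assumption):

	long err = 0;
	if (unlikely(pgtable_populate_needed(vma, cp_flags))) {
		/* Pre-populate the PMD table so uffd-wp markers can be installed. */
		pmd_t *p = pmd_alloc(vma->vm_mm, pudp, addr);
		if (p == NULL)
			err = -ENOMEM;
	}
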
512 struct vm_area_struct *vma, pud_t *pud, unsigned long addr, in change_pmd_range() argument
527 ret = change_pmd_prepare(vma, pmd, cp_flags); in change_pmd_range()
539 pgtable_split_needed(vma, cp_flags)) { in change_pmd_range()
540 __split_huge_pmd(vma, pmd, addr, false); in change_pmd_range()
546 ret = change_pmd_prepare(vma, pmd, cp_flags); in change_pmd_range()
552 ret = change_huge_pmd(tlb, vma, pmd, in change_pmd_range()
567 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, in change_pmd_range()
582 struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, in change_pud_range() argument
596 ret = change_prepare(vma, pudp, pmd, addr, cp_flags); in change_pud_range()
609 vma->vm_mm, addr, end); in change_pud_range()
615 pgtable_split_needed(vma, cp_flags)) { in change_pud_range()
616 __split_huge_pud(vma, pudp, addr); in change_pud_range()
619 ret = change_huge_pud(tlb, vma, pudp, in change_pud_range()
630 pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot, in change_pud_range()
641 struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, in change_p4d_range() argument
651 ret = change_prepare(vma, p4d, pud, addr, cp_flags); in change_p4d_range()
656 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, in change_p4d_range()
664 struct vm_area_struct *vma, unsigned long addr, in change_protection_range() argument
667 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
674 tlb_start_vma(tlb, vma); in change_protection_range()
677 ret = change_prepare(vma, pgd, p4d, addr, cp_flags); in change_protection_range()
684 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot, in change_protection_range()
688 tlb_end_vma(tlb, vma); in change_protection_range()
694 struct vm_area_struct *vma, unsigned long start, in change_protection() argument
697 pgprot_t newprot = vma->vm_page_prot; in change_protection()
714 if (is_vm_hugetlb_page(vma)) in change_protection()
715 pages = hugetlb_change_protection(vma, start, end, newprot, in change_protection()
718 pages = change_protection_range(tlb, vma, start, end, newprot, in change_protection()
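
change_protection() itself is just a dispatcher: hugetlb VMAs take their own path, everything else goes through the page-table walk above. A sketch assembled from the quoted lines (the exact signature and return handling are assumptions):

	long change_protection(struct mmu_gather *tlb, struct vm_area_struct *vma,
			       unsigned long start, unsigned long end,
			       unsigned long cp_flags)
	{
		pgprot_t newprot = vma->vm_page_prot;
		long pages;

		if (is_vm_hugetlb_page(vma))
			pages = hugetlb_change_protection(vma, start, end,
							  newprot, cp_flags);
		else
			pages = change_protection_range(tlb, vma, start, end,
							newprot, cp_flags);
		return pages;
	}
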
756 struct vm_area_struct *vma, struct vm_area_struct **pprev, in mprotect_fixup() argument
759 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
760 vm_flags_t oldflags = READ_ONCE(vma->vm_flags); in mprotect_fixup()
766 if (vma_is_sealed(vma)) in mprotect_fixup()
770 *pprev = vma; in mprotect_fixup()
811 } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) && in mprotect_fixup()
812 !vma->anon_vma) { in mprotect_fixup()
816 vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags); in mprotect_fixup()
817 if (IS_ERR(vma)) { in mprotect_fixup()
818 error = PTR_ERR(vma); in mprotect_fixup()
822 *pprev = vma; in mprotect_fixup()
828 vma_start_write(vma); in mprotect_fixup()
829 vm_flags_reset_once(vma, newflags); in mprotect_fixup()
830 if (vma_wants_manual_pte_write_upgrade(vma)) in mprotect_fixup()
832 vma_set_page_prot(vma); in mprotect_fixup()
834 change_protection(tlb, vma, start, end, mm_cp_flags); in mprotect_fixup()
845 populate_vma_page_range(vma, start, end, NULL); in mprotect_fixup()
850 perf_event_mmap(vma); in mprotect_fixup()
865 struct vm_area_struct *vma, *prev; in do_mprotect_pkey() local
904 vma = vma_find(&vmi, end); in do_mprotect_pkey()
906 if (!vma) in do_mprotect_pkey()
910 if (vma->vm_start >= end) in do_mprotect_pkey()
912 start = vma->vm_start; in do_mprotect_pkey()
914 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
917 if (vma->vm_start > start) in do_mprotect_pkey()
920 end = vma->vm_end; in do_mprotect_pkey()
922 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
928 if (start > vma->vm_start) in do_mprotect_pkey()
929 prev = vma; in do_mprotect_pkey()
933 tmp = vma->vm_start; in do_mprotect_pkey()
934 for_each_vma_range(vmi, vma, end) { in do_mprotect_pkey()
939 if (vma->vm_start != tmp) { in do_mprotect_pkey()
945 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
955 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); in do_mprotect_pkey()
957 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
965 if (map_deny_write_exec(vma->vm_flags, newflags)) { in do_mprotect_pkey()
976 error = security_file_mprotect(vma, reqprot, prot); in do_mprotect_pkey()
980 tmp = vma->vm_end; in do_mprotect_pkey()
984 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
985 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); in do_mprotect_pkey()
990 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
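
For orientation, everything above sits under the mprotect(2) syscall; a minimal user-space program that drives do_mprotect_pkey() through this VMA walk (illustrative only):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		/* One anonymous, private, read-write page. */
		char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		strcpy(p, "hello");

		/* Drop the write bit: the kernel walks this VMA range as above. */
		if (mprotect(p, page, PROT_READ) != 0) {
			perror("mprotect");
			return 1;
		}
		printf("still readable: %s\n", p);
		munmap(p, page);
		return 0;
	}
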