Lines Matching defs:vma (mm/mprotect.c, Linux kernel)

43 static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte)
45 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
53 if (pte_needs_soft_dirty_wp(vma, pte))
57 if (userfaultfd_pte_wp(vma, pte))
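The matches at 45, 53 and 57 are the guard clauses of maybe_change_pte_writable(). A sketch of how they plausibly compose; the pte_protnone() check and the comments fill gaps the match filter hides and are assumptions:

static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte)
{
        /* Upgrading to writable only makes sense on a writable VMA. */
        if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
                return false;

        /* Assumption: don't touch entries that are not even readable. */
        if (pte_protnone(pte))
                return false;

        /* Soft-dirty tracking still needs to observe the first write. */
        if (pte_needs_soft_dirty_wp(vma, pte))
                return false;

        /* Likewise userfaultfd write-protect tracking. */
        if (userfaultfd_pte_wp(vma, pte))
                return false;

        return true;
}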
63 static bool can_change_private_pte_writable(struct vm_area_struct *vma,
68 if (!maybe_change_pte_writable(vma, pte))
77 page = vm_normal_page(vma, addr, pte);
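Lines 68 and 77 show can_change_private_pte_writable() deferring to the common guard above and then resolving the mapped page. The final condition is not among the matches; the PageAnonExclusive() test below is an assumption consistent with the comment at 219-220:

static bool can_change_private_pte_writable(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t pte)
{
        struct page *page;

        if (!maybe_change_pte_writable(vma, pte))
                return false;

        /*
         * Assumption: for a MAP_PRIVATE mapping, only an exclusive
         * anonymous page may be mapped writable without a write fault.
         */
        page = vm_normal_page(vma, addr, pte);
        return page && PageAnon(page) && PageAnonExclusive(page);
}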
81 static bool can_change_shared_pte_writable(struct vm_area_struct *vma,
84 if (!maybe_change_pte_writable(vma, pte))
99 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
102 if (!(vma->vm_flags & VM_SHARED))
103 return can_change_private_pte_writable(vma, addr, pte);
105 return can_change_shared_pte_writable(vma, pte);
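Lines 102-105 give the dispatcher essentially in full: private mappings take the per-page path above, shared mappings the per-PTE path starting at 81 (whose remaining test, not visible here, presumably requires the PTE to already be dirty). Reassembled:

bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
                             pte_t pte)
{
        if (!(vma->vm_flags & VM_SHARED))
                return can_change_private_pte_writable(vma, addr, pte);

        return can_change_shared_pte_writable(vma, pte);
}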
121 static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
140 if (is_cow_mapping(vma->vm_flags) &&
178 static void prot_commit_flush_ptes(struct vm_area_struct *vma, unsigned long addr,
192 ptent = pte_mkwrite(ptent, vma);
194 modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes);
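Lines 192 and 194 are the tail of prot_commit_flush_ptes(): the write upgrade is applied, when requested, just before the PTE batch is committed. Only the two matched statements are certain; the set_write flag name is an assumption:

        /* Tail of prot_commit_flush_ptes(), reconstructed: */
        if (set_write)                          /* assumed flag name */
                ptent = pte_mkwrite(ptent, vma);

        modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes);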
219 * if the vma is a private vma, and we cannot determine whether to change
220 * the pte to writable just from the vma and the pte, we then need to look
230 static void commit_anon_folio_batch(struct vm_area_struct *vma,
242 prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, len,
249 static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
255 if (vma->vm_flags & VM_SHARED) {
256 set_write = can_change_shared_pte_writable(vma, ptent);
257 prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
262 set_write = maybe_change_pte_writable(vma, ptent) &&
265 prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
269 commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
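Lines 255-269 expose the shape of set_write_prot_commit_flush_ptes(): a shared mapping decides writability for the whole PTE batch at once, a private one falls back to the per-folio helper. A sketch; the condition completing line 262 and any trailing arguments hidden by the match filter are assumptions:

static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
                struct folio *folio, struct page *page, unsigned long addr,
                pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes,
                struct mmu_gather *tlb)
{
        bool set_write;

        if (vma->vm_flags & VM_SHARED) {
                set_write = can_change_shared_pte_writable(vma, ptent);
                prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
                                       set_write, tlb);   /* args assumed */
                return;
        }

        set_write = maybe_change_pte_writable(vma, ptent) &&
                    folio && folio_test_anon(folio);       /* assumption */
        if (!set_write) {
                prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
                                       set_write, tlb);   /* args assumed */
                return;
        }
        commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent,
                                nr_ptes, tlb);
}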
273 struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
286 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
291 if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
292 atomic_read(&vma->vm_mm->mm_users) == 1)
295 flush_tlb_batched_pending(vma->vm_mm);
307 page = vm_normal_page(vma, addr, oldpte);
315 int ret = prot_numa_skip(vma, addr, oldpte, pte,
328 oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
351 set_write_prot_commit_flush_ptes(vma, folio, page,
354 prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,
401 pte_clear(vma->vm_mm, addr, pte);
415 set_pte_at(vma->vm_mm, addr, pte, newpte);
429 if (userfaultfd_wp_use_markers(vma)) {
436 set_pte_at(vma->vm_mm, addr, pte,
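The matches from 286 down to 436 sample the heart of change_pte_range(). Read together they suggest the skeleton below; this is a structural sketch only, with ellipses standing for arguments and logic the filter hides, and the loop bookkeeping assumed:

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);      /* 286 */
        flush_tlb_batched_pending(vma->vm_mm);                       /* 295 */
        do {
                oldpte = ptep_get(pte);
                if (pte_present(oldpte)) {
                        page = vm_normal_page(vma, addr, oldpte);    /* 307 */
                        if (prot_numa &&
                            prot_numa_skip(vma, addr, oldpte, pte, ...)) /* 315 */
                                continue;
                        oldpte = modify_prot_start_ptes(vma, addr, pte,
                                                        nr_ptes);    /* 328 */
                        ptent = pte_modify(oldpte, newprot);         /* assumed */
                        /* upgrade writability per folio (351) or just
                           commit the new protection (354) */
                } else if (is_swap_pte(oldpte)) {
                        /* rewrite migration/swap entries or drop a stale
                           marker (401), ending in */
                        set_pte_at(vma->vm_mm, addr, pte, newpte);   /* 415 */
                } else if (userfaultfd_wp_use_markers(vma)) {        /* 429 */
                        /* install a PTE marker in the empty slot (436) */
                }
        } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);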
453 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
461 return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
469 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
476 return userfaultfd_wp_use_markers(vma);
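Both predicates are one-liners whose returns are fully visible (461, 476). With the uffd-wp early exit that plausibly precedes line 476 (an assumption), they read:

static inline bool
pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
        /*
         * uffd-wp on a !anonymous VMA must be tracked per PTE, so any
         * huge mapping has to be split first.
         */
        return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}

static inline bool
pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
        /* Assumption: only relevant under UFFDIO_WRITEPROTECT. */
        if (!(cp_flags & MM_CP_UFFD_WP))
                return false;

        /* Empty page tables only need populating to hold PTE markers. */
        return userfaultfd_wp_use_markers(vma);
}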
485 #define change_pmd_prepare(vma, pmd, cp_flags) \
488 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
489 if (pte_alloc(vma->vm_mm, pmd)) \
500 #define change_prepare(vma, high, low, addr, cp_flags) \
503 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
504 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
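change_prepare() relies on ## token pasting so a single macro serves every page-table level: low##_alloc becomes p4d_alloc(), pud_alloc() or pmd_alloc() depending on the (high, low) pair at the call site (lines 677, 651 and 596 respectively). The pud/pmd instance at line 596 roughly expands to the statement expression below; the error handling around the allocation is an assumption:

        /* change_prepare(vma, pudp, pmd, addr, cp_flags), expanded: */
        ({
                long err = 0;
                if (unlikely(pgtable_populate_needed(vma, cp_flags))) {
                        pmd_t *p = pmd_alloc(vma->vm_mm, pudp, addr);
                        if (p == NULL)          /* assumption */
                                err = -ENOMEM;
                }
                err;
        })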
512 struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
527 ret = change_pmd_prepare(vma, pmd, cp_flags);
539 pgtable_split_needed(vma, cp_flags)) {
540 __split_huge_pmd(vma, pmd, addr, false);
546 ret = change_pmd_prepare(vma, pmd, cp_flags);
552 ret = change_huge_pmd(tlb, vma, pmd,
567 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
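Lines 527-567 outline change_pmd_range()'s THP policy: a huge PMD is changed in one shot when it covers the whole range and no uffd-wp split is forced, otherwise it is split and the walk falls through to the PTE loop. A sketch with the surrounding conditions assumed; change_pud_range() repeats the same decision one level up (lines 615-630):

        if (pmd_trans_huge(*pmd)) {             /* assumption: THP case */
                if (next - addr != HPAGE_PMD_SIZE ||
                    pgtable_split_needed(vma, cp_flags)) {
                        __split_huge_pmd(vma, pmd, addr, false);     /* 540 */
                } else {
                        ret = change_huge_pmd(tlb, vma, pmd, addr,
                                              newprot, cp_flags);    /* 552 */
                        if (ret)
                                goto next;  /* assumption: whole PMD done */
                }
        }
        ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
                               cp_flags);                            /* 567 */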
582 struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
596 ret = change_prepare(vma, pudp, pmd, addr, cp_flags);
609 vma->vm_mm, addr, end);
615 pgtable_split_needed(vma, cp_flags)) {
616 __split_huge_pud(vma, pudp, addr);
619 ret = change_huge_pud(tlb, vma, pudp,
630 pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot,
641 struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
651 ret = change_prepare(vma, p4d, pud, addr, cp_flags);
656 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
664 struct vm_area_struct *vma, unsigned long addr,
667 struct mm_struct *mm = vma->vm_mm;
674 tlb_start_vma(tlb, vma);
677 ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
684 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
688 tlb_end_vma(tlb, vma);
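change_protection_range() is the canonical top-level walk: offset into the pgd, iterate in pgd_addr_end()-sized chunks, recurse one level down. Reassembled around the matches at 667-688; the pgd cursor arithmetic and the none/bad skip are assumptions, though standard for these walks:

static long change_protection_range(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long addr,
                unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd = pgd_offset(mm, addr);      /* assumption */
        unsigned long next;
        long pages = 0, ret;

        tlb_start_vma(tlb, vma);
        do {
                next = pgd_addr_end(addr, end);
                ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
                if (ret) {
                        pages = ret;
                        break;
                }
                if (pgd_none_or_clear_bad(pgd)) /* assumption */
                        continue;
                pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
                                          cp_flags);
        } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);

        return pages;
}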
694 struct vm_area_struct *vma, unsigned long start,
697 pgprot_t newprot = vma->vm_page_prot;
706 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
714 if (is_vm_hugetlb_page(vma))
715 pages = hugetlb_change_protection(vma, start, end, newprot,
718 pages = change_protection_range(tlb, vma, start, end, newprot,
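change_protection() itself is a thin dispatcher, visible at 697-718: hugetlb VMAs go to their dedicated helper, everything else down the generic walk. Signature reconstructed from the fragment at 694 and the call at 834; any NUMA-hinting adjustment of newprot is elided:

long change_protection(struct mmu_gather *tlb,
                       struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, unsigned long cp_flags)
{
        pgprot_t newprot = vma->vm_page_prot;
        long pages;

        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot,
                                                  cp_flags);
        else
                pages = change_protection_range(tlb, vma, start, end, newprot,
                                                cp_flags);

        return pages;
}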
756 struct vm_area_struct *vma, struct vm_area_struct **pprev,
759 struct mm_struct *mm = vma->vm_mm;
760 vm_flags_t oldflags = READ_ONCE(vma->vm_flags);
766 if (vma_is_sealed(vma))
770 *pprev = vma;
811 } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
812 !vma->anon_vma) {
816 vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
817 if (IS_ERR(vma)) {
818 error = PTR_ERR(vma);
822 *pprev = vma;
828 vma_start_write(vma);
829 vm_flags_reset_once(vma, newflags);
830 if (vma_wants_manual_pte_write_upgrade(vma))
832 vma_set_page_prot(vma);
834 change_protection(tlb, vma, start, end, mm_cp_flags);
845 populate_vma_page_range(vma, start, end, NULL);
850 perf_event_mmap(vma);
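Lines 828-850 capture mprotect_fixup()'s commit sequence once vma_modify_flags() at 816 has split or merged the VMA to fit the range: write-lock the VMA, publish the new flags, recompute the default protection, then rewrite the page tables. A sketch; the MM_CP_TRY_CHANGE_WRITABLE and mlock-repopulation conditions are assumptions:

        vma_start_write(vma);                   /* 828: take the VMA write lock */
        vm_flags_reset_once(vma, newflags);     /* 829: publish new vm_flags */
        if (vma_wants_manual_pte_write_upgrade(vma))            /* 830 */
                mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;  /* assumption */
        vma_set_page_prot(vma);                 /* 832: refresh vm_page_prot */

        change_protection(tlb, vma, start, end, mm_cp_flags);   /* 834 */

        /* Assumption: a private locked VMA turning writable is pre-COWed. */
        if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
            (newflags & VM_WRITE))
                populate_vma_page_range(vma, start, end, NULL); /* 845 */

        perf_event_mmap(vma);                                   /* 850 */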
865 struct vm_area_struct *vma, *prev;
904 vma = vma_find(&vmi, end);
906 if (!vma)
910 if (vma->vm_start >= end)
912 start = vma->vm_start;
914 if (!(vma->vm_flags & VM_GROWSDOWN))
917 if (vma->vm_start > start)
920 end = vma->vm_end;
922 if (!(vma->vm_flags & VM_GROWSUP))
928 if (start > vma->vm_start)
929 prev = vma;
933 tmp = vma->vm_start;
934 for_each_vma_range(vmi, vma, end) {
939 if (vma->vm_start != tmp) {
945 if (rier && (vma->vm_flags & VM_MAYEXEC))
955 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
957 newflags |= (vma->vm_flags & ~mask_off_old_flags);
965 if (map_deny_write_exec(vma->vm_flags, newflags)) {
976 error = security_file_mprotect(vma, reqprot, prot);
980 tmp = vma->vm_end;
984 if (vma->vm_ops && vma->vm_ops->mprotect) {
985 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
990 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
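For reference, the userspace view of the loop above: a self-contained demo (a hypothetical test program, not from the kernel tree) that maps a page, drops PROT_WRITE via mprotect(), which lands in do_mprotect_pkey() and, per VMA, in the mprotect_fixup() call at 990, and confirms that the next write faults.

#include <signal.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;

static void on_segv(int sig)
{
        (void)sig;
        siglongjmp(env, 1);
}

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }
        p[0] = 'x';                             /* writable so far */

        if (mprotect(p, page, PROT_READ)) {     /* enters do_mprotect_pkey() */
                perror("mprotect");
                return EXIT_FAILURE;
        }

        signal(SIGSEGV, on_segv);
        if (sigsetjmp(env, 1) == 0) {
                p[0] = 'y';                     /* faults: the PTE lost write */
                puts("unexpected: write succeeded");
        } else {
                puts("write after mprotect(PROT_READ) faulted, as expected");
        }
        munmap(p, page);
        return EXIT_SUCCESS;
}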