Lines matching "tlb" and "split" (Linux kernel, mm/mprotect.c)

1 // SPDX-License-Identifier: GPL-2.0
34 #include <linux/memory-tiers.h>
39 #include <asm/tlb.h>
48 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) in can_change_pte_writable()
59 /* Do we need write faults for uffd-wp tracking? */ in can_change_pte_writable()
63 if (!(vma->vm_flags & VM_SHARED)) { in can_change_pte_writable()
65 * Writable MAP_PRIVATE mapping: We can only special-case on in can_change_pte_writable()
67 * write-fault handler similarly would map them writable without in can_change_pte_writable()
78 * needs a real write-fault for writenotify in can_change_pte_writable()
81 * just like the write-fault handler would do. in can_change_pte_writable()
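Pieced together, the fragments above describe a three-way decision. A minimal sketch of that logic, paraphrased rather than copied (the in-tree function also rejects protnone PTEs and PTEs that still owe a soft-dirty write fault):

	/* Can we set the PTE writable right now, without a later write fault? */
	static bool sketch_can_change_pte_writable(struct vm_area_struct *vma,
						   unsigned long addr, pte_t pte)
	{
		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
			return false;
		if (userfaultfd_pte_wp(vma, pte))	/* uffd-wp must see the fault */
			return false;
		if (!(vma->vm_flags & VM_SHARED)) {
			/* MAP_PRIVATE: only exclusive anon pages are safe to map RW */
			struct page *page = vm_normal_page(vma, addr, pte);

			return page && PageAnon(page) && PageAnonExclusive(page);
		}
		/* MAP_SHARED: a clean PTE may still owe the FS a writenotify fault */
		return pte_dirty(pte);
	}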
86 static long change_pte_range(struct mmu_gather *tlb, in change_pte_range() argument
98 tlb_change_page_size(tlb, PAGE_SIZE); in change_pte_range()
99 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
101 return -EAGAIN; in change_pte_range()
104 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
105 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
108 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
124 /* Avoid TLB flush if possible */ in change_pte_range()
133 /* Also skip shared copy-on-write pages */ in change_pte_range()
134 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
150 * a single-threaded process is running on. in change_pte_range()
179 * to catch actual write access -- see in change_pte_range()
186 * writable and avoid the write-fault handler, for in change_pte_range()
197 tlb_flush_pte_range(tlb, addr, PAGE_SIZE); in change_pte_range()
220 * We do not preserve soft-dirtiness. See in change_pte_range()
238 * If this is uffd-wp pte marker and we'd like in change_pte_range()
243 pte_clear(vma->vm_mm, addr, pte); in change_pte_range()
257 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
273 * For file-backed mem, we need to be able to in change_pte_range()
274 * wr-protect a none pte, because even if the in change_pte_range()
278 set_pte_at(vma->vm_mm, addr, pte, in change_pte_range()
285 pte_unmap_unlock(pte - 1, ptl); in change_pte_range()
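Between pte_offset_map_lock() at line 99 and pte_unmap_unlock() at line 285 sits the per-PTE loop. A skeleton of that walk, with the NUMA-hinting, uffd-wp, and swap-entry branches elided (assume the locals of change_pte_range() are in scope):

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t oldpte = ptep_get(pte);

		if (pte_present(oldpte)) {
			/* start/commit let the arch batch the update transparently */
			pte_t ptent = ptep_modify_prot_start(vma, addr, pte);

			ptent = pte_modify(ptent, newprot);
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);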
291 * Return true if we want to split THPs into PTE mappings in change
299 * we need to split. For example, we cannot wr-protect a file thp in pgtable_split_needed()
301 * split by erasing the pmd so far. in pgtable_split_needed()
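In current trees the predicate those fragments belong to reduces to roughly the following (treat this as a paraphrase, not the verbatim body):

	static inline bool pgtable_split_needed(struct vm_area_struct *vma,
						unsigned long cp_flags)
	{
		/* pte markers exist only at pte level, so wr-protecting a
		 * file-backed THP under uffd-wp must split the pmd first */
		return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
	}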
331 if (pte_alloc(vma->vm_mm, pmd)) \
332 err = -ENOMEM; \
346 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
348 err = -ENOMEM; \
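Lines 331-348 belong to the change_*_prepare() helpers: when uffd-wp needs pte markers over a file mapping, missing page-table levels must be allocated so a marker can land even in a hole. The pmd-level helper has approximately this shape (assuming a pgtable_populate_needed()-style predicate as in recent trees):

	#define change_pmd_prepare(vma, pmd, cp_flags)				\
		({								\
			long err = 0;						\
			if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
				if (pte_alloc(vma->vm_mm, pmd))			\
					err = -ENOMEM;				\
			}							\
			err;							\
		})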
353 static inline long change_pmd_range(struct mmu_gather *tlb, in change_pmd_range() argument
380 if ((next - addr != HPAGE_PMD_SIZE) || in change_pmd_range()
384 * For file-backed, the pmd could have been in change_pmd_range()
386 * necessary, then fall-through to pte level. in change_pmd_range()
394 ret = change_huge_pmd(tlb, vma, pmd, in change_pmd_range()
406 /* fall through, the trans huge pmd just split */ in change_pmd_range()
409 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, in change_pmd_range()
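The matched lines at 380-409 are the core of the pmd iteration: a huge pmd is either changed in one shot or split, after which the walk falls through to the pte level. Condensed (devmap handling and the exact return-value bookkeeping are elided):

	pmd = pmd_offset(pud, addr);
	do {
		long ret = 0;

		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) {
			if ((next - addr != HPAGE_PMD_SIZE) ||
			    pgtable_split_needed(vma, cp_flags))
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			else
				ret = change_huge_pmd(tlb, vma, pmd, addr,
						      newprot, cp_flags);
		}
		if (ret)		/* whole huge pmd handled above */
			pages += ret;
		else			/* never huge, or just split */
			pages += change_pte_range(tlb, vma, pmd, addr, next,
						  newprot, cp_flags);
	} while (pmd++, addr = next, addr != end);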
423 static inline long change_pud_range(struct mmu_gather *tlb, in change_pud_range() argument
451 vma->vm_mm, addr, end); in change_pud_range()
456 if ((next - addr != PUD_SIZE) || in change_pud_range()
461 ret = change_huge_pud(tlb, vma, pudp, in change_pud_range()
472 pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot, in change_pud_range()
482 static inline long change_p4d_range(struct mmu_gather *tlb, in change_p4d_range() argument
498 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, in change_p4d_range()
505 static long change_protection_range(struct mmu_gather *tlb, in change_protection_range() argument
509 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
516 tlb_start_vma(tlb, vma); in change_protection_range()
526 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot, in change_protection_range()
530 tlb_end_vma(tlb, vma); in change_protection_range()
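Every level of this walk reports changed ranges into the same mmu_gather, so the expensive TLB flush happens once per batch rather than per PTE. The batching protocol, condensed from the calls that appear above and in do_mprotect_pkey() below:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* arm the batch (caller) */
	tlb_start_vma(&tlb, vma);	/* per-VMA bookkeeping */
	/* ... tlb_flush_pte_range(&tlb, addr, PAGE_SIZE) per changed PTE ... */
	tlb_end_vma(&tlb, vma);		/* flush what this VMA touched */
	tlb_finish_mmu(&tlb);		/* final flush + teardown (caller) */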
535 long change_protection(struct mmu_gather *tlb, in change_protection() argument
539 pgprot_t newprot = vma->vm_page_prot; in change_protection()
546 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking) in change_protection()
548 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly. in change_protection()
560 pages = change_protection_range(tlb, vma, start, end, newprot, in change_protection()
570 *(pgprot_t *)(walk->private)) ? in prot_none_pte_entry()
571 0 : -EACCES; in prot_none_pte_entry()
579 *(pgprot_t *)(walk->private)) ? in prot_none_hugetlb_entry()
580 0 : -EACCES; in prot_none_hugetlb_entry()
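These two callbacks refuse a transition out of PROT_NONE when the architecture must validate PFN-mapped pages (see the walk_page_range() call at line 626). They plug into the standard pagewalk API roughly as follows; prot_none_test() is the companion filter not shown in the matches, and newer trees also set a .walk_lock mode:

	static const struct mm_walk_ops prot_none_walk_ops = {
		.pte_entry	= prot_none_pte_entry,
		.hugetlb_entry	= prot_none_hugetlb_entry,
		.test_walk	= prot_none_test,
	};

	/* invoked as: walk_page_range(current->mm, start, end,
	 *			       &prot_none_walk_ops, &new_pgprot) */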
597 mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, in mprotect_fixup() argument
601 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
602 unsigned long oldflags = READ_ONCE(vma->vm_flags); in mprotect_fixup()
603 long nrpages = (end - start) >> PAGE_SHIFT; in mprotect_fixup()
609 return -EPERM; in mprotect_fixup()
626 error = walk_page_range(current->mm, start, end, in mprotect_fixup()
638 * hugetlb mapping were accounted for even if read-only so there is in mprotect_fixup()
645 return -ENOMEM; in mprotect_fixup()
650 return -ENOMEM; in mprotect_fixup()
654 !vma->anon_vma) { in mprotect_fixup()
676 change_protection(tlb, vma, start, end, mm_cp_flags); in mprotect_fixup()
690 vm_stat_account(mm, oldflags, -nrpages); in mprotect_fixup()
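The -nrpages at line 690 is one half of a pair: the pages are debited from the old flags' counters and credited to the new ones, keeping VmData/VmStk-style accounting consistent:

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);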
701 * pkey==-1 when doing a legacy mprotect()
710 const bool rier = (current->personality & READ_IMPLIES_EXEC) && in do_mprotect_pkey()
712 struct mmu_gather tlb; in do_mprotect_pkey() local
719 return -EINVAL; in do_mprotect_pkey()
722 return -EINVAL; in do_mprotect_pkey()
728 return -ENOMEM; in do_mprotect_pkey()
730 return -EINVAL; in do_mprotect_pkey()
734 if (mmap_write_lock_killable(current->mm)) in do_mprotect_pkey()
735 return -EINTR; in do_mprotect_pkey()
741 error = -EINVAL; in do_mprotect_pkey()
742 if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey)) in do_mprotect_pkey()
745 vma_iter_init(&vmi, current->mm, start); in do_mprotect_pkey()
747 error = -ENOMEM; in do_mprotect_pkey()
752 if (vma->vm_start >= end) in do_mprotect_pkey()
754 start = vma->vm_start; in do_mprotect_pkey()
755 error = -EINVAL; in do_mprotect_pkey()
756 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
759 if (vma->vm_start > start) in do_mprotect_pkey()
762 end = vma->vm_end; in do_mprotect_pkey()
763 error = -EINVAL; in do_mprotect_pkey()
764 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
770 if (start > vma->vm_start) in do_mprotect_pkey()
773 tlb_gather_mmu(&tlb, current->mm); in do_mprotect_pkey()
775 tmp = vma->vm_start; in do_mprotect_pkey()
781 if (vma->vm_start != tmp) { in do_mprotect_pkey()
782 error = -ENOMEM; in do_mprotect_pkey()
787 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
799 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
803 error = -EACCES; in do_mprotect_pkey()
807 if (map_deny_write_exec(vma->vm_flags, newflags)) { in do_mprotect_pkey()
808 error = -EACCES; in do_mprotect_pkey()
812 /* Allow architectures to sanity-check the new flags */ in do_mprotect_pkey()
814 error = -EINVAL; in do_mprotect_pkey()
822 tmp = vma->vm_end; in do_mprotect_pkey()
826 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
827 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); in do_mprotect_pkey()
832 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
840 tlb_finish_mmu(&tlb); in do_mprotect_pkey()
843 error = -ENOMEM; in do_mprotect_pkey()
846 mmap_write_unlock(current->mm); in do_mprotect_pkey()
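Stripped of the GROWSDOWN/GROWSUP edge cases and error paths, do_mprotect_pkey() is a locked VMA iteration that funnels each piece into mprotect_fixup() under one mmu_gather. Condensed from the matched lines (a paraphrase, not the verbatim body):

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	tlb_gather_mmu(&tlb, current->mm);
	for_each_vma_range(vmi, vma, end) {
		unsigned long newflags;

		newflags = calc_vm_prot_bits(prot, pkey) |
			   (vma->vm_flags & ~mask_off_old_flags);
		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart,
				       tmp, newflags);
		if (error)
			break;
	}
	tlb_finish_mmu(&tlb);
	mmap_write_unlock(current->mm);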
853 return do_mprotect_pkey(start, len, prot, -1); in SYSCALL_DEFINE3()
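Line 853 is the whole story of the legacy syscall: mprotect(2) is pkey_mprotect(2) with pkey == -1. An illustrative userspace sketch (glibc >= 2.27 wrappers assumed; harden() is a hypothetical helper):

	#define _GNU_SOURCE
	#include <sys/mman.h>

	static int harden(void *addr, size_t len)
	{
		int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);

		if (pkey < 0)	/* no pkey support: plain mprotect, pkey == -1 */
			return mprotect(addr, len, PROT_READ);
		/* same kernel path, but with a real protection key attached */
		return pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
	}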
871 return -EINVAL; in SYSCALL_DEFINE2()
874 return -EINVAL; in SYSCALL_DEFINE2()
876 mmap_write_lock(current->mm); in SYSCALL_DEFINE2()
877 pkey = mm_pkey_alloc(current->mm); in SYSCALL_DEFINE2()
879 ret = -ENOSPC; in SYSCALL_DEFINE2()
880 if (pkey == -1) in SYSCALL_DEFINE2()
885 mm_pkey_free(current->mm, pkey); in SYSCALL_DEFINE2()
890 mmap_write_unlock(current->mm); in SYSCALL_DEFINE2()
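Reassembled, the allocation syscall is a short critical section: grab a free key, ask the architecture to program its initial access rights, and roll the key back on failure. Condensed from the matched lines:

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);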
898 mmap_write_lock(current->mm); in SYSCALL_DEFINE1()
899 ret = mm_pkey_free(current->mm, pkey); in SYSCALL_DEFINE1()
900 mmap_write_unlock(current->mm); in SYSCALL_DEFINE1()