Lines Matching +full:tlb +full:- +full:split
1 // SPDX-License-Identifier: GPL-2.0
34 #include <linux/memory-tiers.h>
38 #include <asm/tlb.h>
47 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) in can_change_pte_writable()
58 /* Do we need write faults for uffd-wp tracking? */ in can_change_pte_writable()
62 if (!(vma->vm_flags & VM_SHARED)) { in can_change_pte_writable()
64 * Writable MAP_PRIVATE mapping: We can only special-case on in can_change_pte_writable()
66 * write-fault handler similarly would map them writable without in can_change_pte_writable()
75 * needs a real write-fault for writenotify in can_change_pte_writable()
78 * just like the write-fault handler would do. in can_change_pte_writable()
83 static long change_pte_range(struct mmu_gather *tlb, in change_pte_range() argument
95 tlb_change_page_size(tlb, PAGE_SIZE); in change_pte_range()
96 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
98 return -EAGAIN; in change_pte_range()
101 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
102 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
105 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
121 /* Avoid TLB flush if possible */ in change_pte_range()
130 /* Also skip shared copy-on-write pages */ in change_pte_range()
131 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
146 * a single-threaded process is running on. in change_pte_range()
176 * to catch actual write access -- see in change_pte_range()
183 * writable and avoid the write-fault handler, for in change_pte_range()
194 tlb_flush_pte_range(tlb, addr, PAGE_SIZE); in change_pte_range()
217 * We do not preserve soft-dirtiness. See in change_pte_range()
241 * If this is uffd-wp pte marker and we'd like in change_pte_range()
246 pte_clear(vma->vm_mm, addr, pte); in change_pte_range()
260 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
276 * For file-backed mem, we need to be able to in change_pte_range()
277 * wr-protect a none pte, because even if the in change_pte_range()
281 set_pte_at(vma->vm_mm, addr, pte, in change_pte_range()
288 pte_unmap_unlock(pte - 1, ptl); in change_pte_range()
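The MM_CP_TRY_CHANGE_WRITABLE path in change_pte_range() above, gated by can_change_pte_writable(), lets mprotect() map an exclusive, already-dirtied private anonymous page writable immediately instead of leaving that to the write-fault handler. A small userspace experiment can hint at this; it is only a heuristic sketch (fault counts vary with kernel version and THP policy), and the file name and the "may not fault" claim are illustrative assumptions, not guarantees:

    /* mprotect_upgrade_demo.c (hypothetical name): write a private
     * anonymous page, write-protect it, restore PROT_WRITE, then see
     * whether the next write still takes a minor fault. */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/resource.h>

    static long minflt(void)
    {
        struct rusage ru;
        getrusage(RUSAGE_SELF, &ru);
        return ru.ru_minflt;
    }

    int main(void)
    {
        size_t len = 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        p[0] = 1;                        /* populate: page is anon and dirty */
        mprotect(p, len, PROT_READ);     /* write-protect the PTE            */
        mprotect(p, len, PROT_READ | PROT_WRITE);

        long before = minflt();
        p[0] = 2;                        /* may not fault if change_pte_range()
                                          * already made the PTE writable     */
        long after = minflt();

        printf("minor faults for the second write: %ld\n", after - before);
        return 0;
    }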
294 * Return true if we want to split THPs into PTE mappings in change
302 * we need to split. We cannot wr-protect shmem thp because file in pgtable_split_needed()
303 * thp is handled differently when split by erasing the pmd so far. in pgtable_split_needed()
333 if (pte_alloc(vma->vm_mm, pmd)) \
334 err = -ENOMEM; \
348 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
350 err = -ENOMEM; \
355 static inline long change_pmd_range(struct mmu_gather *tlb, in change_pmd_range() argument
387 vma->vm_mm, addr, end); in change_pmd_range()
393 if ((next - addr != HPAGE_PMD_SIZE) || in change_pmd_range()
397 * For file-backed, the pmd could have been in change_pmd_range()
399 * necessary, then fall-through to pte level. in change_pmd_range()
407 ret = change_huge_pmd(tlb, vma, pmd, in change_pmd_range()
419 /* fall through, the trans huge pmd just split */ in change_pmd_range()
422 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, in change_pmd_range()
439 static inline long change_pud_range(struct mmu_gather *tlb, in change_pud_range() argument
455 pages += change_pmd_range(tlb, vma, pud, addr, next, newprot, in change_pud_range()
462 static inline long change_p4d_range(struct mmu_gather *tlb, in change_p4d_range() argument
478 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, in change_p4d_range()
485 static long change_protection_range(struct mmu_gather *tlb, in change_protection_range() argument
489 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
496 tlb_start_vma(tlb, vma); in change_protection_range()
506 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot, in change_protection_range()
510 tlb_end_vma(tlb, vma); in change_protection_range()
515 long change_protection(struct mmu_gather *tlb, in change_protection() argument
519 pgprot_t newprot = vma->vm_page_prot; in change_protection()
526 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking) in change_protection()
528 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly. in change_protection()
540 pages = change_protection_range(tlb, vma, start, end, newprot, in change_protection()
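change_protection() itself never flushes the TLB; it only records the touched ranges in the mmu_gather supplied by its caller, and the deferred flush happens in tlb_finish_mmu(). A condensed caller-side sketch, modelled on the mprotect_fixup()/do_mprotect_pkey() fragments further down (kernel-internal code shown for orientation only, not buildable standalone, and the wrapper name is made up):

    /* apply_new_protection() is a hypothetical helper showing how the
     * mmu_gather is threaded through a protection change. */
    static void apply_new_protection(struct mm_struct *mm,
                                     struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end,
                                     unsigned long cp_flags)
    {
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm);       /* start batching invalidations   */
        vma_set_page_prot(vma);         /* refresh vma->vm_page_prot      */
        change_protection(&tlb, vma, start, end, cp_flags);
        tlb_finish_mmu(&tlb);           /* issue the deferred TLB flushes */
    }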
550 *(pgprot_t *)(walk->private)) ? in prot_none_pte_entry()
551 0 : -EACCES; in prot_none_pte_entry()
559 *(pgprot_t *)(walk->private)) ? in prot_none_hugetlb_entry()
560 0 : -EACCES; in prot_none_hugetlb_entry()
577 mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, in mprotect_fixup() argument
581 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
582 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
583 long nrpages = (end - start) >> PAGE_SHIFT; in mprotect_fixup()
599 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
603 error = walk_page_range(current->mm, start, end, in mprotect_fixup()
615 * hugetlb mapping were accounted for even if read-only so there is in mprotect_fixup()
622 return -ENOMEM; in mprotect_fixup()
627 return -ENOMEM; in mprotect_fixup()
631 !vma->anon_vma) { in mprotect_fixup()
653 change_protection(tlb, vma, start, end, mm_cp_flags); in mprotect_fixup()
667 vm_stat_account(mm, oldflags, -nrpages); in mprotect_fixup()
678 * pkey==-1 when doing a legacy mprotect()
687 const bool rier = (current->personality & READ_IMPLIES_EXEC) && in do_mprotect_pkey()
689 struct mmu_gather tlb; in do_mprotect_pkey() local
696 return -EINVAL; in do_mprotect_pkey()
699 return -EINVAL; in do_mprotect_pkey()
705 return -ENOMEM; in do_mprotect_pkey()
707 return -EINVAL; in do_mprotect_pkey()
711 if (mmap_write_lock_killable(current->mm)) in do_mprotect_pkey()
712 return -EINTR; in do_mprotect_pkey()
718 error = -EINVAL; in do_mprotect_pkey()
719 if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey)) in do_mprotect_pkey()
722 vma_iter_init(&vmi, current->mm, start); in do_mprotect_pkey()
724 error = -ENOMEM; in do_mprotect_pkey()
729 if (vma->vm_start >= end) in do_mprotect_pkey()
731 start = vma->vm_start; in do_mprotect_pkey()
732 error = -EINVAL; in do_mprotect_pkey()
733 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
736 if (vma->vm_start > start) in do_mprotect_pkey()
739 end = vma->vm_end; in do_mprotect_pkey()
740 error = -EINVAL; in do_mprotect_pkey()
741 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
747 if (start > vma->vm_start) in do_mprotect_pkey()
750 tlb_gather_mmu(&tlb, current->mm); in do_mprotect_pkey()
752 tmp = vma->vm_start; in do_mprotect_pkey()
758 if (vma->vm_start != tmp) { in do_mprotect_pkey()
759 error = -ENOMEM; in do_mprotect_pkey()
764 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
776 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
780 error = -EACCES; in do_mprotect_pkey()
785 error = -EACCES; in do_mprotect_pkey()
789 /* Allow architectures to sanity-check the new flags */ in do_mprotect_pkey()
791 error = -EINVAL; in do_mprotect_pkey()
799 tmp = vma->vm_end; in do_mprotect_pkey()
803 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
804 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); in do_mprotect_pkey()
809 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
817 tlb_finish_mmu(&tlb); in do_mprotect_pkey()
820 error = -ENOMEM; in do_mprotect_pkey()
823 mmap_write_unlock(current->mm); in do_mprotect_pkey()
830 return do_mprotect_pkey(start, len, prot, -1); in SYSCALL_DEFINE3()
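This is the plain mprotect(2) entry point; it forwards to do_mprotect_pkey() with pkey == -1. The argument checks in the fragments above translate directly into the errno values a caller observes. A minimal userspace sketch of the common cases (the exact ENOMEM outcome for the oversized range depends on what happens to be mapped next to the region):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* start must be page-aligned: do_mprotect_pkey() returns -EINVAL */
        if (mprotect(p + 1, page, PROT_READ) < 0)
            printf("unaligned start: %s\n", strerror(errno));

        /* a range crossing unmapped pages typically yields -ENOMEM */
        if (mprotect(p, 64 * page, PROT_READ) < 0)
            printf("partly unmapped range: %s\n", strerror(errno));

        /* normal case: drop write permission on the first page */
        if (mprotect(p, page, PROT_READ) == 0)
            printf("first page is now read-only\n");

        return 0;
    }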
848 return -EINVAL; in SYSCALL_DEFINE2()
851 return -EINVAL; in SYSCALL_DEFINE2()
853 mmap_write_lock(current->mm); in SYSCALL_DEFINE2()
854 pkey = mm_pkey_alloc(current->mm); in SYSCALL_DEFINE2()
856 ret = -ENOSPC; in SYSCALL_DEFINE2()
857 if (pkey == -1) in SYSCALL_DEFINE2()
862 mm_pkey_free(current->mm, pkey); in SYSCALL_DEFINE2()
867 mmap_write_unlock(current->mm); in SYSCALL_DEFINE2()
875 mmap_write_lock(current->mm); in SYSCALL_DEFINE1()
876 ret = mm_pkey_free(current->mm, pkey); in SYSCALL_DEFINE1()
877 mmap_write_unlock(current->mm); in SYSCALL_DEFINE1()
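pkey_alloc() and pkey_free() above pair with pkey_mprotect(), i.e. the pkey != -1 path of do_mprotect_pkey(). A minimal sketch using the glibc wrappers; it assumes hardware and kernel support for memory protection keys (e.g. x86 pkeys), otherwise pkey_alloc() fails with EINVAL or ENOSPC:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* allocate a key whose default rights forbid writes through it */
        int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
        if (pkey < 0) {
            perror("pkey_alloc");       /* no pkey support on this system */
            return 1;
        }

        /* tag the page with the key: the page tables keep PROT_READ|PROT_WRITE,
         * but access through this key is restricted per thread */
        if (pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey) < 0) {
            perror("pkey_mprotect");
            return 1;
        }
        printf("page tagged with pkey %d (writes via this key disabled)\n", pkey);

        munmap(p, page);                /* untag the memory before releasing the key */
        pkey_free(pkey);
        return 0;
    }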