Lines Matching +full:tlb +full:- +full:split
1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
27 #include <asm/tlb.h>
29 #include <asm/pte-walk.h>
41 return current->thread.regs && TRAP(current->thread.regs) == 0x400; in is_exec_fault()
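
The is_exec_fault() line above appears to come from the powerpc pgtable code: a fault is treated as an instruction fetch (exec) fault when the saved trap vector is 0x400, the PowerPC Instruction Storage Interrupt. Below is a minimal user-space model of just that check; struct fake_regs and the 0x300/0x400 sample values are illustrative stand-ins, not the kernel's pt_regs handling.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel's pt_regs; only the trap field matters here. */
    struct fake_regs {
            unsigned long trap;
    };

    /* 0x400 is the PowerPC Instruction Storage Interrupt vector, i.e. an exec fault. */
    static bool is_exec_fault_model(const struct fake_regs *regs)
    {
            return regs && regs->trap == 0x400;
    }

    int main(void)
    {
            struct fake_regs isi = { .trap = 0x400 };   /* instruction storage interrupt */
            struct fake_regs dsi = { .trap = 0x300 };   /* data storage interrupt */

            printf("ISI -> exec fault? %d\n", is_exec_fault_model(&isi));
            printf("DSI -> exec fault? %d\n", is_exec_fault_model(&dsi));
            return 0;
    }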
76 /* Server-style MMU handles coherency when hashing if HW exec permission
77 * is supported per page (currently 64-bit only). If not, then we always
90 if (!test_bit(PG_dcache_clean, &folio->flags)) { in set_pte_filter_hash()
92 set_bit(PG_dcache_clean, &folio->flags); in set_pte_filter_hash()
108 * This is also called once per folio, so only work with folio->flags here.
130 if (test_bit(PG_dcache_clean, &folio->flags)) in set_pte_filter()
136 set_bit(PG_dcache_clean, &folio->flags); in set_pte_filter()
168 if (WARN_ON(!(vma->vm_flags & VM_EXEC))) in set_access_flags_filter()
178 if (test_bit(PG_dcache_clean, &folio->flags)) in set_access_flags_filter()
183 set_bit(PG_dcache_clean, &folio->flags); in set_access_flags_filter()
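
The set_pte_filter_hash(), set_pte_filter() and set_access_flags_filter() fragments above all revolve around the same lazy coherency trick: flush the data/instruction caches the first time a folio is mapped executable, then record that fact in PG_dcache_clean on folio->flags so later mappings can skip the flush. Here is a stripped-down user-space sketch of that flush-once pattern; the struct, flag value and helper names are illustrative, not the kernel's.

    #include <stdio.h>

    #define PG_DCACHE_CLEAN_MODEL (1UL << 0)    /* invented bit, mirrors PG_dcache_clean */

    struct fake_folio {
            unsigned long flags;
    };

    /* Stand-in for flush_dcache_icache_folio(): real cache maintenance goes here. */
    static void flush_caches(struct fake_folio *folio)
    {
            printf("flushing d/i-caches for folio %p\n", (void *)folio);
    }

    /* Called whenever an executable mapping of @folio is about to be created. */
    static void filter_exec_mapping(struct fake_folio *folio)
    {
            if (folio->flags & PG_DCACHE_CLEAN_MODEL)
                    return;                           /* already flushed once, skip */
            flush_caches(folio);
            folio->flags |= PG_DCACHE_CLEAN_MODEL;    /* remember, so the next map is cheap */
    }

    int main(void)
    {
            struct fake_folio folio = { .flags = 0 };

            filter_exec_mapping(&folio);    /* first exec mapping: flushes */
            filter_exec_mapping(&folio);    /* later mappings: no-op */
            return 0;
    }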
196 /* Note: mm->context.id might not yet have been assigned as in set_ptes()
213 * tlb flush for this update. in set_ptes()
219 if (--nr == 0) in set_ptes()
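
The set_ptes() fragments show the batched form of the API: it writes nr consecutive page table entries for one folio, with the loop terminating via if (--nr == 0). A toy model of that loop shape follows, under the assumption that each successive entry maps the next page frame; the types and the page-shift value are illustrative, and the store stands in for the real __set_pte_at().

    #include <stdio.h>

    #define FAKE_PAGE_SHIFT 12              /* 4K pages, illustrative */

    typedef unsigned long fake_pte_t;

    /* Write @nr consecutive entries; each one maps the next page frame. */
    static void set_ptes_model(fake_pte_t *ptep, unsigned long pfn, unsigned int nr)
    {
            for (;;) {
                    *ptep = pfn << FAKE_PAGE_SHIFT;   /* stand-in for __set_pte_at() */
                    if (--nr == 0)
                            break;
                    ptep++;
                    pfn++;
            }
    }

    int main(void)
    {
            fake_pte_t ptes[4] = { 0 };

            set_ptes_model(ptes, 0x1234, 4);
            for (int i = 0; i < 4; i++)
                    printf("pte[%d] = %#lx\n", i, (unsigned long)ptes[i]);
            return 0;
    }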
250 assert_pte_locked(vma->vm_mm, address); in ptep_set_access_flags()
265 * TLB entry. Without this, platforms that don't do a write of the TLB in huge_ptep_set_access_flags()
266 * entry in the TLB miss handler asm will fault ad infinitum. in huge_ptep_set_access_flags()
282 assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep)); in huge_ptep_set_access_flags()
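
The ptep_set_access_flags()/huge_ptep_set_access_flags() fragments hinge on one contract: merge the new accessed/dirty/write bits into the entry and report whether anything actually changed, because a changed entry means the caller must update or flush the now-stale TLB entry (the quoted comment notes that platforms whose TLB miss handler does not rewrite the entry would otherwise fault ad infinitum). A self-contained sketch of that contract, with invented bit names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented bit layout, only for the sketch. */
    #define FAKE_PTE_ACCESSED (1UL << 0)
    #define FAKE_PTE_DIRTY    (1UL << 1)
    #define FAKE_PTE_WRITE    (1UL << 2)

    typedef unsigned long fake_pte_t;

    /* Fold the requested access/dirty/write bits into *ptep; report whether it changed. */
    static bool set_access_flags_model(fake_pte_t *ptep, fake_pte_t entry)
    {
            fake_pte_t old = *ptep;
            fake_pte_t new = old | (entry & (FAKE_PTE_ACCESSED | FAKE_PTE_DIRTY | FAKE_PTE_WRITE));

            if (new == old)
                    return false;   /* nothing changed: no TLB maintenance needed */

            *ptep = new;
            return true;            /* changed: caller must update/flush the stale TLB entry */
    }

    int main(void)
    {
            fake_pte_t pte = FAKE_PTE_ACCESSED;

            printf("first update changed:  %d\n", set_access_flags_model(&pte, FAKE_PTE_DIRTY));
            printf("second update changed: %d\n", set_access_flags_model(&pte, FAKE_PTE_DIRTY));
            return 0;
    }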
303 #error "8M hugetlb folios are incompatible with split page table locks"
313 * tlb flush for this update. in __set_huge_pte_at()
351 * tlb flush for this update. in set_huge_pte_at()
386 pgd = mm->pgd + pgd_index(addr); in assert_pte_locked()
426 * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
448 * value doesn't get updated by a parallel THP split/collapse, in __find_linux_pte()
503 * A hugepage split is captured by this condition, see in __find_linux_pte()
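
The __find_linux_pte() fragments describe a lockless page table walk: the caller must keep interrupts (or at least the IRQ soft mask) in a safe state, and each level's entry is read once into a local so every decision is made on a stable snapshot rather than on memory that a parallel THP split/collapse could rewrite under the walker. A very reduced user-space model of that single-snapshot pattern; READ_ONCE is stubbed and the leaf bit is invented for illustration.

    #include <stdio.h>

    #define FAKE_LEAF 0x1UL                 /* invented: bit 0 marks a huge/leaf entry */

    typedef unsigned long fake_entry_t;

    /* Stand-in for READ_ONCE(): the point is to read the entry exactly once. */
    static fake_entry_t read_once(const volatile fake_entry_t *p)
    {
            return *p;
    }

    /*
     * Inspect one level of the walk from a single snapshot.  Returns the
     * snapshot (0 means nothing mapped) and sets *is_leaf when the entry is
     * a huge/leaf mapping rather than a pointer to the next table.
     */
    static fake_entry_t walk_level(const volatile fake_entry_t *slot, int *is_leaf)
    {
            fake_entry_t e = read_once(slot);   /* all later tests use this local copy */

            *is_leaf = 0;
            if (!e)
                    return 0;                   /* e.g. cleared by a parallel split */
            if (e & FAKE_LEAF)
                    *is_leaf = 1;
            return e;
    }

    int main(void)
    {
            volatile fake_entry_t pmd = 0xabcd0000UL | FAKE_LEAF;
            int leaf;
            fake_entry_t e = walk_level(&pmd, &leaf);

            printf("snapshot %#lx, leaf=%d\n", (unsigned long)e, leaf);
            return 0;
    }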