Lines matching: ctx, asid

1 /* SPDX-License-Identifier: GPL-2.0 */
16 #include <asm/processor-flags.h>
23 #define TLB_FLUSH_ALL -1UL
76 * are on. This means that it may not match current->active_mm,
81 * LOADED_MM_SWITCHING during the brief interrupts-off window
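The two fragments above describe a sentinel protocol: during the switch, loaded_mm is parked at LOADED_MM_SWITCHING so that an NMI arriving in the interrupts-off window can tell that CR3 and loaded_mm may disagree and refuse user accesses. A minimal userspace analogue of that protocol; fake_mm, uaccess_okay() and switch_mm() here are invented stand-ins, not the kernel's definitions:

    #include <stdbool.h>
    #include <stddef.h>

    struct fake_mm { int id; };

    /* Sentinel: "a switch is in progress, state is inconsistent". */
    #define LOADED_MM_SWITCHING ((struct fake_mm *)1UL)

    static struct fake_mm *loaded_mm;

    /* What an NMI-style observer would check before touching user memory. */
    static bool uaccess_okay(void)
    {
            return loaded_mm != NULL && loaded_mm != LOADED_MM_SWITCHING;
    }

    static void switch_mm(struct fake_mm *next)
    {
            loaded_mm = LOADED_MM_SWITCHING; /* open the inconsistent window */
            /* ... the kernel would write CR3 here ... */
            loaded_mm = next;                /* window closed, state consistent */
    }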
101 * This tells us to go invalidate all the non-loaded ctxs[]
104 * The current ctx was kept up-to-date as it ran and does not
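Lines 101-104 describe deferred invalidation: instead of flushing every context the moment the page tables change, a flag is set and the non-loaded contexts are wiped at the next context switch. A toy rendering of that deferral; TLB_NR_DYN_ASIDS and the structures below are stand-ins for the kernel's:

    #include <stdbool.h>

    #define TLB_NR_DYN_ASIDS 6

    struct tlb_ctx { unsigned long long ctx_id, tlb_gen; };

    static struct tlb_ctx ctxs[TLB_NR_DYN_ASIDS];
    static unsigned int loaded_asid;
    static bool invalidate_other;

    /* Run at context-switch time: wipe every context except the loaded one. */
    static void maybe_invalidate_others(void)
    {
            unsigned int i;

            if (!invalidate_other)
                    return;
            for (i = 0; i < TLB_NR_DYN_ASIDS; i++)
                    if (i != loaded_asid)
                            ctxs[i].ctx_id = 0; /* 0 = "no mm": forces a flush on reuse */
            invalidate_other = false;
    }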
134 * There is one per ASID that we use, and the ASID (what the
139 * contain entries that are out-of-date relative to when that mm reached
147 * various bits of init code. This is fine -- code that
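The ctxs[] comment (lines 134-147) is the heart of the PCID scheme: the ASID is an index into a small per-CPU table, and each slot remembers which mm (ctx_id) its TLB entries came from. A simplified lookup in that spirit, reusing the toy ctxs[]/TLB_NR_DYN_ASIDS from the sketch above; the real logic lives in choose_new_asid() in arch/x86/mm/tlb.c:

    static unsigned int pick_asid(unsigned long long ctx_id, bool *need_flush)
    {
            static unsigned int next_victim;
            unsigned int i;

            for (i = 0; i < TLB_NR_DYN_ASIDS; i++) {
                    if (ctxs[i].ctx_id == ctx_id) {
                            *need_flush = false;  /* TLB contents still valid for this mm */
                            return i;
                    }
            }
            /* No slot remembers this mm: evict one and flush on load. */
            *need_flush = true;
            next_victim = (next_victim + 1) % TLB_NR_DYN_ASIDS;
            ctxs[next_victim].ctx_id = ctx_id;
            return next_victim;
    }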
159 * - Actively using an mm. Our CPU's bit will be set in
162 * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
165 * - Lazily using a real mm. loaded_mm != &init_mm, our bit
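Lines 159-165 enumerate the three states a CPU can be in with respect to an mm. The same taxonomy as a toy classifier; fake_mm comes from the first sketch, and the running_user_thread flag is an invented stand-in for what the kernel derives from its own per-CPU state:

    #include <stdbool.h>

    enum mm_state { MM_ACTIVE, MM_KERNEL_ONLY, MM_LAZY };

    static enum mm_state classify(const struct fake_mm *loaded,
                                  const struct fake_mm *init,
                                  bool running_user_thread)
    {
            if (loaded == init)
                    return MM_KERNEL_ONLY;  /* not using a real mm */
            return running_user_thread ? MM_ACTIVE : MM_LAZY;
    }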
195 * - flush_tlb_all() flushes all processes' TLBs
196 * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
197 * - flush_tlb_page(vma, vmaddr) flushes one page
198 * - flush_tlb_range(vma, start, end) flushes a range of pages
199 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
200 * - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
203 * and page-granular flushes are available only on i486 and up.
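The API list above separates page-granular from full flushes because each single-page flush has a cost: past some number of pages, one full flush is cheaper than many single-page flushes. A toy dispatcher showing that trade-off; the threshold and both helpers are invented for the sketch (the kernel tunes the real cutoff via tlb_single_page_flush_ceiling):

    #define TOY_PAGE_SIZE 4096UL
    #define TOY_FULL_FLUSH_THRESHOLD 33UL   /* pages; arbitrary for the sketch */

    static void flush_one_page(unsigned long addr) { (void)addr; /* e.g. invlpg */ }
    static void flush_everything(void) { /* e.g. reload CR3 */ }

    static void toy_flush_range(unsigned long start, unsigned long end)
    {
            unsigned long addr;

            if ((end - start) / TOY_PAGE_SIZE > TOY_FULL_FLUSH_THRESHOLD) {
                    flush_everything();     /* cheaper than many single-page flushes */
                    return;
            }
            for (addr = start; addr < end; addr += TOY_PAGE_SIZE)
                    flush_one_page(addr);
    }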
209 * - Fully flush a single mm. .mm will be set, .end will be
213 * - Partially flush a single mm. .mm will be set, .start and
215 * such that the changes between generation .new_tlb_gen-1 and
218 * - Fully flush all mms whose tlb_gens have been updated. .mm
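Lines 209-218 give the three shapes a flush request can take. Receiver-side, the interesting case is the partial flush: flushing just the range is only safe when this CPU is exactly one generation behind, because only the .new_tlb_gen-1 to .new_tlb_gen delta is guaranteed to be contained in it. A sketch under those rules, reusing flush_everything()/toy_flush_range() from the sketch above and the TLB_FLUSH_ALL definition from line 23; struct toy_flush_info is a cut-down stand-in for the kernel's flush_tlb_info:

    struct toy_flush_info {
            struct fake_mm *mm;            /* NULL => the "all mms" case      */
            unsigned long start, end;      /* end == TLB_FLUSH_ALL => full mm */
            unsigned long long new_tlb_gen;
    };

    static void toy_flush_func(const struct toy_flush_info *f,
                               unsigned long long *local_tlb_gen)
    {
            if (*local_tlb_gen >= f->new_tlb_gen)
                    return;                 /* this CPU is already caught up */

            if (f->end == TLB_FLUSH_ALL || *local_tlb_gen < f->new_tlb_gen - 1)
                    flush_everything();     /* can't prove a partial flush suffices */
            else
                    toy_flush_range(f->start, f->end);

            *local_tlb_gen = f->new_tlb_gen;
    }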
238 static inline bool is_dyn_asid(u16 asid) in is_dyn_asid() argument
240 return asid < TLB_NR_DYN_ASIDS; in is_dyn_asid()
243 static inline bool is_global_asid(u16 asid) in is_global_asid() argument
245 return !is_dyn_asid(asid); in is_global_asid()
251 u16 asid; in mm_global_asid() local
256 asid = smp_load_acquire(&mm->context.global_asid); in mm_global_asid()
258 /* mm->context.global_asid is either 0, or a global ASID */ in mm_global_asid()
259 VM_WARN_ON_ONCE(asid && is_dyn_asid(asid)); in mm_global_asid()
261 return asid; in mm_global_asid()
267 mm->context.global_asid = 0; in mm_init_global_asid()
268 mm->context.asid_transition = false; in mm_init_global_asid()
272 static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) in mm_assign_global_asid() argument
275 * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() -> in mm_assign_global_asid()
279 mm->context.asid_transition = true; in mm_assign_global_asid()
280 smp_store_release(&mm->context.global_asid, asid); in mm_assign_global_asid()
285 WRITE_ONCE(mm->context.asid_transition, false); in mm_clear_asid_transition()
293 return mm && READ_ONCE(mm->context.asid_transition); in mm_in_asid_transition()
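Lines 251-293 rely on an acquire/release pairing: mm_assign_global_asid() sets asid_transition before publishing the ASID with smp_store_release(), and mm_global_asid() reads it back with smp_load_acquire(), so any reader that sees a nonzero ASID is also guaranteed to see the transition flag. The same ordering in portable C11 atomics, as a runnable analogue rather than the kernel's code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct toy_mm_context {
            atomic_bool      asid_transition;
            _Atomic uint16_t global_asid;
    };

    static void toy_assign_global_asid(struct toy_mm_context *c, uint16_t asid)
    {
            atomic_store_explicit(&c->asid_transition, true, memory_order_relaxed);
            /* Release: the transition flag is visible to anyone who sees the ASID. */
            atomic_store_explicit(&c->global_asid, asid, memory_order_release);
    }

    static uint16_t toy_global_asid(struct toy_mm_context *c)
    {
            /* Acquire: pairs with the release store above. */
            return atomic_load_explicit(&c->global_asid, memory_order_acquire);
    }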
298 static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { } in mm_assign_global_asid() argument
311 flush_tlb_mm_range((vma)->vm_mm, start, end, \
312 ((vma)->vm_flags & VM_HUGETLB) \
324 flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false); in flush_tlb_page()
347 return atomic64_inc_return(&mm->context.tlb_gen); in inc_mm_tlb_gen()
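inc_mm_tlb_gen() (line 347) bumps an mm-wide generation counter; each CPU tracks the generation it has flushed up to, which is what lets the receiver logic above skip CPUs that are already current. A toy version of the counter protocol, with a single "CPU" for brevity where the kernel keeps per-CPU state:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long long mm_tlb_gen = 1;
    static unsigned long long cpu_seen_gen = 1;   /* per-CPU in reality */

    /* Bump before sending flush requests, like inc_mm_tlb_gen(). */
    static unsigned long long toy_inc_mm_tlb_gen(void)
    {
            return atomic_fetch_add(&mm_tlb_gen, 1) + 1;
    }

    static bool toy_cpu_needs_flush(void)
    {
            return cpu_seen_gen < atomic_load(&mm_tlb_gen);
    }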
354 cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); in arch_tlbbatch_add_pending()
355 batch->unmapped_pages = true; in arch_tlbbatch_add_pending()
356 mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); in arch_tlbbatch_add_pending()
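arch_tlbbatch_add_pending() (lines 354-356) shows the batching pattern: while unmapping, OR each mm's CPU mask into one accumulated mask, then flush once for the whole batch instead of once per page. A reduced model, with a 64-bit mask standing in for cpumask_t:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_batch { uint64_t cpumask; bool unmapped_pages; };

    /* Called once per unmapped mm: accumulate, don't flush yet. */
    static void toy_batch_add(struct toy_batch *b, uint64_t mm_cpus)
    {
            b->cpumask |= mm_cpus;          /* like the cpumask_or() above */
            b->unmapped_pages = true;
    }

    /* Called once at the end: a single flush covers the whole batch. */
    static void toy_batch_flush(struct toy_batch *b)
    {
            /* ... send one flush to every CPU set in b->cpumask ... */
            b->cpumask = 0;
            b->unmapped_pages = false;
    }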
372 * Only include flags that would not trigger spurious page-faults. in pte_flags_need_flush()
373 * Non-present entries are not cached. Hardware would set the in pte_flags_need_flush()
424 /* !PRESENT -> * ; no need for flush */ in pte_needs_flush()
433 * check PTE flags; ignore access-bit; see comment in in pte_needs_flush()
447 /* !PRESENT -> * ; no need for flush */ in huge_pmd_needs_flush()
456 * check PMD flags; do not ignore access-bit; see in huge_pmd_needs_flush()
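The pte_needs_flush()/huge_pmd_needs_flush() fragments encode two rules: a transition from !PRESENT never needs a flush (non-present entries are not cached), and the access bit can be ignored for PTEs but not for PMDs. A toy flag-diff check in that spirit; the bit values are invented for the sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define T_PRESENT  (1u << 0)
    #define T_WRITE    (1u << 1)
    #define T_ACCESSED (1u << 2)   /* ignorable for PTEs, not for PMDs */

    static bool toy_flags_need_flush(uint32_t oldf, uint32_t newf,
                                     bool ignore_access)
    {
            uint32_t care = T_PRESENT | T_WRITE;

            if (!ignore_access)
                    care |= T_ACCESSED;

            if (!(oldf & T_PRESENT))
                    return false;           /* !PRESENT -> * : nothing was cached */

            return (oldf ^ newf) & care;    /* a cached-flag change => flush */
    }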