/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather (a usage sketch follows this comment)
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()); in its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing; it returns a boolean indicating whether the queue is full and a
 *    call to tlb_flush_mmu() is required. tlb_remove_page() and
 *    tlb_remove_page_size() imply that call when required.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations:
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture-provided tlb_flush() to make use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  the comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If set, the mmu_gather does not itself track pages for delayed freeing;
 *  the architecture must provide its own __tlb_remove_page_size().
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  __p*_free_tlb() functions.
 */
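
/*
 * Illustrative only (not part of this header): a minimal sketch of how a
 * caller in the core mm drives the API described above.  example_zap_vma()
 * and the elided page-table walk are hypothetical; the real unmap/zap paths
 * in mm/memory.c follow the same shape.
 */
static void example_zap_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* start a gather for this mm */
	tlb_start_vma(&tlb, vma);	/* cache flush + record VMA flags */

	/*
	 * ... walk the page tables of @vma here, clearing ptes and feeding
	 * them to tlb_remove_tlb_entry()/tlb_remove_page(); a per-pte sketch
	 * appears further down, next to tlb_remove_tlb_entry().
	 */

	tlb_end_vma(&tlb, vma);		/* optional TLB flush at the VMA boundary */
	tlb_finish_mmu(&tlb);		/* final TLB invalidate + free queued pages */
}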

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page
 * based page directories, so the normal page batching can free them:
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
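
/*
 * Sketch only: with CONFIG_MMU_GATHER_TABLE_FREE the architecture supplies a
 * __tlb_remove_table() hook that performs the real free once it is safe to do
 * so.  A hypothetical architecture whose directories are plain pages might do
 * no more than:
 */
static inline void example___tlb_remove_table(void *table)
{
	free_page((unsigned long)table);	/* actually release the directory page */
}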

/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
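
/*
 * Back-of-the-envelope numbers (assuming 4 KiB pages and a 64-bit kernel,
 * where the mmu_gather_batch header is only a few words): MAX_GATHER_BATCH
 * comes out to roughly (4096 - ~16) / 8, i.e. around 500 page pointers per
 * batch page, so each page allocated for the gather defers a few hundred
 * page frees before the batch must be flushed.
 */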

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 */

extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
				   struct encoded_page *page, int page_size);

#define tlb_delay_rmap(tlb)		(((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);

/*
 * We have a no-op version of the rmap removal that doesn't
 * delay anything.
 */
#define tlb_delay_rmap(tlb)		(false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

/* From the struct mmu_gather definition: */
	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address, unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
}

/* With MMU_GATHER_NO_RANGE a ranged flush is not available; flush the whole mm: */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

/* Otherwise the default tlb_flush() is built on flush_tlb_range(): */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages; implementations that flush I-TLB
	 * also flush D-TLB (tile, xtensa, arm), so it's ok to just add
	 * VM_EXEC to an existing mm_struct.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/* nothing was unmapped or freed; there is nothing to flush */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
		tlb_flush_mmu(tlb);
}

static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
					      struct page *page, unsigned int flags)
{
	return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE);
}

/* Like __tlb_remove_page(), but calls tlb_flush_mmu() itself when required. */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	tlb_remove_table(tlb, pt);
}

/* Like tlb_remove_ptdesc, but for page-like page directories. */
static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_page(tlb, ptdesc_page(pt));
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
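
/*
 * Sketch only, loosely modelled on the THP zap path: tlb_change_page_size()
 * is called before any page of the new size is queued, so the gather never
 * mixes sizes in one batch (and, with MMU_GATHER_PAGE_SIZE, tlb_flush() sees
 * a single mmu_gather::page_size).  example_zap_pmd_huge() is hypothetical.
 */
static inline void example_zap_pmd_huge(struct mmu_gather *tlb,
					struct vm_area_struct *vma,
					pmd_t *pmdp, unsigned long addr)
{
	pmd_t orig_pmd;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);	/* may trigger a flush */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmdp, tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);	/* record the PMD-sized range */
	tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
}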

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
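
/*
 * Sketch only: the kind of architecture-provided tlb_flush() these helpers
 * are intended for.  my_flush_tlb_mm() and my_flush_tlb_range_stride() are
 * hypothetical arch primitives; arm64, for instance, derives its invalidation
 * stride from tlb_get_unmap_size() in much the same way.
 */
static inline void example_arch_tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		my_flush_tlb_mm(tlb->mm);			/* hypothetical */
		return;
	}

	if (tlb->end) {
		/* step through the range at the smallest unmapped entry size */
		my_flush_tlb_range_stride(tlb->mm, tlb->start, tlb->end,
					  tlb_get_unmap_size(tlb));	/* hypothetical */
	}
}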

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs after
	 * all, so flush before the page tables are unlinked.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
		 * the ranges growing with the unused space between consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set the corresponding cleared_* bit.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#define __tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)

/*
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
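
/*
 * Sketch only: the typical per-pte pattern inside a zap loop (compare
 * zap_pte_range() in mm/memory.c).  example_zap_one_pte() is hypothetical and
 * leaves out rmap and accounting work.
 */
static inline void example_zap_one_pte(struct mmu_gather *tlb,
				       struct mm_struct *mm,
				       pte_t *ptep, unsigned long addr)
{
	pte_t ptent = ptep_get(ptep);

	if (!pte_present(ptent))
		return;

	ptent = ptep_get_and_clear(mm, addr, ptep);	/* 1) unhook the page */
	tlb_remove_tlb_entry(tlb, ptep, addr);		/* 2) queue the TLB invalidate */
	tlb_remove_page(tlb, pte_page(ptent));		/* 3) queue the page for freeing */
}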

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/*
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address)	do {} while (0)

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/*
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address)	do {} while (0)

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * If you have an architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally explicit
 * flushing for that, likely *separate* from a regular TLB entry flush, and
 * thus you'd need more than just a range flush.  If we ever find an architecture
 * that would want something that odd, I think it is up to that architecture to
 * do its own odd thing, not cause pain for others.
 *
 * For now w.r.t the page table cache, mark the range_size as PAGE_SIZE.
 */

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)

#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
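
/*
 * Sketch only: the __p*_free_tlb() hooks used above are supplied by the
 * architecture.  A hypothetical minimal __pte_free_tlb() for an architecture
 * whose PTE pages are ordinary pages could simply defer the free to the
 * gather; real implementations usually also tear down ptlock/accounting
 * state for the table page first.
 */
static inline void example___pte_free_tlb(struct mmu_gather *tlb,
					  struct ptdesc *pt, unsigned long address)
{
	/* the page-table page is freed only after the TLB has been flushed */
	tlb_remove_page_ptdesc(tlb, pt);
}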