Lines Matching +full:i +full:- +full:tlb +full:- +full:size

// SPDX-License-Identifier: GPL-2.0-or-later
 * This file contains the routines for TLB flushing.
 * this does -not- include 603 however which shares the implementation with
 * -- BenH
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
#include <asm/tlb.h>
#include <asm/code-patching.h>
 * This struct lists the sw-supported page sizes. The hardware MMU may support
/* The variables below are currently only used on 64-bit Book3E
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
 * exceptions. This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 *
 * These are the base non-SMP variants of page and mm flushing
	pid = mm->context.id;				/* in local_flush_tlb_mm() */
	pid = mm ? mm->context.id : 0;			/* in __local_flush_tlb_page() */
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,	/* in local_flush_tlb_page() */
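/*
 * For context: the matched lines above are snippets of the non-SMP
 * variants. A sketch of what the surrounding local_flush_tlb_mm()
 * plausibly looks like, given the PID snapshot visible above (exact
 * body may differ between kernel versions):
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);	/* invalidate every entry tagged with this PID */
	preempt_enable();
}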
 * And here are the SMP non-local implementations
	_tlbil_pid(p ? p->pid : 0);			/* in do_flush_tlb_mm_ipi() */
	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);	/* in do_flush_tlb_page_ipi() */
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 * - some other CPU is re-acquiring a lost PID for this mm
 * invalidation of TLB entries present prior to this call, so we
	pid = mm->context.id;				/* in flush_tlb_mm() */
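/*
 * Sketch of the SMP flush_tlb_mm() that the PID snapshot above belongs
 * to: snapshot the PID with preemption disabled (safe per the race
 * comment above, since only pre-existing entries are guaranteed to be
 * invalidated), IPI the other CPUs in the mm's cpumask, then invalidate
 * locally. Reconstructed from the fragments here; details may vary by
 * kernel version.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };

		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}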
	pid = mm->context.id;				/* in __flush_tlb_page() */
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,	/* in flush_tlb_page() */
 * Flush kernel TLB entries in the given range
 * be optimized based on a threshold on the size of the range, since
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))	/* in flush_tlb_range() */
		flush_tlb_mm(vma->vm_mm);
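/*
 * Putting the two flush_tlb_range() fragments above together: ranges
 * are not flushed page by page. A single-page, page-aligned range is
 * forwarded to flush_tlb_page(), and anything larger flushes the whole
 * mm context; the threshold-based optimization mentioned in the comment
 * is left for the future. A sketch of the likely full function:
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}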
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
 * Below are functions specific to the 64-bit variant of Book3E though that
 * Handling of virtual linear page tables or indirect TLB entries
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
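/*
 * Worked example of the virtual-linear-page-table math above, as a
 * standalone userspace demo (assumes a 64-bit build, 4K pages, 8-byte
 * PTEs; the address value is hypothetical). Shifting the address right
 * by PAGE_SHIFT - 3 turns "address / page size" into a byte offset into
 * the linear PTE table (one 2^3-byte PTE per page), and masking with
 * ~0xffful rounds that offset down to the 4K page of PTEs covering it,
 * which is the unit the TLB entry maps.
 */
#include <stdio.h>

int main(void)
{
	unsigned long address = 0x10023456000ul;	/* hypothetical faulting address */
	unsigned long page_shift = 12;			/* 4K pages assumed */
	unsigned long vpte;

	vpte = (address >> (page_shift - 3)) & ~0xffful;
	/* 0x10023456000 >> 9 = 0x8011a2b0; masked -> 0x8011a000 */
	printf("vpte offset = 0x%lx\n", vpte);
	return 0;
}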
	int i, psize;					/* in setup_page_sizes() */
			shift = def->shift;
			shift = (shift - 10) >> 1;
			def->flags |= MMU_PAGE_SIZE_DIRECT;
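/*
 * The (shift - 10) >> 1 conversion above re-expresses a page size given
 * as a power-of-two byte shift in an encoding where sizes are powers of
 * 4 KB (which is also why odd shifts cannot be represented):
 *
 *	4K:  shift = 12 -> (12 - 10) >> 1 = 1   (4^1 KB)
 *	64K: shift = 16 -> (16 - 10) >> 1 = 3   (4^3 KB)
 *	16M: shift = 24 -> (24 - 10) >> 1 = 7   (4^7 KB)
 *	1G:  shift = 30 -> (30 - 10) >> 1 = 10  (4^10 KB)
 *
 * In the surrounding code (not part of the matched lines) the result is
 * then range-checked against the minimum/maximum size fields read from
 * TLB1CFG before MMU_PAGE_SIZE_DIRECT is set.
 */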
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		if (!def->shift)
		if (tlb1ps & (1U << (def->shift - 10))) {
			def->flags |= MMU_PAGE_SIZE_DIRECT;
			def->flags |= MMU_PAGE_SIZE_INDIRECT;
		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
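/*
 * Both bit tests above treat the TLBnPS registers (TLB1PS on the FSL
 * path, TLB0PS on the generic path) as bitmaps of supported page sizes:
 * bit n set means 2^n KB pages are supported, so a software page size
 * with byte shift s maps to bit (s - 10). A self-contained userspace
 * illustration with a hypothetical register value:
 */
#include <stdio.h>

/* bit n of a TLBnPS-style bitmap set => 2^n KB page size supported */
static void decode_tlbnps(unsigned int tlbnps)
{
	int bit;

	for (bit = 0; bit < 32; bit++)
		if (tlbnps & (1U << bit))
			printf("supports %lu KB pages\n", 1ul << bit);
}

int main(void)
{
	decode_tlbnps(0x1554);	/* hypothetical: 4K 16K 64K 256K 1M 4M */
	return 0;
}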
	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	for (i = 0; i < 3; i++) {
			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
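/*
 * The i < 3 loop above walks three (PS, SPS) pairs packed into the
 * EPTCFG register, five bits each: SPS encodes the subpage size and PS
 * the indirect page size, both as log2(size in KB), which is why they
 * are compared against def->shift - 10. A sketch of the unpacking the
 * matched comparisons sit inside (reconstructed; details may vary by
 * kernel version):
 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;	/* subpage size, log2 KB */
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;	/* indirect page size, log2 KB */
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}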
		if (def->flags == 0) {
			def->shift = 0;
		pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
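/*
 * The comment above is from setup_mmu_htw(). Enabling HW tablewalk
 * means redirecting the 0x1c0/0x1e0 data/instruction TLB miss vectors
 * to the tablewalk-aware handlers via patch_exception() (hence the
 * <asm/code-patching.h> include near the top of the file). A sketch of
 * the likely shape, reconstructed from the kernel tree; handler symbol
 * names may differ by kernel version:
 */
	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
	case PPC_HTW_E6500:
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
	}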
 * Early initialization of the MMU TLB code
	 * page sizes in the TLB, but for now let's assume 16M is	/* in early_init_mmu_global() */
	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 * check what page size combos are supported by the HW. It
	 * for use by the TLB miss code
	 * do this because highmem is not supported on 64-bit.		/* in early_mmu_set_memory_limit() */
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match	/* in setup_initial_memory_limit() */
	 * the bolted TLB entry. We know for now that only 1G
	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * highmem on 64-bit). We limit ppc64_rma_size to what would be
	 * We crop it to the size of the first MEMBLOCK to
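/*
 * Putting the setup_initial_memory_limit() comments above together: on
 * non-FSL 64-bit Book3E the RMA ends up as "whatever the first
 * memblock covers, capped at 1G", since only 1G bolted entries are
 * supported there. A sketch of the cropping logic, reconstructed from
 * the kernel tree (details vary by kernel version):
 */
	/* On non-FSL parts: a single bolted 1G entry maps the RMA */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);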
	    of_get_flat_dt_prop(root, "cooperative-partition", NULL))	/* in early_init_mmu() */