// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant processors,
 * this does -not- include 603 however which shares the implementation with
 * hash based processors)
 *
 *  -- BenH
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * ...
 */
#include <asm/tlb.h>
#include <asm/code-patching.h>
/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here.
 */
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions. This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
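/*
 * Illustrative sketch (not from this file): "round-robin" assignment just
 * hands out consecutive tlbcam slots and wraps at the end. The helper and
 * its parameters below are hypothetical.
 */
static inline int example_next_tlbcam(int *next_idx, int num_entries)
{
	int slot = *next_idx;

	*next_idx = (slot + 1) % num_entries;	/* advance, wrapping around */
	return slot;
}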
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
	pid = mm->context.id;			/* in local_flush_tlb_mm() */
	...
	pid = mm ? mm->context.id : 0;		/* in __local_flush_tlb_page() */
	...
	/* in local_flush_tlb_page() */
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
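/*
 * Illustrative sketch (not from this file): a typical caller of the base
 * API above clears a PTE and then invalidates the now-stale translation
 * for that single page. The helper name is hypothetical; only the
 * flush_tlb_page() call reflects the documented interface.
 */
static inline void example_unmap_one(struct vm_area_struct *vma,
				     unsigned long vmaddr, pte_t *ptep)
{
	pte_clear(vma->vm_mm, vmaddr, ptep);	/* drop the translation */
	flush_tlb_page(vma, vmaddr);		/* then flush the stale TLB entry */
}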
/*
 * And here are the SMP non-local implementations
 */
	_tlbil_pid(p ? p->pid : 0);			/* in do_flush_tlb_mm_ipi() */
	...
	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);	/* in do_flush_tlb_page_ipi() */
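/*
 * Minimal sketch (an assumption, based on the two handlers above): the SMP
 * paths bundle the invalidation arguments into a parameter block, run the
 * handler on the other CPUs via an IPI, and invalidate locally themselves.
 * The struct and helper below are illustrative, not this file's exact code.
 */
struct example_tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void example_flush_page_everywhere(struct example_tlb_flush_param *p)
{
	preempt_disable();
	smp_call_function(do_flush_tlb_page_ipi, p, 1);	/* remote CPUs */
	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);	/* this CPU */
	preempt_enable();
}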
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
	pid = mm->context.id;			/* in flush_tlb_mm() */
	...
	pid = mm->context.id;			/* in __flush_tlb_page() */
	...
	/* in flush_tlb_page() */
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
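/*
 * Minimal sketch (an assumption) of the snapshot pattern the note above
 * describes: take the PID with preemption disabled, skip the flush when
 * the mm has no context, and tolerate concurrent PID changes.
 */
static void example_flush_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;		/* snapshot; may go stale, which is fine */
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);	/* invalidating a stale PID is harmless */
	preempt_enable();
}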
	/* in early_init_mmu_47x() */
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
/*
 * Flush kernel TLB entries in the given range
 */
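/*
 * Minimal sketch (an assumption): kernel mappings live under PID 0 on
 * these cores, so flushing a kernel range can be done by invalidating
 * PID 0 on every CPU; passing a NULL parameter block to the IPI helper
 * above degenerates to exactly that.
 */
static void example_flush_kernel_tlb(void)
{
	on_each_cpu(do_flush_tlb_mm_ipi, NULL, 1);	/* _tlbil_pid(0) everywhere */
}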
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now we keep it that way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	/* a single aligned page flushes directly, else flush the whole mm */
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * ...
 */
/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	...
	unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
	...
	/* indirect (IND) entries: flush each PTE-page-sized chunk in turn */
	__flush_tlb_page(tlb->mm, start, tsize, 1);
	start += size;
	...
	/* no indirect entries: flush the virtual linear page table instead */
	vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
	...
	__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	...
}
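/*
 * A short sketch (an assumption, not part of this file) of the virtual
 * linear page table arithmetic above: with 8-byte PTEs, the PTE for
 * 'address' sits at byte offset (address >> PAGE_SHIFT) * 8, i.e.
 * address >> (PAGE_SHIFT - 3), inside the VPTE area; masking with ~0xfff
 * rounds down to the 4K-aligned slice of the table so one invalidation
 * covers every PTE of the freed page table page.
 */
static inline unsigned long example_vpte_offset(unsigned long address)
{
	return (address >> (PAGE_SHIFT - 3)) & ~0xffful;
}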
static void __init setup_page_sizes(void)
{
	int i, psize;
	...
	shift = def->shift;
	...
	/* adjust to be in terms of 4^shift Kb */
	shift = (shift - 10) >> 1;
	...
	def->flags |= MMU_PAGE_SIZE_DIRECT;
	...

	/*
	 * We expect 4K subpage size and unrestricted indirect size.
	 * The lack of a restriction on indirect size is a Freescale
	 * extension, indicated by PSn = 0 but SPSn != 0.
	 */
	...
	if (!def->shift)
		continue;

	if (tlb1ps & (1U << (def->shift - 10))) {
		def->flags |= MMU_PAGE_SIZE_DIRECT;
		...
		def->flags |= MMU_PAGE_SIZE_INDIRECT;
		...
	}
	...
	if (tlb0ps & (1U << (def->shift - 10)))
		def->flags |= MMU_PAGE_SIZE_DIRECT;
	...

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * consistent with that.
	 */
	for (i = 0; i < 3; i++) {
		...
		if (ps == (def->shift - 10))
			def->flags |= MMU_PAGE_SIZE_INDIRECT;
		if (sps == (def->shift - 10))
			def->ind = ps + 10;
		...
	}
	...
	if (def->flags == 0) {
		def->shift = 0;
		continue;
	}

	pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
		__page_type_names[def->flags & 0x3]);
}
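/*
 * A short sketch (an assumption) of the TLBnPS tests above: the TLB0PS and
 * TLB1PS registers carry one bit per supported power-of-2 page size, where
 * bit n means 2^n KB. A page size of 2^shift bytes is 2^(shift - 10) KB,
 * hence the (1U << (def->shift - 10)) probes; e.g. 64K pages (shift == 16)
 * are supported iff bit 6 is set.
 */
static inline bool example_size_supported(unsigned int tlbps, unsigned int shift)
{
	return tlbps & (1U << (shift - 10));
}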
	/* in setup_mmu_htw() */
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */
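/*
 * Illustrative sketch (an assumption): "patching" here means redirecting
 * the branch at a TLB miss exception vector to the tablewalk-enabled
 * handler. patch_branch() is the real primitive from
 * <asm/code-patching.h>; the vector slot and handler symbol below are
 * hypothetical stand-ins.
 */
extern u32 example_tlb_miss_vector[];		/* hypothetical vector slot */
extern void example_tlb_miss_htw(void);		/* hypothetical HTW handler */

static void example_enable_htw(void)
{
	patch_branch(example_tlb_miss_vector,
		     (unsigned long)&example_tlb_miss_htw, 0);
}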
/*
 * Early initialization of the MMU TLB code
 */
	/* in early_init_mmu_global() */

	/* ... for now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	...
	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT fault code.
	 */
	...
	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
	/* in early_mmu_set_memory_limit() */
	/*
	 * Limit memory so we don't have linear faults. Unlike
	 * memblock_set_current_limit, which limits memory available
	 * during early boot, this permanently reduces the memory
	 * available to Linux. We need to do this because highmem
	 * is not supported on 64-bit.
	 */
	memblock_enforce_memory_limit(linear_map_top);
	/* in setup_initial_memory_limit() */
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually change.
	 *
	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit). We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.
	 */
	...
	/* We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
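/*
 * Minimal sketch (an assumption) of the cropping described above: keep the
 * RMA within both the first memblock and a 1G bolted entry. The 0x40000000
 * constant mirrors the "only 1G entries" remark; the helper name is
 * illustrative.
 */
static void example_crop_rma(u64 first_memblock_size)
{
	/* never advertise more RMA than the first block can bolt */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
}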