Lines Matching +full:hard +full:- +full:wires

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * this does -not- include 603 however which shares the implementation with
9 * -- BenH
15 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
40 #include <asm/code-patching.h>
48 * This struct lists the sw-supported page sizes. The hardware MMU may support
147 /* The variables below are currently only used on 64-bit Book3E
171 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
179 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
180 * - flush_tlb_page(vma, vmaddr) flushes one page
181 * - flush_tlb_range(vma, start, end) flushes a range of pages
182 * - flush_tlb_kernel_range(start, end) flushes kernel pages
184 * - local_* variants of page and mm only apply to the current
189 * These are the base non-SMP variants of page and mm flushing
196 pid = mm->context.id; in local_flush_tlb_mm()
209 pid = mm ? mm->context.id : 0; in __local_flush_tlb_page()
217 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in local_flush_tlb_page()
223 * And here are the SMP non-local implementations
240 _tlbil_pid(p ? p->pid : 0); in do_flush_tlb_mm_ipi()
247 _tlbil_va(p->addr, p->pid, p->tsize, p->ind); in do_flush_tlb_page_ipi()
255 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
256 * - we are invalidating some target that isn't currently running here
258 * - some other CPU is re-acquiring a lost PID for this mm
272 pid = mm->context.id; in flush_tlb_mm()
301 pid = mm->context.id; in __flush_tlb_page()
339 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in flush_tlb_page()
351 if (of_get_flat_dt_prop(root, "cooperative-partition", NULL)) in early_init_mmu_47x()
383 if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK)) in flush_tlb_range()
386 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
392 flush_tlb_mm(tlb->mm); in tlb_flush()
396 * Below are functions specific to the 64-bit variant of Book3E though that
420 __flush_tlb_page(tlb->mm, start, tsize, 1); in tlb_flush_pgtable()
428 vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; in tlb_flush_pgtable()
430 __flush_tlb_page(tlb->mm, vpte, tsize, 0); in tlb_flush_pgtable()
457 shift = def->shift; in setup_page_sizes()
463 shift = (shift - 10) >> 1; in setup_page_sizes()
466 def->flags |= MMU_PAGE_SIZE_DIRECT; in setup_page_sizes()
494 if (!def->shift) in setup_page_sizes()
497 if (tlb1ps & (1U << (def->shift - 10))) { in setup_page_sizes()
498 def->flags |= MMU_PAGE_SIZE_DIRECT; in setup_page_sizes()
501 def->flags |= MMU_PAGE_SIZE_INDIRECT; in setup_page_sizes()
517 if (tlb0ps & (1U << (def->shift - 10))) in setup_page_sizes()
518 def->flags |= MMU_PAGE_SIZE_DIRECT; in setup_page_sizes()
545 if (ps == (def->shift - 10)) in setup_page_sizes()
546 def->flags |= MMU_PAGE_SIZE_INDIRECT; in setup_page_sizes()
547 if (sps == (def->shift - 10)) in setup_page_sizes()
548 def->ind = ps + 10; in setup_page_sizes()
563 if (def->flags == 0) { in setup_page_sizes()
564 def->shift = 0; in setup_page_sizes()
567 pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), in setup_page_sizes()
568 __page_type_names[def->flags & 0x3]); in setup_page_sizes()
659 * now our boot and TLB miss code hard wires it. Ideally in early_init_mmu_global()
716 * do this because highmem is not supported on 64-bit. in early_mmu_set_memory_limit()
741 /* On non-FSL Embedded 64-bit, we adjust the RMA size to match in setup_initial_memory_limit()
746 * on FSL Embedded 64-bit, usually all RAM is bolted, but with in setup_initial_memory_limit()
749 * highmem on 64-bit). We limit ppc64_rma_size to what would be in setup_initial_memory_limit()