
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 */
/* Sort of meaningless for non-VM targets. */
#include <asm-generic/pgtable-nopmd.h>
/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code. Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting PTEs from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */
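/*
 * Illustrative sketch (not kernel API): the two-level walk described
 * above in plain C, assuming 4k pages and 1024-entry tables. The names
 * walk_va and pte_entry are hypothetical stand-ins for pgd_t/pte_t.
 */
#include <stdint.h>
#include <stddef.h>

#define EX_PAGE_SHIFT	12				/* 4k pages */
#define EX_PTE_SHIFT	10				/* 1024 PTEs per page */
#define EX_PGDIR_SHIFT	(EX_PAGE_SHIFT + EX_PTE_SHIFT)	/* = 22 */

typedef uint32_t pte_entry;				/* model of a 32-bit PTE */

/* Top 10 VA bits index the pgdir; the next 10 index the PTE page. */
static pte_entry *walk_va(pte_entry **pgdir, uint32_t va)
{
	pte_entry *pte_page = pgdir[va >> EX_PGDIR_SHIFT];

	if (pte_page == NULL)
		return NULL;				/* no second-level table here */
	return &pte_page[(va >> EX_PAGE_SHIFT) & ((1u << EX_PTE_SHIFT) - 1)];
}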
/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative
 * TLB which serves as a first level to the shared TLB. These two TLBs are
 * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
 */
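/*
 * Illustrative model of the software-managed unified TLB, assuming
 * nothing beyond the description above: 64 tag/data entries, filled
 * entirely by software (on real hardware through the TLBHI/TLBLO
 * registers). The lookup is shown as a linear probe for clarity; the
 * real array is semi-associative. All names here (utlb_entry,
 * utlb_lookup) are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

#define UTLB_ENTRIES	64

struct utlb_entry {
	uint32_t tag;		/* virtual page number, models TLBHI */
	uint32_t data;		/* physical page number + protection, models TLBLO */
	bool valid;
};

static struct utlb_entry utlb[UTLB_ENTRIES];

static bool utlb_lookup(uint32_t vpn, uint32_t *data)
{
	for (int i = 0; i < UTLB_ENTRIES; i++) {
		if (utlb[i].valid && utlb[i].tag == vpn) {
			*data = utlb[i].data;
			return true;		/* hit */
		}
	}
	return false;		/* miss: software must refill the entry */
}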
/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PGD_PTRS	(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS	(PTRS_PER_PGD-USER_PGD_PTRS)
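/*
 * A quick check of the arithmetic, assuming PAGE_SHIFT = 12 and
 * PTE_SHIFT = 10 (a 4096-byte page holds 1024 4-byte PTEs):
 *
 *   PGDIR_SHIFT  = 12 + 10 = 22, so one pgd entry maps 1 << 22 = 4 MB
 *   PTRS_PER_PGD = 1 << (32 - 22) = 1024 top-level entries
 *   1024 entries x 4 MB each covers the full 32-bit address space.
 *
 * The asserts below are a self-check under those assumptions, not part
 * of the original header.
 */
_Static_assert(12 + 10 == 22, "PGDIR_SHIFT is 22 for 4k pages");
_Static_assert((1 << (32 - 22)) == 1024, "PTRS_PER_PGD is 1024");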
#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */
/* There are several potential gotchas here; where possible, the Linux
 * PTE bits are made to match up with the hardware TLBLO layout:
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 *   support down to 1k pages); this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
 *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 *   entries use the top 30 bits. Because 4xx doesn't support SMP
 *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving only bits 20, 21, 24-26, and 30 available
 *   for software PTE bits.
 */
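/*
 * Rough illustration of the last two points: a miss handler masks the
 * software-only PTE bits before loading TLBLO. Translating the
 * big-endian bit numbers above (bit 0 = MSB) into a value mask, bits
 * 20, 21, 24-26, and 30 become 0x800, 0x400, 0x80, 0x40, 0x20, and
 * 0x2, i.e. 0xCE2. A hedged C sketch, not the real assembler path:
 */
#include <stdint.h>

#define SW_PTE_BITS	0x00000CE2u	/* software bits 20, 21, 24-26, 30 */

static inline uint32_t pte_to_tlblo(uint32_t pte)
{
	return pte & ~SW_PTE_BITS;	/* everything else loads unmodified */
}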
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
/* ... another purpose. -- paulus. */
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define pte_page(x)	(mem_map + (unsigned long) \
		((pte_val(x) - memory_start) >> PAGE_SHIFT))
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot); \
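/*
 * The two fragments above are inverses: mk_pte packs a page-frame
 * address (biased by memory_start, since RAM need not start at
 * physical 0) together with protection bits, and pte_page recovers
 * the struct page. A self-contained model with hypothetical names
 * (model_mk_pte, model_pte_page) standing in for the macros:
 */
#include <stdint.h>
#include <assert.h>

#define M_PAGE_SHIFT	12

struct m_page { int unused; };

static struct m_page m_mem_map[1024];			/* one struct page per frame */
static const uint32_t m_memory_start = 0x80000000u;	/* hypothetical RAM base */

static uint32_t model_mk_pte(struct m_page *page, uint32_t pgprot)
{
	return (((uint32_t)(page - m_mem_map) << M_PAGE_SHIFT) + m_memory_start)
		| pgprot;
}

static struct m_page *model_pte_page(uint32_t pte)
{
	/* prot bits live below PAGE_SHIFT, so the shift discards them */
	return m_mem_map + ((pte - m_memory_start) >> M_PAGE_SHIFT);
}

int main(void)
{
	struct m_page *p = &m_mem_map[7];
	uint32_t pte = model_mk_pte(p, 0x3);	/* arbitrary prot bits */

	assert(model_pte_page(pte) == p);	/* round trip recovers the page */
	return 0;
}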
/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether the PTE is 32 or 64 bits.
 */
	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
/* Find an entry in the third-level page table.. */
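/*
 * The third-level lookup is plain index arithmetic on the middle VA
 * bits, consistent with the walk sketched earlier (PAGE_SHIFT = 12 and
 * PTE_SHIFT = 10 assumed; model_pte_index is a hypothetical name):
 */
#define model_pte_index(addr)	(((addr) >> 12) & ((1u << 10) - 1))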
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used). -- paulus
 */
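/*
 * One way to satisfy that constraint, in the spirit of the
 * powerpc-derived code: keep the swap payload in the top 30 bits of
 * the PTE so the bottom two bits (where _PAGE_PRESENT lives) stay
 * clear. A hedged sketch with hypothetical helper names:
 */
#include <stdint.h>

static inline uint32_t model_swp_entry_to_pte(uint32_t swp_val)
{
	return swp_val << 2;	/* bits 1:0, incl. _PAGE_PRESENT, stay 0 */
}

static inline uint32_t model_pte_to_swp_entry(uint32_t pte)
{
	return pte >> 2;
}

/* Usage: for any payload e < 2^30, (model_swp_entry_to_pte(e) & 3) == 0
 * and model_pte_to_swp_entry(model_swp_entry_to_pte(e)) == e. */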