/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>

#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

extern unsigned long etext;

pgd_t *mmu_idmap;

/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_disabled_cpumask = { {1} };
unsigned int mmu_disabled_cpu_count = 1;

bool __mmu_enabled(void)
{
	int cpu = current_thread_info()->cpu;

	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the mmu to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
}

void mmu_mark_enabled(int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
		--mmu_disabled_cpu_count;
}

void mmu_mark_disabled(int cpu)
{
	if (!cpumask_test_and_set_cpu(cpu, &mmu_disabled_cpumask))
		++mmu_disabled_cpu_count;
}

extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	struct thread_info *info = current_thread_info();

	asm_mmu_enable(__pa(pgtable));
	flush_tlb_all();

	info->pgtable = pgtable;
	mmu_mark_enabled(info->cpu);
}

extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	int cpu = current_thread_info()->cpu;

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

/*
 * Walk the page table for vaddr, allocating intermediate levels as
 * needed, and return a pointer to the leaf pte entry.
 */
static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pmd_t *pmd = pmd_alloc(pgd, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

/* Write the pte descriptor and flush the stale TLB entry for vaddr. */
static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);

	*p_pte = pte;
	flush_tlb_page(vaddr);
	return p_pte;
}

static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}

phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
{
	return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
		+ ((ulong)mem & (PAGE_SIZE - 1));
}
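
/*
 * Usage sketch: one way a caller might pair install_page() with
 * virt_to_pte_phys().  This is only an illustration (the variables are
 * hypothetical) and assumes the identity map built by setup_mmu() below
 * is live, so __pa() already gives the page's physical address:
 *
 *	void *page = alloc_page();
 *	phys_addr_t pa = __pa(page);
 *
 *	install_page(mmu_idmap, pa, page);
 *	assert(virt_to_pte_phys(mmu_idmap, page) == pa);
 */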

void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}

void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PGDIR_MASK;
	uintptr_t vaddr = virt_offset & PGDIR_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pgd_val(*pgd) = paddr;
		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(*pgd) |= pgprot_val(prot);
		flush_tlb_page(vaddr);
	}
}

void *setup_mmu(phys_addr_t phys_end)
{
	uintptr_t code_end = (uintptr_t)&etext;

	/* 0G-1G = I/O, 1G-3G = identity, 3G-4G = vmalloc */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void*)(4ul << 30));
#endif

	mmu_idmap = alloc_page();

	/*
	 * mach-virt I/O regions:
	 * - The first 1G (arm/arm64)
	 * - 512M at 256G (arm64, arm uses highmem=off)
	 * - 512G at 512G (arm64, arm uses highmem=off)
	 */
	mmu_set_range_sect(mmu_idmap,
			   0, 0, (1ul << 30),
			   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
#ifdef __aarch64__
	mmu_set_range_sect(mmu_idmap,
			   (1ul << 38), (1ul << 38), (1ul << 38) | (1ul << 29),
			   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
	mmu_set_range_sect(mmu_idmap,
			   (1ul << 39), (1ul << 39), (1ul << 40),
			   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
#endif

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
			   PHYS_OFFSET, code_end,
			   __pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
			   code_end, phys_end,
			   __pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}

phys_addr_t __virt_to_phys(unsigned long addr)
{
	if (mmu_enabled()) {
		pgd_t *pgtable = current_thread_info()->pgtable;
		return virt_to_pte_phys(pgtable, (void *)addr);
	}
	return addr;
}

unsigned long __phys_to_virt(phys_addr_t addr)
{
	/*
	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr,
	 * but the default page tables do identity map all physical addresses,
	 * which means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
	 */
	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
	return addr;
}

void mmu_clear_user(unsigned long vaddr)
{
	pgd_t *pgtable;
	pteval_t *pte;

	if (!mmu_enabled())
		return;

	pgtable = current_thread_info()->pgtable;
	pte = get_pte(pgtable, vaddr);

	*pte &= ~PTE_USER;
	flush_tlb_page(vaddr);
}
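
/*
 * Usage sketch: since the default tables identity map all of physical
 * memory (see __phys_to_virt() above), the two conversions round-trip
 * for any identity-mapped address.  Hypothetical illustration:
 *
 *	void *p = alloc_page();
 *	phys_addr_t pa = __virt_to_phys((unsigned long)p);
 *
 *	assert((void *)__phys_to_virt(pa) == p);
 */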