/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>

#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include <linux/compiler.h>

extern unsigned long etext;

pgd_t *mmu_idmap;

/* CPU 0 starts with its MMU disabled */
static cpumask_t mmu_disabled_cpumask = { {1} };
unsigned int mmu_disabled_cpu_count = 1;

bool __mmu_enabled(void)
{
	int cpu = current_thread_info()->cpu;

	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the MMU to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
}
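/*
 * Per-cpu MMU state tracking: a CPU that disables its MMU has its bit
 * set in mmu_disabled_cpumask, and mmu_disabled_cpu_count mirrors the
 * number of bits set. The two mark functions below keep them in sync.
 */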
void mmu_mark_enabled(int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
		--mmu_disabled_cpu_count;
}

void mmu_mark_disabled(int cpu)
{
	if (!cpumask_test_and_set_cpu(cpu, &mmu_disabled_cpumask))
		++mmu_disabled_cpu_count;
}

extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	struct thread_info *info = current_thread_info();

	asm_mmu_enable(__pa(pgtable));

	info->pgtable = pgtable;
	mmu_mark_enabled(info->cpu);
}

extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	unsigned long sp = current_stack_pointer;
	int cpu = current_thread_info()->cpu;

	assert_msg(__virt_to_phys(sp) == sp,
		   "Attempting to disable MMU with non-identity mapped stack");

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

/*
 * Walk the page table, allocating any missing intermediate levels, and
 * return a pointer to the pte entry that maps vaddr.
 */
static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pud_t *pud = pud_alloc(pgd, vaddr);
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);

	WRITE_ONCE(*p_pte, pte);
	flush_tlb_page(vaddr);
	return p_pte;
}

static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}
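/*
 * Example (a sketch with a hypothetical virtual address): map a freshly
 * allocated page as normal, user-accessible memory:
 *
 *	void *page = alloc_page();
 *	install_page(mmu_idmap, __pa(page), (void *)0xc0000000ul);
 */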
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
{
	return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
		+ ((ulong)mem & (PAGE_SIZE - 1));
}

void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}

void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PMD_MASK;
	uintptr_t vaddr = virt_offset & PMD_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pmd_t entry;

	for (; vaddr < virt_end; vaddr += PMD_SIZE, paddr += PMD_SIZE) {
		pmd_val(entry) = paddr;
		pmd_val(entry) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pmd_val(entry) |= pgprot_val(prot);
		pgd = pgd_offset(pgtable, vaddr);
		pud = pud_alloc(pgd, vaddr);
		pmd = pmd_alloc(pud, vaddr);
		WRITE_ONCE(*pmd, entry);
		flush_tlb_page(vaddr);
	}
}
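/*
 * Example (a sketch with a hypothetical device range): one PMD_SIZE
 * section entry covers what would otherwise take PMD_SIZE/PAGE_SIZE
 * page entries:
 *
 *	mmu_set_range_sect(mmu_idmap, 0x0a000000ul, 0x0a000000ul,
 *			   0x0a000000ul + PMD_SIZE,
 *			   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
 */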
void *setup_mmu(phys_addr_t phys_end)
{
	uintptr_t code_end = (uintptr_t)&etext;
	struct mem_region *r;

	/* 0G-1G = I/O, 1G-3G = identity, 3G-4G = vmalloc */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void *)(4ul << 30));

	assert_msg(system_supports_granule(PAGE_SIZE),
		   "Unsupported translation granule %ld\n", PAGE_SIZE);
#endif

	mmu_idmap = alloc_page();

	for (r = mem_regions; r->end; ++r) {
		if (!(r->flags & MR_F_IO))
			continue;
		mmu_set_range_sect(mmu_idmap, r->start, r->start, r->end,
				   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
	}

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
			   PHYS_OFFSET, code_end,
			   __pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
			   code_end, phys_end,
			   __pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}

phys_addr_t __virt_to_phys(unsigned long addr)
{
	if (mmu_enabled()) {
		pgd_t *pgtable = current_thread_info()->pgtable;
		return virt_to_pte_phys(pgtable, (void *)addr);
	}
	return addr;
}

unsigned long __phys_to_virt(phys_addr_t addr)
{
	/*
	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr,
	 * but the default page tables do identity map all physical addresses,
	 * which means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
	 */
	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
	return addr;
}

/* Clear the user (EL0) access bit from the entry that maps vaddr. */
void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!mmu_enabled())
		return;

	pgd = pgd_offset(pgtable, vaddr);
	assert(pgd_valid(*pgd));
	pud = pud_offset(pgd, vaddr);
	assert(pud_valid(*pud));
	pmd = pmd_offset(pud, vaddr);
	assert(pmd_valid(*pmd));

	if (pmd_huge(*pmd)) {
		pmd_t entry = __pmd(pmd_val(*pmd) & ~PMD_SECT_USER);
		WRITE_ONCE(*pmd, entry);
		goto out_flush_tlb;
	}

	pte = pte_offset(pmd, vaddr);
	assert(pte_valid(*pte));
	pte_t entry = __pte(pte_val(*pte) & ~PTE_USER);
	WRITE_ONCE(*pte, entry);

out_flush_tlb:
	flush_tlb_page(vaddr);
}
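/*
 * Usage sketch (hypothetical test code): revoke EL0 access to a page
 * before running user code that should fault when touching it:
 *
 *	pgd_t *pgtable = current_thread_info()->pgtable;
 *	mmu_clear_user(pgtable, (unsigned long)page);
 */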