/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>

#include "alloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

extern unsigned long etext;

pgd_t *mmu_idmap;

/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_disabled_cpumask = { {1} };
unsigned int mmu_disabled_cpu_count = 1;

bool __mmu_enabled(void)
{
	int cpu = current_thread_info()->cpu;

	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the mmu to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
}

void mmu_mark_enabled(int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
		--mmu_disabled_cpu_count;
}

void mmu_mark_disabled(int cpu)
{
	if (!cpumask_test_and_set_cpu(cpu, &mmu_disabled_cpumask))
		++mmu_disabled_cpu_count;
}

extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	int cpu = current_thread_info()->cpu;

	asm_mmu_enable(__pa(pgtable));
	flush_tlb_all();

	mmu_mark_enabled(cpu);
}

extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	int cpu = current_thread_info()->cpu;

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

/*
 * Map [phys_start, phys_end) at virt_offset with page-granular PTEs,
 * allocating intermediate tables as needed.
 */
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pmd_t *pmd = pmd_alloc(pgd, vaddr);
		pte_t *pte = pte_alloc(pmd, vaddr);

		pte_val(*pte) = paddr;
		pte_val(*pte) |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
		pte_val(*pte) |= pgprot_val(prot);
	}
}

/*
 * Map [phys_start, phys_end) at virt_offset with section (block)
 * descriptors, one per PGDIR_SIZE, written directly into the pgd.
 */
void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PGDIR_MASK;
	uintptr_t vaddr = virt_offset & PGDIR_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pgd_val(*pgd) = paddr;
		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(*pgd) |= pgprot_val(prot);
	}
}

/* Build an identity map of the I/O region and RAM, then enable the MMU. */
void mmu_enable_idmap(void)
{
	uintptr_t phys_end = sizeof(long) == 8 || !(PHYS_END >> 32)
					       ? PHYS_END : 0xfffff000;
	uintptr_t code_end = (uintptr_t)&etext;

	mmu_idmap = pgd_alloc();

	mmu_set_range_sect(mmu_idmap, PHYS_IO_OFFSET,
		PHYS_IO_OFFSET, PHYS_IO_END,
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
}
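
/*
 * Hypothetical usage sketch, not part of the original file: how a caller
 * might add one extra normal-memory page mapping to the identity map built
 * by mmu_enable_idmap() above.  The helper name idmap_one_page() and its
 * calling context are made up for illustration; the attribute choice
 * (PTE_WBWA | PTE_USER) simply mirrors what mmu_enable_idmap() uses for RAM.
 */
#if 0
static void idmap_one_page(phys_addr_t paddr)
{
	paddr &= PAGE_MASK;

	/* Install a single page-granular identity mapping. */
	mmu_set_range_ptes(mmu_idmap, (uintptr_t)paddr,
			   paddr, paddr + PAGE_SIZE,
			   __pgprot(PTE_WBWA | PTE_USER));

	/* Make sure no stale translation for this address survives. */
	flush_tlb_all();
}
#endif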