/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <cpumask.h>
#include <memregions.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/io.h>

#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include <linux/compiler.h>

pgd_t *mmu_idmap;

/* CPU 0 starts with the MMU disabled */
static cpumask_t mmu_enabled_cpumask;

bool mmu_enabled(void)
{
	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the mmu to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	if (is_user()) {
		int cpu = current_thread_info()->cpu;
		return cpumask_test_cpu(cpu, &mmu_enabled_cpumask);
	}

	return __mmu_enabled();
}
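
/*
 * Usage sketch (illustrative): callers guard exclusive-op based
 * primitives with mmu_enabled(). This codebase's spinlock, for
 * example, falls back to a plain store when translation is off:
 *
 *	if (!mmu_enabled()) {
 *		lock->v = 1;	// plain store; exclusives need the MMU
 *		return;
 *	}
 *	// ... ldxr/stxr loop ...
 */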

void mmu_mark_enabled(int cpu)
{
	cpumask_set_cpu(cpu, &mmu_enabled_cpumask);
}

void mmu_mark_disabled(int cpu)
{
	cpumask_clear_cpu(cpu, &mmu_enabled_cpumask);
}

extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	struct thread_info *info = current_thread_info();

	asm_mmu_enable(__pa(pgtable));

	info->pgtable = pgtable;
	mmu_mark_enabled(info->cpu);
}

extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	unsigned long sp = current_stack_pointer;
	int cpu = current_thread_info()->cpu;

	assert_msg(__virt_to_phys(sp) == sp,
		   "Attempting to disable MMU with non-identity mapped stack");

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pud_t *pud = pud_alloc(pgd, vaddr);
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);

	WRITE_ONCE(*p_pte, pte);
	flush_tlb_page(vaddr);
	return p_pte;
}

static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}
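
/*
 * Usage sketch (illustrative; the virtual address below is arbitrary
 * and hypothetical): map a freshly allocated page at an unused virtual
 * address, then access it through the new mapping:
 *
 *	void *page = alloc_page();
 *	install_page(mmu_idmap, virt_to_phys(page), (void *)0x60000000UL);
 *	*(int *)0x60000000UL = 42;
 */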

/*
 * NOTE: The Arm architecture might require the use of a
 * break-before-make sequence before making changes to a valid PTE
 * when certain conditions are met (see Arm ARM D5-2669 for AArch64
 * and B3-1378 for AArch32 for more details).
 */
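/*
 * A minimal break-before-make sketch (an assumption about how a caller
 * might sequence such a change; barrier and TLB maintenance details
 * depend on the exact change, see the references above):
 *
 *	WRITE_ONCE(*ptep, 0);		// break: invalidate the old entry
 *	flush_tlb_page(vaddr);		// flush cached translations
 *	WRITE_ONCE(*ptep, new_pte);	// make: install the new entry
 */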
pteval_t *follow_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(pgtable, vaddr);
	if (!pgd_valid(*pgd))
		return NULL;

	pud = pud_offset(pgd, vaddr);
	if (!pud_valid(*pud))
		return NULL;

	pmd = pmd_offset(pud, vaddr);
	if (!pmd_valid(*pmd))
		return NULL;
	if (pmd_huge(*pmd))
		return &pmd_val(*pmd);

	pte = pte_offset(pmd, vaddr);
	if (!pte_valid(*pte))
		return NULL;

	return &pte_val(*pte);
}
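/*
 * Translate virt through the given page table. If no mapping exists,
 * the address is identity mapped on the fly, so the lookup never
 * fails. The returned physical address accounts for both huge
 * (PMD-level block) and regular page mappings.
 */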
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *virt)
{
	phys_addr_t mask;
	pteval_t *pteval;

	pteval = follow_pte(pgtable, (uintptr_t)virt);
	if (!pteval) {
		install_page(pgtable, (phys_addr_t)(unsigned long)virt, virt);
		return (phys_addr_t)(unsigned long)virt;
	}

	if (pmd_huge(__pmd(*pteval)))
		mask = PMD_MASK;
	else
		mask = PAGE_MASK;

	return (*pteval & PHYS_MASK & mask) |
	       ((phys_addr_t)(unsigned long)virt & ~mask);
}

void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}

void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PMD_MASK;
	uintptr_t vaddr = virt_offset & PMD_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pmd_t entry;

	for (; vaddr < virt_end; vaddr += PMD_SIZE, paddr += PMD_SIZE) {
		pmd_val(entry) = paddr;
		pmd_val(entry) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pmd_val(entry) |= pgprot_val(prot);
		pgd = pgd_offset(pgtable, vaddr);
		pud = pud_alloc(pgd, vaddr);
		pmd = pmd_alloc(pud, vaddr);
		WRITE_ONCE(*pmd, entry);
		flush_tlb_page(vaddr);
	}
}
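
/*
 * Example (a sketch; the range is hypothetical and the attribute names
 * are assumptions based on this codebase's pgtable-hwdef.h): identity
 * map a region with section (block) mappings instead of 4k pages:
 *
 *	mmu_set_range_sect(mmu_idmap, base, base, base + size,
 *			   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
 */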

void *setup_mmu(phys_addr_t phys_end, void *unused)
{
	struct mem_region *r;

	/* 3G-4G region is reserved for vmalloc, cap phys_end at 3G */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void *)(4ul << 30));

	assert_msg(system_supports_granule(PAGE_SIZE),
		   "Unsupported translation granule %ld\n", PAGE_SIZE);
#endif

	if (!mmu_idmap)
		mmu_idmap = alloc_page();

	for (r = mem_regions; r->end; ++r) {
		if (r->flags & (MR_F_IO | MR_F_RESERVED)) {
			continue;
		} else if (r->flags & MR_F_CODE) {
			/* armv8 requires code shared between EL1 and EL0 to be read-only */
			mmu_set_range_ptes(mmu_idmap, r->start, r->start, r->end,
					   __pgprot(PTE_WBWA | PTE_USER | PTE_RDONLY));
		} else {
			mmu_set_range_ptes(mmu_idmap, r->start, r->start, r->end,
					   __pgprot(PTE_WBWA | PTE_USER));
		}
	}

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t paddr_aligned = phys_addr & PAGE_MASK;
	phys_addr_t paddr_end = PAGE_ALIGN(phys_addr + size);
	pgprot_t prot = __pgprot(PTE_UNCACHED | PTE_USER | PTE_UXN | PTE_PXN);
	pgd_t *pgtable;

	assert(sizeof(long) == 8 || !(phys_addr >> 32));

	if (mmu_enabled()) {
		pgtable = current_thread_info()->pgtable;
	} else {
		if (!mmu_idmap)
			mmu_idmap = alloc_page();
		pgtable = mmu_idmap;
	}

	mmu_set_range_ptes(pgtable, paddr_aligned, paddr_aligned,
			   paddr_end, prot);

	return (void __iomem *)(unsigned long)phys_addr;
}
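
/*
 * Usage sketch (the device base address and register offset are
 * hypothetical): map a 4k MMIO region uncached, then poke a register
 * through the returned pointer:
 *
 *	void __iomem *regs = __ioremap(0x09000000, 0x1000);
 *	writel(1, regs + 0x30);
 */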

phys_addr_t __virt_to_phys(unsigned long addr)
{
	if (mmu_enabled()) {
		pgd_t *pgtable = current_thread_info()->pgtable;
		return virt_to_pte_phys(pgtable, (void *)addr);
	}
	return addr;
}

unsigned long __phys_to_virt(phys_addr_t addr)
{
	/*
	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr, but
	 * the default page tables do identity map all physical addresses, which
	 * means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
	 */
	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
	return addr;
}
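/*
 * Clear the user (EL0) access permission from the leaf entry mapping
 * vaddr, if one exists, so that subsequent EL0 accesses to that page
 * fault; the stale TLB entry is flushed after the update.
 */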
void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
{
	pteval_t *p_pte = follow_pte(pgtable, vaddr);
	if (p_pte) {
		pteval_t entry = *p_pte & ~PTE_USER;
		WRITE_ONCE(*p_pte, entry);
		flush_tlb_page(vaddr);
	}
}