/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/io.h>

#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include <linux/compiler.h>

pgd_t *mmu_idmap;

/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_enabled_cpumask;

bool mmu_enabled(void)
{
	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the mmu to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	if (is_user()) {
		int cpu = current_thread_info()->cpu;
		return cpumask_test_cpu(cpu, &mmu_enabled_cpumask);
	}

	return __mmu_enabled();
}
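
/*
 * Sketch (not from this file) of the guard pattern the comment above
 * requires of callers: exclusive ops need the MMU enabled, so a lock
 * falls back to a plain store while it is off. `struct spinlock` and
 * its `v` field are assumptions for illustration.
 *
 *	static void example_lock(struct spinlock *lock)
 *	{
 *		if (!mmu_enabled()) {
 *			lock->v = 1;	// no exclusives without the MMU
 *			return;
 *		}
 *		// ...acquire the lock with exclusives...
 *	}
 */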

void mmu_mark_enabled(int cpu)
{
	cpumask_set_cpu(cpu, &mmu_enabled_cpumask);
}

void mmu_mark_disabled(int cpu)
{
	cpumask_clear_cpu(cpu, &mmu_enabled_cpumask);
}

extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	struct thread_info *info = current_thread_info();

	asm_mmu_enable(__pa(pgtable));

	info->pgtable = pgtable;
	mmu_mark_enabled(info->cpu);
}

extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	unsigned long sp = current_stack_pointer;
	int cpu = current_thread_info()->cpu;

	assert_msg(__virt_to_phys(sp) == sp,
			"Attempting to disable MMU with non-identity mapped stack");

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pud_t *pud = pud_alloc(pgd, vaddr);
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);

	WRITE_ONCE(*p_pte, pte);
	flush_tlb_page(vaddr);
	return p_pte;
}

static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}

phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
{
	return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
		+ ((ulong)mem & (PAGE_SIZE - 1));
}
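
/*
 * Usage sketch (not from this file): install one page into the identity
 * map and read the translation back. The example virtual address and
 * the alloc_page() backing are illustrative assumptions.
 *
 *	void *vaddr = (void *)(2ul << 30);	// example address
 *	phys_addr_t paddr = __pa(alloc_page());
 *
 *	install_page(mmu_idmap, paddr, vaddr);
 *	assert(virt_to_pte_phys(mmu_idmap, vaddr) == paddr);
 */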

void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}

void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PMD_MASK;
	uintptr_t vaddr = virt_offset & PMD_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pmd_t entry;

	for (; vaddr < virt_end; vaddr += PMD_SIZE, paddr += PMD_SIZE) {
		pmd_val(entry) = paddr;
		pmd_val(entry) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pmd_val(entry) |= pgprot_val(prot);
		pgd = pgd_offset(pgtable, vaddr);
		pud = pud_alloc(pgd, vaddr);
		pmd = pmd_alloc(pud, vaddr);
		WRITE_ONCE(*pmd, entry);
		flush_tlb_page(vaddr);
	}
}

void *setup_mmu(phys_addr_t phys_end)
{
	struct mem_region *r;

	/* 3G-4G region is reserved for vmalloc, cap phys_end at 3G */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void *)(4ul << 30));

	assert_msg(system_supports_granule(PAGE_SIZE),
			"Unsupported translation granule %ld\n", PAGE_SIZE);
#endif

	if (!mmu_idmap)
		mmu_idmap = alloc_page();

	for (r = mem_regions; r->end; ++r) {
		if (r->flags & MR_F_IO) {
			continue;
		} else if (r->flags & MR_F_CODE) {
			assert_msg(r->flags & MR_F_PRIMARY, "Unexpected code region");
			/* armv8 requires code shared between EL1 and EL0 to be read-only */
			mmu_set_range_ptes(mmu_idmap, r->start, r->start, r->end,
					   __pgprot(PTE_WBWA | PTE_USER | PTE_RDONLY));
		} else {
			assert_msg(r->flags & MR_F_PRIMARY, "Unexpected data region");
			mmu_set_range_ptes(mmu_idmap, r->start, r->start, r->end,
					   __pgprot(PTE_WBWA | PTE_USER));
		}
	}

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t paddr_aligned = phys_addr & PAGE_MASK;
	phys_addr_t paddr_end = PAGE_ALIGN(phys_addr + size);
	pgprot_t prot = __pgprot(PTE_UNCACHED | PTE_USER | PTE_UXN | PTE_PXN);
	pgd_t *pgtable;

	assert(sizeof(long) == 8 || !(phys_addr >> 32));

	if (mmu_enabled()) {
		pgtable = current_thread_info()->pgtable;
	} else {
		if (!mmu_idmap)
			mmu_idmap = alloc_page();
		pgtable = mmu_idmap;
	}

	mmu_set_range_ptes(pgtable, paddr_aligned, paddr_aligned,
			   paddr_end, prot);

	return (void __iomem *)(unsigned long)phys_addr;
}
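
/*
 * Usage sketch (not from this file): map a device page uncached and
 * poke a register through the io accessors. The physical address is a
 * made-up example, and writel() is assumed from the io helpers.
 *
 *	void __iomem *regs = __ioremap(0x09000000, PAGE_SIZE);
 *
 *	writel(0x1, regs);
 */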

phys_addr_t __virt_to_phys(unsigned long addr)
{
	if (mmu_enabled()) {
		pgd_t *pgtable = current_thread_info()->pgtable;
		return virt_to_pte_phys(pgtable, (void *)addr);
	}
	return addr;
}

unsigned long __phys_to_virt(phys_addr_t addr)
{
	/*
	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr, but
	 * the default page tables do identity map all physical addresses, which
	 * means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
	 */
	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
	return addr;
}

/*
 * NOTE: The Arm architecture might require the use of a
 * break-before-make sequence before making changes to a PTE when
 * certain conditions are met (see Arm ARM D5-2669 for AArch64 and
 * B3-1378 for AArch32 for more details).
 */
pteval_t *mmu_get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!mmu_enabled())
		return NULL;

	pgd = pgd_offset(pgtable, vaddr);
	assert(pgd_valid(*pgd));
	pud = pud_offset(pgd, vaddr);
	assert(pud_valid(*pud));
	pmd = pmd_offset(pud, vaddr);
	assert(pmd_valid(*pmd));

	if (pmd_huge(*pmd))
		return &pmd_val(*pmd);

	pte = pte_offset(pmd, vaddr);
	assert(pte_valid(*pte));

	return &pte_val(*pte);
}

void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
{
	pteval_t *p_pte = mmu_get_pte(pgtable, vaddr);

	if (p_pte) {
		pteval_t entry = *p_pte & ~PTE_USER;

		WRITE_ONCE(*p_pte, entry);
		flush_tlb_page(vaddr);
	}
}
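
/*
 * Sketch (not from this file) of the break-before-make sequence
 * referenced in the NOTE above mmu_get_pte(), assuming the caller has
 * the new pte value in new_pte:
 *
 *	pteval_t *p_pte = mmu_get_pte(pgtable, vaddr);
 *
 *	WRITE_ONCE(*p_pte, 0);		// break: invalidate the old PTE
 *	flush_tlb_page(vaddr);		// discard cached translations
 *	WRITE_ONCE(*p_pte, new_pte);	// make: install the new PTE
 */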