xref: /kvm-unit-tests/lib/arm/mmu.c (revision 0917dc65eabbacb592456c0d1bb05e5828c23661)
/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>

#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include <linux/compiler.h>

extern unsigned long etext;

pgd_t *mmu_idmap;

/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_enabled_cpumask;

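/*
 * Return true when the MMU is enabled on the calling CPU. When called from
 * user mode, the cpumask maintained by mmu_mark_enabled()/mmu_mark_disabled()
 * is consulted instead of the system register state.
 */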
bool mmu_enabled(void)
{
	/*
	 * mmu_enabled() is called from places that guard the use of
	 * exclusive ops (which require the MMU to be enabled). That
	 * means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc.; otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	if (is_user()) {
		int cpu = current_thread_info()->cpu;
		return cpumask_test_cpu(cpu, &mmu_enabled_cpumask);
	}

	return __mmu_enabled();
}

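/*
 * mmu_mark_enabled()/mmu_mark_disabled() track the per-cpu MMU state in
 * mmu_enabled_cpumask so that mmu_enabled() can report it from user mode.
 */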
void mmu_mark_enabled(int cpu)
{
	cpumask_set_cpu(cpu, &mmu_enabled_cpumask);
}

void mmu_mark_disabled(int cpu)
{
	cpumask_clear_cpu(cpu, &mmu_enabled_cpumask);
}

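/*
 * Enable the MMU on the calling CPU with @pgtable as the translation
 * table, then record the table and the new state in thread_info and the
 * cpumask.
 */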
extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	struct thread_info *info = current_thread_info();

	asm_mmu_enable(__pa(pgtable));

	info->pgtable = pgtable;
	mmu_mark_enabled(info->cpu);
}

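/*
 * Disable the MMU on the calling CPU. The stack must be identity mapped,
 * since execution continues on the same stack with translation off.
 */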
extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	unsigned long sp = current_stack_pointer;
	int cpu = current_thread_info()->cpu;

	assert_msg(__virt_to_phys(sp) == sp,
			"Attempting to disable MMU with non-identity mapped stack");

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

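/*
 * Walk @pgtable for @vaddr, allocating any missing intermediate levels,
 * and return a pointer to the leaf PTE.
 */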
static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pud_t *pud = pud_alloc(pgd, vaddr);
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

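/*
 * Write @pte into the leaf entry for @vaddr and invalidate the TLB entry
 * for that page.
 */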
static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);

	WRITE_ONCE(*p_pte, pte);
	flush_tlb_page(vaddr);
	return p_pte;
}

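/*
 * Map @phys at @vaddr as a page with attributes @prot, plus the attributes
 * common to all page mappings (valid page descriptor, access flag,
 * shareability).
 */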
static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

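/* Map @phys at @virt as a normal, writeback-cacheable, user-accessible page. */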
pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}

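/*
 * Translate @mem to a physical address by walking @pgtable, preserving the
 * offset within the page.
 */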
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
{
	return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
		+ ((ulong)mem & (PAGE_SIZE - 1));
}

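/*
 * Map the physical range [@phys_start, @phys_end) at @virt_offset using
 * page-granule mappings with attributes @prot.
 */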
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}

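/*
 * Same as mmu_set_range_ptes(), but using section (block) mappings at PMD
 * granularity.
 */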
void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PMD_MASK;
	uintptr_t vaddr = virt_offset & PMD_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pmd_t entry;

	for (; vaddr < virt_end; vaddr += PMD_SIZE, paddr += PMD_SIZE) {
		pmd_val(entry) = paddr;
		pmd_val(entry) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pmd_val(entry) |= pgprot_val(prot);
		pgd = pgd_offset(pgtable, vaddr);
		pud = pud_alloc(pgd, vaddr);
		pmd = pmd_alloc(pud, vaddr);
		WRITE_ONCE(*pmd, entry);
		flush_tlb_page(vaddr);
	}
}

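/*
 * Build the identity-mapped page tables for the layout described below:
 * I/O regions are mapped uncached, the text read-only and the remaining
 * RAM writeback-cacheable. The MMU is then enabled on the calling CPU.
 */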
void *setup_mmu(phys_addr_t phys_end)
{
	uintptr_t code_end = (uintptr_t)&etext;
	struct mem_region *r;

	/* 0G-1G = I/O, 1G-3G = identity, 3G-4G = vmalloc */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void*)(4ul << 30));

	assert_msg(system_supports_granule(PAGE_SIZE),
			"Unsupported translation granule %ld\n", PAGE_SIZE);
#endif

	mmu_idmap = alloc_page();

	for (r = mem_regions; r->end; ++r) {
		if (!(r->flags & MR_F_IO))
			continue;
		mmu_set_range_sect(mmu_idmap, r->start, r->start, r->end,
				   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
	}

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}

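/*
 * With the MMU enabled, translate @addr through the current thread's page
 * tables; with it disabled, virtual addresses are already physical.
 */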
phys_addr_t __virt_to_phys(unsigned long addr)
{
	if (mmu_enabled()) {
		pgd_t *pgtable = current_thread_info()->pgtable;
		return virt_to_pte_phys(pgtable, (void *)addr);
	}
	return addr;
}

unsigned long __phys_to_virt(phys_addr_t addr)
{
	/*
	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr, but
	 * the default page tables do identity map all physical addresses, which
	 * means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
	 */
	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
	return addr;
}

/*
 * NOTE: The Arm architecture might require the use of a
 * break-before-make sequence when making changes to a PTE, if
 * certain conditions are met (see Arm ARM D5-2669 for AArch64 and
 * B3-1378 for AArch32 for more details).
 */
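/*
 * Return a pointer to the descriptor mapping @vaddr: the PMD entry for a
 * huge (section) mapping, the PTE otherwise, or NULL when the MMU is
 * disabled.
 */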
pteval_t *mmu_get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!mmu_enabled())
		return NULL;

	pgd = pgd_offset(pgtable, vaddr);
	assert(pgd_valid(*pgd));
	pud = pud_offset(pgd, vaddr);
	assert(pud_valid(*pud));
	pmd = pmd_offset(pud, vaddr);
	assert(pmd_valid(*pmd));

	if (pmd_huge(*pmd))
		return &pmd_val(*pmd);

	pte = pte_offset(pmd, vaddr);
	assert(pte_valid(*pte));

	return &pte_val(*pte);
}

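/*
 * Revoke user (EL0/PL0) access from the page mapping @vaddr. Does nothing
 * when the MMU is disabled.
 */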
void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
{
	pteval_t *p_pte = mmu_get_pte(pgtable, vaddr);
	if (p_pte) {
		pteval_t entry = *p_pte & ~PTE_USER;
		WRITE_ONCE(*p_pte, entry);
		flush_tlb_page(vaddr);
	}
}