xref: /kvm-unit-tests/lib/arm/mmu.c (revision 6163f75d09a0a96a5c3db82dd768b13f79629c00)
/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>

#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

extern unsigned long etext;

pgd_t *mmu_idmap;

/* CPU 0 starts with the MMU disabled */
static cpumask_t mmu_disabled_cpumask = { {1} };
unsigned int mmu_disabled_cpu_count = 1;

bool __mmu_enabled(void)
{
	int cpu = current_thread_info()->cpu;

	/*
	 * mmu_enabled is called from places that guard the use of
	 * exclusive ops (which require the MMU to be enabled). That
	 * means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
}
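
/*
 * Example (illustrative sketch): code that must work both before and after
 * paging is turned on can branch on the tracked state, much like
 * __virt_to_phys() below does:
 *
 *	if (mmu_enabled())
 *		paddr = virt_to_pte_phys(current_thread_info()->pgtable, ptr);
 *	else
 *		paddr = (phys_addr_t)(unsigned long)ptr;
 */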

void mmu_mark_enabled(int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
		--mmu_disabled_cpu_count;
}

void mmu_mark_disabled(int cpu)
{
	if (!cpumask_test_and_set_cpu(cpu, &mmu_disabled_cpumask))
		++mmu_disabled_cpu_count;
}

extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	struct thread_info *info = current_thread_info();

	asm_mmu_enable(__pa(pgtable));
	flush_tlb_all();

	info->pgtable = pgtable;
	mmu_mark_enabled(info->cpu);
}

extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	int cpu = current_thread_info()->cpu;

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}
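
/*
 * Example (illustrative sketch): a test that wants to run a stretch of code
 * with paging (and hence normal caching) off might do something like
 *
 *	pgd_t *pgtable = current_thread_info()->pgtable;
 *
 *	mmu_disable();
 *	... code that runs with the MMU off ...
 *	mmu_enable(pgtable);
 *
 * Note mmu_enable() takes a virtual pointer and converts it with __pa()
 * itself.
 */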

static void flush_entry(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);

	flush_dcache_addr((ulong)pgd);
	flush_dcache_addr((ulong)pmd);
	flush_dcache_addr((ulong)pte_offset(pmd, vaddr));
	flush_tlb_page(vaddr);
}

static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pmd_t *pmd = pmd_alloc(pgd, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}
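
/*
 * get_pte() walks pgd -> pmd -> pte for vaddr; the *_alloc() helpers are
 * expected to allocate any missing intermediate table on the way down, so
 * the returned pointer is always safe to write through. Callers in this
 * file use it like
 *
 *	pteval_t *p_pte = get_pte(pgtable, vaddr);
 *	*p_pte = pte;
 *
 * (see install_pte() below), followed by flush_entry() to make the update
 * visible to the table walker.
 */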

static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);

	*p_pte = pte;
	flush_entry(pgtable, vaddr);
	return p_pte;
}

static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}
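
/*
 * Example (illustrative sketch, assuming 'va' is a page-aligned virtual
 * address the caller has reserved): map a freshly allocated page and check
 * the translation that was installed:
 *
 *	void *page = alloc_page();
 *	install_page(pgtable, __pa(page), va);
 *	assert(virt_to_pte_phys(pgtable, va) == __pa(page));
 *
 * install_page() always maps with PTE_WBWA | PTE_USER; ranges that need
 * other attributes can be mapped with mmu_set_range_ptes() below.
 */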

phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
{
	return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
		+ ((ulong)mem & (PAGE_SIZE - 1));
}

void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}
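
/*
 * Example (illustrative sketch): identity map a [start, end) physical range
 * with page granularity, read-only for both EL1 and EL0, the same way
 * setup_mmu() maps the text section below:
 *
 *	mmu_set_range_ptes(pgtable, start, start, end,
 *			   __pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));
 *
 * phys_start and virt_offset are rounded down to a page boundary and
 * phys_end is exclusive.
 */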

void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PGDIR_MASK;
	uintptr_t vaddr = virt_offset & PGDIR_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pgd_val(*pgd) = paddr;
		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(*pgd) |= pgprot_val(prot);
		flush_dcache_addr((ulong)pgd);
		flush_tlb_page(vaddr);
	}
}
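
/*
 * Example (illustrative sketch; 'base' and 'size' are placeholders for the
 * region being mapped): map a large MMIO region with section (PGDIR_SIZE)
 * granularity and uncached attributes, as setup_mmu() does for the
 * mach-virt I/O regions below:
 *
 *	mmu_set_range_sect(pgtable, base, base, base + size,
 *			   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
 *
 * Section entries are written directly into the pgd, so no pmd/pte tables
 * are allocated for regions that never need page-granular permissions.
 */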

void *setup_mmu(phys_addr_t phys_end)
{
	uintptr_t code_end = (uintptr_t)&etext;

	/* 0G-1G = I/O, 1G-3G = identity, 3G-4G = vmalloc */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void*)(4ul << 30));
#endif

	mmu_idmap = alloc_page();

	/*
	 * mach-virt I/O regions:
	 *   - The first 1G (arm/arm64)
	 *   - 512M at 256G (arm64, arm uses highmem=off)
	 *   - 512G at 512G (arm64, arm uses highmem=off)
	 */
	mmu_set_range_sect(mmu_idmap,
		0, 0, (1ul << 30),
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
#ifdef __aarch64__
	mmu_set_range_sect(mmu_idmap,
		(1ul << 38), (1ul << 38), (1ul << 38) | (1ul << 29),
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
	mmu_set_range_sect(mmu_idmap,
		(1ul << 39), (1ul << 39), (1ul << 40),
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
#endif

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}
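
/*
 * setup_mmu() is the hook behind the generic vmalloc/setup_vm() path (hence
 * the "vmalloc.h" include above): it is expected to run once during early
 * setup, before any code that relies on caches or exclusive ops (spinlocks,
 * atomic bitops), and it returns the identity-map tables so the caller can
 * install further mappings into them, e.g. with install_page().
 */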

phys_addr_t __virt_to_phys(unsigned long addr)
{
	if (mmu_enabled()) {
		pgd_t *pgtable = current_thread_info()->pgtable;
		return virt_to_pte_phys(pgtable, (void *)addr);
	}
	return addr;
}

unsigned long __phys_to_virt(phys_addr_t addr)
{
	/*
	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr, but
	 * the default page tables do identity map all physical addresses, which
	 * means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
	 */
	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
	return addr;
}

void mmu_clear_user(unsigned long vaddr)
{
	pgd_t *pgtable;
	pteval_t *pte;

	if (!mmu_enabled())
		return;

	pgtable = current_thread_info()->pgtable;
	pte = get_pte(pgtable, vaddr);

	*pte &= ~PTE_USER;
	flush_tlb_page(vaddr);
}
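
/*
 * Example (illustrative sketch): a test that wants a subsequent EL0/user
 * access to a page to fault can drop the user bit from its mapping first:
 *
 *	mmu_clear_user((unsigned long)page);
 *
 * Note that, unlike install_pte(), only the TLB entry is flushed here, not
 * the dcache line holding the PTE.
 */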