xref: /kvm-unit-tests/lib/arm/mmu.c (revision ef31e31d530caa1c7951b437ef848d0858a81fd9)
/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>

#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include <linux/compiler.h>

extern unsigned long etext;

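/* Page table root for the identity map; built and enabled by setup_mmu(). */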
pgd_t *mmu_idmap;

/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_disabled_cpumask = { {1} };
unsigned int mmu_disabled_cpu_count = 1;

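/* Return true if the MMU is enabled on the calling CPU. */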
bool __mmu_enabled(void)
{
	int cpu = current_thread_info()->cpu;

	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the mmu to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
}

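/*
 * Per-CPU MMU state bookkeeping: the cpumask records which CPUs still
 * run with the MMU off, and the counter gives a cheap "are any CPUs
 * still disabled?" check that avoids scanning the whole mask.
 */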
void mmu_mark_enabled(int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
		--mmu_disabled_cpu_count;
}

void mmu_mark_disabled(int cpu)
{
	if (!cpumask_test_and_set_cpu(cpu, &mmu_disabled_cpumask))
		++mmu_disabled_cpu_count;
}

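/*
 * Enable the MMU on the calling CPU using the given page table root,
 * then record the root and the new state in this CPU's thread_info.
 */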
extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	struct thread_info *info = current_thread_info();

	asm_mmu_enable(__pa(pgtable));

	info->pgtable = pgtable;
	mmu_mark_enabled(info->cpu);
}

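/*
 * After asm_mmu_disable() returns, all memory accesses use physical
 * addresses, so the stack must be identity mapped for the caller to
 * keep running; the assert below enforces that.
 */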
extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	unsigned long sp = current_stack_pointer;
	int cpu = current_thread_info()->cpu;

	assert_msg(__virt_to_phys(sp) == sp,
			"Attempting to disable MMU with non-identity mapped stack");

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

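/*
 * Walk the page table for vaddr, allocating intermediate levels on
 * demand, and return a pointer to the pte's value.
 */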
static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pmd_t *pmd = pmd_alloc(pgd, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);

	WRITE_ONCE(*p_pte, pte);
	flush_tlb_page(vaddr);
	return p_pte;
}

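/*
 * Build a present page descriptor (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 * combined with the caller's attribute bits and install it for vaddr.
 */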
static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}

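/*
 * Illustrative use of install_page() (a sketch, not code from this
 * file; assumes "va" is an unused, page-aligned virtual address):
 *
 *	void *page = alloc_page();
 *	install_page(mmu_idmap, __pa(page), va);
 *	memset(va, 0, PAGE_SIZE);	// page is now accessible through va
 */

/* Translate mem through pgtable, preserving the offset within the page. */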
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
{
	return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
		+ ((ulong)mem & (PAGE_SIZE - 1));
}

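/*
 * Map the physical range [phys_start, phys_end) at virt_offset with
 * the given attributes, one PAGE_SIZE pte at a time.
 */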
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}

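/*
 * Like mmu_set_range_ptes(), but installs PGDIR_SIZE section (block)
 * entries directly in the top-level table, so large ranges need no
 * lower-level tables at all.
 */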
void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PGDIR_MASK;
	uintptr_t vaddr = virt_offset & PGDIR_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;
	pgd_t *pgd;
	pgd_t entry;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_val(entry) = paddr;
		pgd_val(entry) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(entry) |= pgprot_val(prot);
		pgd = pgd_offset(pgtable, vaddr);
		WRITE_ONCE(*pgd, entry);
		flush_tlb_page(vaddr);
	}
}

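/*
 * Build and enable the identity map: I/O regions become uncached
 * sections, the text segment is mapped read-only, and the remaining
 * RAM up to phys_end (capped at 3G) is mapped write-back
 * write-allocate.
 */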
void *setup_mmu(phys_addr_t phys_end)
{
	uintptr_t code_end = (uintptr_t)&etext;
	struct mem_region *r;

	/* 0G-1G = I/O, 1G-3G = identity, 3G-4G = vmalloc */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void*)(4ul << 30));
#endif

	mmu_idmap = alloc_page();

	for (r = mem_regions; r->end; ++r) {
		if (!(r->flags & MR_F_IO))
			continue;
		mmu_set_range_sect(mmu_idmap, r->start, r->start, r->end,
				   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
	}

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}

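/*
 * With the MMU off, virtual addresses are physical addresses; with it
 * on, translate through the page table recorded in this CPU's
 * thread_info.
 */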
phys_addr_t __virt_to_phys(unsigned long addr)
{
	if (mmu_enabled()) {
		pgd_t *pgtable = current_thread_info()->pgtable;
		return virt_to_pte_phys(pgtable, (void *)addr);
	}
	return addr;
}

unsigned long __phys_to_virt(phys_addr_t addr)
{
	/*
	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr,
	 * but the default page tables do identity map all physical addresses,
	 * which means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
	 */
	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
	return addr;
}

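/*
 * Strip the user (EL0) access permission from whatever maps vaddr,
 * whether a huge (section) mapping or a regular pte, then flush the
 * stale TLB entry.
 */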
void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mmu_enabled())
		return;

	pgd = pgd_offset(pgtable, vaddr);
	assert(pgd_valid(*pgd));
	pmd = pmd_offset(pgd, vaddr);
	assert(pmd_valid(*pmd));

	if (pmd_huge(*pmd)) {
		pmd_t entry = __pmd(pmd_val(*pmd) & ~PMD_SECT_USER);
		WRITE_ONCE(*pmd, entry);
		goto out_flush_tlb;
	}

	pte = pte_offset(pmd, vaddr);
	assert(pte_valid(*pte));
	WRITE_ONCE(*pte, __pte(pte_val(*pte) & ~PTE_USER));

out_flush_tlb:
	flush_tlb_page(vaddr);
}