xref: /kvm-unit-tests/lib/arm/mmu.c (revision 031755dbfefba335a44e16e1d68b3141df514573)
/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>
#include <asm/page.h>

#include "alloc.h"
#include "alloc_page.h"
#include "vmalloc.h"
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

extern unsigned long etext;

pgd_t *mmu_idmap;

/* CPU 0 starts with the MMU disabled. */
static cpumask_t mmu_disabled_cpumask = { {1} };
unsigned int mmu_disabled_cpu_count = 1;

bool __mmu_enabled(void)
{
	int cpu = current_thread_info()->cpu;

	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the mmu to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
}

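/*
 * Book-keeping for the per-CPU MMU state: clear or set the CPU's bit
 * in mmu_disabled_cpumask and keep mmu_disabled_cpu_count in sync.
 */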
void mmu_mark_enabled(int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
		--mmu_disabled_cpu_count;
}

void mmu_mark_disabled(int cpu)
{
	if (!cpumask_test_and_set_cpu(cpu, &mmu_disabled_cpumask))
		++mmu_disabled_cpu_count;
}

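/*
 * Note the ordering below: a CPU is marked enabled only after the MMU
 * is actually on (and the TLB flushed), and marked disabled before the
 * MMU is switched off, so __mmu_enabled() never reports the MMU as on
 * while exclusive operations would be unsafe.
 */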
extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	int cpu = current_thread_info()->cpu;

	asm_mmu_enable(__pa(pgtable));
	flush_tlb_all();

	mmu_mark_enabled(cpu);
}

extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	int cpu = current_thread_info()->cpu;

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

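/*
 * Walk the page table for vaddr, allocating any missing intermediate
 * tables along the way, and return a pointer to the pte entry itself.
 */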
static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	pmd_t *pmd = pmd_alloc(pgd, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

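/* Write the raw descriptor 'pte' into the pte slot for vaddr. */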
static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
	pteval_t *p_pte = get_pte(pgtable, vaddr);
	*p_pte = pte;
	return p_pte;
}

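/*
 * Build a page descriptor for phys (valid page, access flag set,
 * shareable) with the caller's protection bits and install it.
 */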
static pteval_t *install_page_prot(pgd_t *pgtable, phys_addr_t phys,
				   uintptr_t vaddr, pgprot_t prot)
{
	pteval_t pte = phys;
	pte |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
	pte |= pgprot_val(prot);
	return install_pte(pgtable, vaddr, pte);
}

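/* Map one page at virt with normal write-back, user-accessible attributes. */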
pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	return install_page_prot(pgtable, phys, (uintptr_t)virt,
				 __pgprot(PTE_WBWA | PTE_USER));
}

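/*
 * Translate a virtual address through the given page table: take the
 * page frame address from the pte and add the offset within the page.
 */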
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
{
	return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
		+ ((ulong)mem & (PAGE_SIZE - 1));
}

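/*
 * Map [phys_start, phys_end) at virt_offset with the given protection,
 * one page-sized pte at a time.
 */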
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		install_page_prot(pgtable, paddr, vaddr, prot);
}

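/*
 * Map [phys_start, phys_end) at virt_offset using section (block)
 * descriptors written directly into the top-level table, PGDIR_SIZE
 * at a time.
 */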
void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot)
{
	phys_addr_t paddr = phys_start & PGDIR_MASK;
	uintptr_t vaddr = virt_offset & PGDIR_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pgd_val(*pgd) = paddr;
		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(*pgd) |= pgprot_val(prot);
	}
}

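/*
 * Build the identity map used by the tests: uncached sections for the
 * I/O region, read-only normal memory for the code, and writable
 * normal memory for the rest of RAM up to phys_end, then turn the MMU
 * on and hand the page table back to the caller.
 */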
void *setup_mmu(phys_addr_t phys_end)
{
	uintptr_t code_end = (uintptr_t)&etext;

	/* 0G-1G = I/O, 1G-3G = identity, 3G-4G = vmalloc */
	if (phys_end > (3ul << 30))
		phys_end = 3ul << 30;

#ifdef __aarch64__
	init_alloc_vpage((void *)(4ul << 30));
#endif

	mmu_idmap = alloc_page();
	memset(mmu_idmap, 0, PAGE_SIZE);

	mmu_set_range_sect(mmu_idmap, PHYS_IO_OFFSET,
		PHYS_IO_OFFSET, PHYS_IO_END,
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
	return mmu_idmap;
}
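
/*
 * Usage sketch: one way a test could combine the helpers above once
 * setup_mmu() has run. alloc_vpage() and virt_to_phys() are assumed to
 * be provided by the vmalloc and page headers included above.
 *
 *	phys_addr_t phys = virt_to_phys(alloc_page());
 *	void *virt = alloc_vpage();
 *
 *	install_page(mmu_idmap, phys, virt);
 *	assert(virt_to_pte_phys(mmu_idmap, virt) == phys);
 */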
173