xref: /kvm-unit-tests/lib/arm/mmu.c (revision 4b5caf0c5621cd74baca7faad994777a98793c34)
/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>

extern unsigned long etext;

pgd_t *mmu_idmap;

/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_disabled_cpumask = { {1} };
unsigned int mmu_disabled_cpu_count = 1;

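/*
 * Return whether the MMU is currently enabled on the calling CPU,
 * based on the mmu_disabled_cpumask bookkeeping above.
 */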
bool __mmu_enabled(void)
{
	int cpu = current_thread_info()->cpu;

	/*
	 * mmu_enabled is called from places that are guarding the
	 * use of exclusive ops (which require the mmu to be enabled).
	 * That means we CANNOT call anything from here that may use a
	 * spinlock, atomic bitop, etc., otherwise we'll recurse.
	 * [cpumask_]test_bit is safe though.
	 */
	return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
}

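/*
 * Record that the MMU is now enabled on the given CPU: clear its bit in
 * mmu_disabled_cpumask and decrement the disabled-CPU count.
 */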
void mmu_mark_enabled(int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
		--mmu_disabled_cpu_count;
}

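/*
 * Record that the MMU is now disabled on the given CPU: set its bit in
 * mmu_disabled_cpumask and bump the disabled-CPU count.
 */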
void mmu_mark_disabled(int cpu)
{
	if (!cpumask_test_and_set_cpu(cpu, &mmu_disabled_cpumask))
		++mmu_disabled_cpu_count;
}

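/*
 * Install the given page table and turn the MMU on for the calling CPU
 * (asm_mmu_enable is implemented in assembly), then flush the TLB and
 * mark this CPU as running with the MMU enabled.
 */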
extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	int cpu = current_thread_info()->cpu;

	asm_mmu_enable(__pa(pgtable));
	flush_tlb_all();

	mmu_mark_enabled(cpu);
}

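/*
 * Turn the MMU off on the calling CPU. The CPU is marked as disabled
 * before asm_mmu_disable() runs so that __mmu_enabled() cannot report
 * a stale "enabled" state once the MMU is actually off.
 */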
extern void asm_mmu_disable(void);
void mmu_disable(void)
{
	int cpu = current_thread_info()->cpu;

	mmu_mark_disabled(cpu);

	asm_mmu_disable();
}

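/*
 * Map the physical range [phys_start, phys_end) at virt_offset using
 * page-sized mappings, allocating intermediate pud/pmd/pte tables as
 * needed. Each entry gets the page type, access flag and shared
 * attributes plus the caller-supplied pgprot bits.
 */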
void mmu_set_range_ptes(pgd_t *pgtable, unsigned long virt_offset,
			unsigned long phys_start, unsigned long phys_end,
			pgprot_t prot)
{
	unsigned long vaddr = virt_offset & PAGE_MASK;
	unsigned long paddr = phys_start & PAGE_MASK;
	unsigned long virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pud_t *pud = pud_alloc(pgd, vaddr);
		pmd_t *pmd = pmd_alloc(pud, vaddr);
		pte_t *pte = pte_alloc(pmd, vaddr);

		pte_val(*pte) = paddr;
		pte_val(*pte) |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
		pte_val(*pte) |= pgprot_val(prot);
	}
}

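/*
 * Map the physical range [phys_start, phys_end) at virt_offset using
 * section-sized (PGDIR_SIZE) mappings written directly into the
 * first-level table, with the access flag and shared attributes plus
 * the caller-supplied pgprot bits.
 */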
void mmu_set_range_sect(pgd_t *pgtable, unsigned long virt_offset,
			unsigned long phys_start, unsigned long phys_end,
			pgprot_t prot)
{
	unsigned long vaddr = virt_offset & PGDIR_MASK;
	unsigned long paddr = phys_start & PGDIR_MASK;
	unsigned long virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pgd_val(*pgd) = paddr;
		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(*pgd) |= pgprot_val(prot);
	}
}

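/*
 * Map the I/O region (PHYS_IO_OFFSET..PHYS_IO_END) at virt_offset with
 * uncached, user-accessible section mappings.
 */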
void mmu_init_io_sect(pgd_t *pgtable, unsigned long virt_offset)
{
	mmu_set_range_sect(pgtable, virt_offset,
		PHYS_IO_OFFSET, PHYS_IO_END,
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
}

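/*
 * Build an identity map covering the I/O region and all of RAM, then
 * enable the MMU with it. On 32-bit builds the mapping is capped at
 * 0xfffff000 so that the identity-mapped addresses still fit in an
 * unsigned long.
 */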
void mmu_enable_idmap(void)
{
	unsigned long phys_end = sizeof(long) == 8 || !(PHYS_END >> 32)
						? PHYS_END : 0xfffff000;
	unsigned long code_end = (unsigned long)&etext;

	mmu_idmap = pgd_alloc();

	mmu_init_io_sect(mmu_idmap, PHYS_IO_OFFSET);

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
}
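
/*
 * Illustrative sketch: a test that wants an extra uncached device
 * mapping after mmu_enable_idmap() could reuse the helpers above
 * roughly as follows. The device address range below is hypothetical.
 *
 *	mmu_enable_idmap();
 *	mmu_set_range_sect(mmu_idmap, 0x0a000000UL,
 *			   0x0a000000UL, 0x0a200000UL,
 *			   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
 *	flush_tlb_all();
 */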