/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/mmu.h>

extern unsigned long etext;

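/* Root of the identity-mapped page tables built by mmu_enable_idmap() */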
pgd_t *mmu_idmap;

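/*
 * Track, per CPU, whether the MMU has been enabled, so mmu_enabled()
 * answers for the calling CPU rather than for the system as a whole.
 */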
static cpumask_t mmu_enabled_cpumask;
bool mmu_enabled(void)
{
	struct thread_info *ti = current_thread_info();
	return cpumask_test_cpu(ti->cpu, &mmu_enabled_cpumask);
}

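/* Record that the calling CPU now runs with the MMU enabled */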
void mmu_set_enabled(void)
{
	struct thread_info *ti = current_thread_info();
	cpumask_set_cpu(ti->cpu, &mmu_enabled_cpumask);
}

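/*
 * Enable the MMU with @pgtable as the translation table root.
 * asm_mmu_enable() takes a physical address, so translate first, then
 * flush the TLB to drop any stale entries.
 */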
extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	asm_mmu_enable(__pa(pgtable));
	flush_tlb_all();
	mmu_set_enabled();
}

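/*
 * Map the physical range [phys_start, phys_end) at virt_offset using
 * PAGE_SIZE pages, allocating intermediate table levels as needed.
 * Each pte gets the page type, access flag, and shareable attributes,
 * plus whatever the caller passes in @prot.
 */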
void mmu_set_range_ptes(pgd_t *pgtable, unsigned long virt_offset,
			unsigned long phys_start, unsigned long phys_end,
			pgprot_t prot)
{
	unsigned long vaddr = virt_offset & PAGE_MASK;
	unsigned long paddr = phys_start & PAGE_MASK;
	unsigned long virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pud_t *pud = pud_alloc(pgd, vaddr);
		pmd_t *pmd = pmd_alloc(pud, vaddr);
		pte_t *pte = pte_alloc(pmd, vaddr);

		pte_val(*pte) = paddr;
		pte_val(*pte) |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
		pte_val(*pte) |= pgprot_val(prot);
	}
}

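/*
 * Like mmu_set_range_ptes(), but at PGDIR_SIZE granularity: section
 * descriptors are written directly into the pgd entries, so no lower
 * table levels are allocated.
 */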
void mmu_set_range_sect(pgd_t *pgtable, unsigned long virt_offset,
			unsigned long phys_start, unsigned long phys_end,
			pgprot_t prot)
{
	unsigned long vaddr = virt_offset & PGDIR_MASK;
	unsigned long paddr = phys_start & PGDIR_MASK;
	unsigned long virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pgd_val(*pgd) = paddr;
		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(*pgd) |= pgprot_val(prot);
	}
}

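/* Map the I/O region at virt_offset with uncached, user-accessible sections */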
void mmu_init_io_sect(pgd_t *pgtable, unsigned long virt_offset)
{
	mmu_set_range_sect(pgtable, virt_offset,
		PHYS_IO_OFFSET, PHYS_IO_END,
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
}

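/*
 * Build an identity map (virt == phys) covering the I/O region and all
 * of memory, then turn the MMU on with it.
 */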
void mmu_enable_idmap(void)
{
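	/*
	 * If PHYS_END doesn't fit in a 32-bit unsigned long, clamp the
	 * mapping to end just below 4G instead.
	 */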
	unsigned long phys_end = sizeof(long) == 8 || !(PHYS_END >> 32)
						? PHYS_END : 0xfffff000;
	unsigned long code_end = (unsigned long)&etext;

	mmu_idmap = pgd_alloc();

	mmu_init_io_sect(mmu_idmap, PHYS_IO_OFFSET);

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

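	/* The rest of memory, from the end of .text up, is read-write */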
	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
}