xref: /kvm-unit-tests/lib/arm/mmu.c (revision db328a24b7030e9dd7e3012f25096a9188722144)
/*
 * MMU enable and page table manipulation functions
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/setup.h>
#include <asm/mmu.h>

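/* etext is provided by the linker script and marks the end of the text (code) section. */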
extern unsigned long etext;

pgd_t *mmu_idmap;

static bool mmu_on;
bool mmu_enabled(void)
{
	return mmu_on;
}

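/*
 * mmu_enable() switches the MMU on with @pgtable as the translation table
 * base (asm_mmu_enable() does the low-level work), then invalidates any
 * stale TLB entries.
 */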
extern void asm_mmu_enable(phys_addr_t pgtable);
void mmu_enable(pgd_t *pgtable)
{
	asm_mmu_enable(__pa(pgtable));
	flush_tlb_all();
	mmu_on = true;
}

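/*
 * Map [@phys_start, @phys_end) at @virt_offset with page-granular mappings,
 * allocating intermediate table levels as needed. Each PTE is marked
 * accessed and shareable, with the caller's @prot bits merged in.
 */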
void mmu_set_range_ptes(pgd_t *pgtable, unsigned long virt_offset,
			unsigned long phys_start, unsigned long phys_end,
			pgprot_t prot)
{
	unsigned long vaddr = virt_offset & PAGE_MASK;
	unsigned long paddr = phys_start & PAGE_MASK;
	unsigned long virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pud_t *pud = pud_alloc(pgd, vaddr);
		pmd_t *pmd = pmd_alloc(pud, vaddr);
		pte_t *pte = pte_alloc(pmd, vaddr);

		pte_val(*pte) = paddr;
		pte_val(*pte) |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
		pte_val(*pte) |= pgprot_val(prot);
	}
}

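/*
 * Map [@phys_start, @phys_end) at @virt_offset using section (first-level
 * block) descriptors, one PGDIR_SIZE-sized block per entry.
 */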
void mmu_set_range_sect(pgd_t *pgtable, unsigned long virt_offset,
			unsigned long phys_start, unsigned long phys_end,
			pgprot_t prot)
{
	unsigned long vaddr = virt_offset & PGDIR_MASK;
	unsigned long paddr = phys_start & PGDIR_MASK;
	unsigned long virt_end = phys_end - paddr + vaddr;

	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
		pgd_t *pgd = pgd_offset(pgtable, vaddr);
		pgd_val(*pgd) = paddr;
		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
		pgd_val(*pgd) |= pgprot_val(prot);
	}
}

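/* Map the MMIO region uncached with user access, using section mappings. */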
void mmu_init_io_sect(pgd_t *pgtable, unsigned long virt_offset)
{
	mmu_set_range_sect(pgtable, virt_offset,
		PHYS_IO_OFFSET, PHYS_IO_END,
		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
}

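/*
 * Build an identity map covering the I/O region, the test's code and the
 * remaining memory, then enable the MMU with it.
 */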
void mmu_enable_idmap(void)
{
	/*
	 * On 32-bit, if PHYS_END doesn't fit in an unsigned long, clamp it
	 * to the last page-aligned address that does.
	 */
	unsigned long phys_end = sizeof(long) == 8 || !(PHYS_END >> 32)
						? PHYS_END : 0xfffff000;
	unsigned long code_end = (unsigned long)&etext;

	mmu_idmap = pgd_alloc();

	mmu_init_io_sect(mmu_idmap, PHYS_IO_OFFSET);

	/* armv8 requires code shared between EL1 and EL0 to be read-only */
	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
		PHYS_OFFSET, code_end,
		__pgprot(PTE_WBWA | PTE_RDONLY | PTE_USER));

	mmu_set_range_ptes(mmu_idmap, code_end,
		code_end, phys_end,
		__pgprot(PTE_WBWA | PTE_USER));

	mmu_enable(mmu_idmap);
}