// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <memregions.h>
#include <vmalloc.h>
#include <asm/csr.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>

static pgd_t *__initial_pgtable;

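/* Return the index of the PTE for @vaddr within the page table at @level. */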
static int pte_index(uintptr_t vaddr, int level)
{
	return (vaddr >> (PGDIR_BITS * level + PAGE_SHIFT)) & PGDIR_MASK;
}

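/* Extract the PPN from a PTE value and convert it to a physical address. */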
static phys_addr_t pteval_to_phys_addr(pteval_t pteval)
{
	return (phys_addr_t)((pteval & PTE_PPN) >> PPN_SHIFT) << PAGE_SHIFT;
}

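/*
 * Return a pointer to the next-level page table referenced by @pteval.
 * The table must be identity mapped, as the assert below checks.
 */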
static pte_t *pteval_to_ptep(pteval_t pteval)
{
	phys_addr_t paddr = pteval_to_phys_addr(pteval);
	assert(paddr == __pa(paddr));
	return (pte_t *)__va(paddr);
}

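/* Encode the address of a page table page as the PPN field of a PTE. */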
static pteval_t ptep_to_pteval(pte_t *ptep)
{
	return ((pteval_t)__pa(ptep) >> PAGE_SHIFT) << PPN_SHIFT;
}

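/*
 * Walk @pgtable and return a pointer to the leaf PTE for @vaddr,
 * allocating any missing intermediate page tables along the way.
 */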
pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next)) {
			void *page = alloc_page();
			*next = __pte(ptep_to_pteval(page) | _PAGE_PRESENT);
		}
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	return ptep;
}

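/*
 * Point the leaf PTE for @vaddr at @paddr with protections @prot. The
 * PTE is marked present, accessed and dirty, and the mapping is flushed
 * from the local TLB when @flush is set.
 */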
static pteval_t *__install_page(pgd_t *pgtable, phys_addr_t paddr,
				uintptr_t vaddr, pgprot_t prot, bool flush)
{
	phys_addr_t ppn = (paddr >> PAGE_SHIFT) << PPN_SHIFT;
	pteval_t pte = (pteval_t)ppn;
	pte_t *ptep;

	assert(!(ppn & ~PTE_PPN));

	ptep = get_pte(pgtable, vaddr);
	*ptep = __pte(pte | pgprot_val(prot) | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);

	if (flush)
		local_flush_tlb_page(vaddr);

	return (pteval_t *)ptep;
}

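/* Map a single page read/write and flush the mapping from the local TLB. */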
pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	phys_addr_t paddr = phys & PAGE_MASK;
	uintptr_t vaddr = (uintptr_t)virt & PAGE_MASK;

	return __install_page(pgtable, paddr, vaddr,
			      __pgprot(_PAGE_READ | _PAGE_WRITE), true);
}

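/*
 * Map the physical range [@phys_start, @phys_end) one page at a time,
 * starting at virtual address @virt_offset, with protections @prot.
 */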
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot, bool flush)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	assert(phys_start < phys_end);

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		__install_page(pgtable, paddr, vaddr, prot, flush);
}

void mmu_disable(void)
{
	__asm__ __volatile__ (
	"	csrw	" xstr(CSR_SATP) ", zero\n"
	"	sfence.vma\n"
	: : : "memory");
}

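/*
 * The sfence.vma orders prior page table updates before any implicit
 * references made once the new satp value is in place.
 */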
void __mmu_enable(unsigned long satp)
{
	__asm__ __volatile__ (
	"	sfence.vma\n"
	"	csrw	" xstr(CSR_SATP) ", %0\n"
	: : "r" (satp) : "memory");
}

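/* Compose satp from the translation @mode and the root page table's PPN. */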
void mmu_enable(unsigned long mode, pgd_t *pgtable)
{
	unsigned long ppn = __pa(pgtable) >> PAGE_SHIFT;
	unsigned long satp = mode | ppn;

	assert(!(ppn & ~SATP_PPN));
	__mmu_enable(satp);
}

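/*
 * Identity map every non-I/O, non-reserved memory region (code regions
 * read/execute, everything else read/write), then enable translation
 * with the default satp mode.
 */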
void *setup_mmu(phys_addr_t top, void *opaque)
{
	struct mem_region *r;
	pgd_t *pgtable;

	/* The initial page table uses an identity mapping. */
	assert(top == __pa(top));

	if (!__initial_pgtable)
		__initial_pgtable = alloc_page();
	pgtable = __initial_pgtable;

	for (r = mem_regions; r->end; ++r) {
		if (r->flags & (MR_F_IO | MR_F_RESERVED))
			continue;
		if (r->flags & MR_F_CODE) {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_EXEC), false);
		} else {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_WRITE), false);
		}
	}

	mmu_enable(SATP_MODE_DEFAULT, pgtable);

	return pgtable;
}

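/*
 * Identity map an I/O region, falling back to the initial page table
 * when none is currently installed.
 */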
void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t start = phys_addr & PAGE_MASK;
	phys_addr_t end = PAGE_ALIGN(phys_addr + size);
	pgd_t *pgtable = current_pgtable();
	bool flush = true;

	/* I/O is always identity mapped. */
	assert(end == __pa(end));

	if (!pgtable) {
		if (!__initial_pgtable)
			__initial_pgtable = alloc_page();
		pgtable = __initial_pgtable;
		flush = false;
	}

	mmu_set_range_ptes(pgtable, start, start, end,
			   __pgprot(_PAGE_READ | _PAGE_WRITE), flush);

	return (void __iomem *)__va(phys_addr);
}

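/*
 * Translate @virt by walking @pgtable. Returns 0 if the address has
 * no mapping installed.
 */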
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *virt)
{
	uintptr_t vaddr = (uintptr_t)virt;
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next))
			return 0;
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	if (!pte_val(*ptep))
		return 0;

	return pteval_to_phys_addr(pte_val(*ptep)) | offset_in_page(virt);
}

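/*
 * Translate @address with the page table currently installed in satp,
 * or return it unchanged when translation is off (Bare mode).
 */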
phys_addr_t virt_to_phys(volatile void *address)
{
	unsigned long satp = csr_read(CSR_SATP);
	pgd_t *pgtable = (pgd_t *)((satp & SATP_PPN) << PAGE_SHIFT);

	if ((satp >> SATP_MODE_SHIFT) == 0)
		return __pa(address);

	return virt_to_pte_phys(pgtable, (void *)address);
}

void *phys_to_virt(phys_addr_t address)
{
	/* @address must have an identity mapping for this to work. */
	assert(address == __pa(address));
	assert(virt_to_phys(__va(address)) == address);
	return __va(address);
}
214