// xref: /kvm-unit-tests/lib/riscv/mmu.c (revision 1878b4b663fd50b87de7ba2b1c90614e2703542f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <memregions.h>
#include <vmalloc.h>
#include <asm/csr.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>

static pgd_t *__initial_pgtable;

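/* Extract VPN[level] from @vaddr: the index into a level-@level page table. */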
static int pte_index(uintptr_t vaddr, int level)
{
	return (vaddr >> (PGDIR_BITS * level + PAGE_SHIFT)) & PGDIR_MASK;
}

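/*
 * Convert a PTE's PPN field to a pointer to the page table (or page)
 * it references. Page tables live in identity-mapped memory, so the
 * physical address can be used directly as a virtual address.
 */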
static pte_t *pteval_to_ptep(pteval_t pteval)
{
	return (pte_t *)(((pteval & PTE_PPN) >> PPN_SHIFT) << PAGE_SHIFT);
}

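/* The inverse of pteval_to_ptep(): encode a table pointer as a PTE PPN field. */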
static pteval_t ptep_to_pteval(pte_t *ptep)
{
	return ((pteval_t)ptep >> PAGE_SHIFT) << PPN_SHIFT;
}

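/*
 * Walk @pgtable and return a pointer to the leaf (level-0) PTE for
 * @vaddr, allocating and linking intermediate tables as needed.
 * Non-leaf PTEs get only _PAGE_PRESENT (V=1, R=W=X=0), which marks
 * them as pointers to the next level.
 */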
pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next)) {
			void *page = alloc_page();
			*next = __pte(ptep_to_pteval(page) | _PAGE_PRESENT);
		}
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	return ptep;
}

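/*
 * Install a leaf PTE mapping @vaddr to @paddr with @prot.
 * _PAGE_ACCESSED and _PAGE_DIRTY are preset because an implementation
 * may raise page faults instead of updating A/D in hardware.
 */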
static pteval_t *__install_page(pgd_t *pgtable, phys_addr_t paddr,
				uintptr_t vaddr, pgprot_t prot, bool flush)
{
	phys_addr_t ppn = (paddr >> PAGE_SHIFT) << PPN_SHIFT;
	pteval_t pte = (pteval_t)ppn;
	pte_t *ptep;

	assert(!(ppn & ~PTE_PPN));

	ptep = get_pte(pgtable, vaddr);
	*ptep = __pte(pte | pgprot_val(prot) | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);

	if (flush)
		local_flush_tlb_page(vaddr);

	return (pteval_t *)ptep;
}

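/* Map one page read/write at @virt -> @phys and flush its TLB entry. */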
pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	phys_addr_t paddr = phys & PAGE_MASK;
	uintptr_t vaddr = (uintptr_t)virt & PAGE_MASK;

	return __install_page(pgtable, paddr, vaddr,
			      __pgprot(_PAGE_READ | _PAGE_WRITE), true);
}

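/*
 * Map the physical range [@phys_start, @phys_end) starting at virtual
 * address @virt_offset, one leaf PTE per page. Pass @flush=false when
 * the page table isn't live yet and no stale TLB entries can exist.
 */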
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot, bool flush)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	assert(phys_start < phys_end);

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		__install_page(pgtable, paddr, vaddr, prot, flush);
}

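/* Switch satp to Bare mode (translation off), then flush the TLB. */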
void mmu_disable(void)
{
	__asm__ __volatile__ (
	"	csrw	" xstr(CSR_SATP) ", zero\n"
	"	sfence.vma\n"
	: : : "memory");
}

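/*
 * The sfence.vma before the satp write ensures the page table stores
 * made while building the mapping are visible to the translation
 * hardware before it starts walking from the new root.
 */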
void __mmu_enable(unsigned long satp)
{
	__asm__ __volatile__ (
	"	sfence.vma\n"
	"	csrw	" xstr(CSR_SATP) ", %0\n"
	: : "r" (satp) : "memory");
}

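/* Compose a satp value from @mode and the root table's PPN, then install it. */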
void mmu_enable(unsigned long mode, pgd_t *pgtable)
{
	unsigned long ppn = (unsigned long)pgtable >> PAGE_SHIFT;
	unsigned long satp = mode | ppn;

	assert(!(ppn & ~SATP_PPN));
	__mmu_enable(satp);
}

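/*
 * Identity-map all RAM regions and enable translation: code regions
 * are mapped read+execute, other RAM read+write, and I/O and reserved
 * regions are skipped (ioremap() maps MMIO on demand). @top and
 * @opaque are unused; the signature matches the setup_mmu() hook
 * invoked from lib/vmalloc.c.
 */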
void *setup_mmu(phys_addr_t top, void *opaque)
{
	struct mem_region *r;
	pgd_t *pgtable;

	if (!__initial_pgtable)
		__initial_pgtable = alloc_page();
	pgtable = __initial_pgtable;

	for (r = mem_regions; r->end; ++r) {
		if (r->flags & (MR_F_IO | MR_F_RESERVED))
			continue;
		if (r->flags & MR_F_CODE) {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_EXEC), false);
		} else {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_WRITE), false);
		}
	}

	mmu_enable(SATP_MODE_DEFAULT, pgtable);

	return pgtable;
}

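/*
 * Identity-map an MMIO region read/write. ioremap() may be called
 * before the MMU is on; in that case the mapping goes into the
 * initial page table, which setup_mmu() reuses later, and no TLB
 * flush is needed.
 */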
void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t start = phys_addr & PAGE_MASK;
	phys_addr_t end = PAGE_ALIGN(phys_addr + size);
	pgd_t *pgtable = current_pgtable();
	bool flush = true;

	assert(sizeof(long) == 8 || !(phys_addr >> 32));

	if (!pgtable) {
		if (!__initial_pgtable)
			__initial_pgtable = alloc_page();
		pgtable = __initial_pgtable;
		flush = false;
	}

	mmu_set_range_ptes(pgtable, start, start, end,
			   __pgprot(_PAGE_READ | _PAGE_WRITE), flush);

	return (void __iomem *)(unsigned long)phys_addr;
}

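/*
 * Translate @virt with a software walk of @pgtable. Returns the
 * physical base address of the mapped page (the in-page offset is not
 * added back), or 0 when no mapping exists.
 */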
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *virt)
{
	uintptr_t vaddr = (uintptr_t)virt;
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next))
			return 0;
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	if (!pte_val(*ptep))
		return 0;

	return __pa(pteval_to_ptep(pte_val(*ptep)));
}

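/*
 * Translate @address using the live page table. With satp in Bare
 * mode (translation off) virtual and physical addresses coincide;
 * otherwise walk the table rooted at satp's PPN and add the in-page
 * offset back in.
 */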
unsigned long virt_to_phys(volatile void *address)
{
	unsigned long satp = csr_read(CSR_SATP);
	pgd_t *pgtable = (pgd_t *)((satp & SATP_PPN) << PAGE_SHIFT);
	phys_addr_t paddr;

	if ((satp >> SATP_MODE_SHIFT) == 0)
		return __pa(address);

	paddr = virt_to_pte_phys(pgtable, (void *)address);
	assert(sizeof(long) == 8 || !(paddr >> 32));

	return (unsigned long)paddr | offset_in_page(address);
}

void *phys_to_virt(unsigned long address)
{
	/* @address must have an identity mapping for this to work. */
	assert(virt_to_phys(__va(address)) == address);
	return __va(address);
}