// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <memregions.h>
#include <vmalloc.h>
#include <asm/csr.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>

static pgd_t *__initial_pgtable;

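/* Index of @vaddr's PTE within the page table at @level (level 0 is the leaf). */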
static int pte_index(uintptr_t vaddr, int level)
{
	return (vaddr >> (PGDIR_BITS * level + PAGE_SHIFT)) & PGDIR_MASK;
}

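/* Extract the physical address from a PTE by isolating and shifting its PPN field. */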
static phys_addr_t pteval_to_phys_addr(pteval_t pteval)
{
	return (phys_addr_t)((pteval & PTE_PPN) >> PPN_SHIFT) << PAGE_SHIFT;
}

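/*
 * Convert a non-leaf PTE into a pointer to the next-level table.
 * Page table pages must be identity mapped, which the assert checks.
 */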
static pte_t *pteval_to_ptep(pteval_t pteval)
{
	phys_addr_t paddr = pteval_to_phys_addr(pteval);
	assert(paddr == __pa(paddr));
	return (pte_t *)__pa(paddr);
}

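/* Pack a table pointer into the PPN field of a PTE. */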
static pteval_t ptep_to_pteval(pte_t *ptep)
{
	return ((pteval_t)ptep >> PAGE_SHIFT) << PPN_SHIFT;
}

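/*
 * Walk @pgtable from the root down to the leaf level, allocating
 * intermediate tables as needed, and return a pointer to the leaf
 * PTE for @vaddr.
 */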
pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next)) {
			void *page = alloc_page();
			*next = __pte(ptep_to_pteval(page) | _PAGE_PRESENT);
		}
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	return ptep;
}

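/*
 * Map the page at @paddr to @vaddr with protections @prot, installing
 * a valid, accessed, and dirty leaf PTE. Optionally flush the old
 * translation from the TLB.
 */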
static pteval_t *__install_page(pgd_t *pgtable, phys_addr_t paddr,
				uintptr_t vaddr, pgprot_t prot, bool flush)
{
	phys_addr_t ppn = (paddr >> PAGE_SHIFT) << PPN_SHIFT;
	pteval_t pte = (pteval_t)ppn;
	pte_t *ptep;

	assert(!(ppn & ~PTE_PPN));

	ptep = get_pte(pgtable, vaddr);
	pte |= pgprot_val(prot) | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	WRITE_ONCE(*ptep, __pte(pte));

	if (flush)
		local_flush_tlb_page(vaddr);

	return (pteval_t *)ptep;
}

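/* Map a single read-write page at @virt to @phys, flushing its TLB entry. */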
pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	phys_addr_t paddr = phys & PHYS_PAGE_MASK;
	uintptr_t vaddr = (uintptr_t)virt & PAGE_MASK;

	assert(phys == (phys & PHYS_MASK));

	return __install_page(pgtable, paddr, vaddr,
			      __pgprot(_PAGE_READ | _PAGE_WRITE), true);
}

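/*
 * Map the physical range [@phys_start, @phys_end) at @virt_offset,
 * one page at a time, with protections @prot.
 */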
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot, bool flush)
{
	phys_addr_t paddr = phys_start & PHYS_PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	assert(phys_start == (phys_start & PHYS_MASK));
	assert(phys_end == (phys_end & PHYS_MASK));
	assert(phys_start < phys_end);

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		__install_page(pgtable, paddr, vaddr, prot, flush);
}

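/* Disable translation by setting satp to Bare mode, then flush the TLB. */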
void mmu_disable(void)
{
	__asm__ __volatile__ (
	"	csrw	" xstr(CSR_SATP) ", zero\n"
	"	sfence.vma\n"
	: : : "memory");
}

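/*
 * Write @satp to enable translation with the new root table and mode.
 * The preceding sfence.vma orders prior page table updates before the
 * switch.
 */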
void __mmu_enable(unsigned long satp)
{
	__asm__ __volatile__ (
	"	sfence.vma\n"
	"	csrw	" xstr(CSR_SATP) ", %0\n"
	: : "r" (satp) : "memory");
}

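/* Compose a satp value from @mode and the PPN of @pgtable's root page. */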
void mmu_enable(unsigned long mode, pgd_t *pgtable)
{
	unsigned long ppn = __pa(pgtable) >> PAGE_SHIFT;
	unsigned long satp = mode | ppn;

	assert(!(ppn & ~SATP_PPN));
	__mmu_enable(satp);
}

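/*
 * Build the initial identity-mapped page table, covering all non-I/O,
 * non-reserved memory regions (code gets read-execute, everything else
 * read-write), then enable translation with the default satp mode.
 */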
void *setup_mmu(phys_addr_t top, void *opaque)
{
	struct mem_region *r;
	pgd_t *pgtable;

	/* The initial page table uses an identity mapping. */
	assert(top == __pa(top));

	if (!__initial_pgtable)
		__initial_pgtable = alloc_page();
	pgtable = __initial_pgtable;

	for (r = mem_regions; r->end; ++r) {
		if (r->flags & (MR_F_IO | MR_F_RESERVED))
			continue;
		if (r->flags & MR_F_CODE) {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_EXEC), false);
		} else {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_WRITE), false);
		}
	}

	mmu_enable(SATP_MODE_DEFAULT, pgtable);

	return pgtable;
}

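/*
 * Identity map the I/O range [@phys_addr, @phys_addr + @size) as
 * read-write. When no page table is active yet, the mapping is added
 * to the initial page table, which setup_mmu() reuses later.
 */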
void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t start = phys_addr & PHYS_PAGE_MASK;
	phys_addr_t end = PAGE_ALIGN(phys_addr + size);
	pgd_t *pgtable = current_pgtable();
	bool flush = true;

	/* I/O is always identity mapped. */
	assert(end == __pa(end));

	if (!pgtable) {
		if (!__initial_pgtable)
			__initial_pgtable = alloc_page();
		pgtable = __initial_pgtable;
		flush = false;
	}

	mmu_set_range_ptes(pgtable, start, start, end,
			   __pgprot(_PAGE_READ | _PAGE_WRITE), flush);

	return (void __iomem *)__pa(phys_addr);
}

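/*
 * Translate @virt to a physical address by walking @pgtable in
 * software. Returns 0 if any level of the walk hits an invalid PTE.
 */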
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *virt)
{
	uintptr_t vaddr = (uintptr_t)virt;
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next))
			return 0;
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	if (!pte_val(*ptep))
		return 0;

	return pteval_to_phys_addr(pte_val(*ptep)) | offset_in_page(virt);
}

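/*
 * Translate @address using the page table currently installed in satp.
 * With translation off (Bare mode), the address is already physical.
 */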
phys_addr_t virt_to_phys(volatile void *address)
{
	unsigned long satp = csr_read(CSR_SATP);
	pgd_t *pgtable = (pgd_t *)((satp & SATP_PPN) << PAGE_SHIFT);

	if ((satp >> SATP_MODE_SHIFT) == 0)
		return __pa(address);

	return virt_to_pte_phys(pgtable, (void *)address);
}

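/* Return the virtual address of @address, which must be identity mapped. */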
void *phys_to_virt(phys_addr_t address)
{
	/* @address must have an identity mapping for this to work. */
	assert(address == __pa(address));
	assert(virt_to_phys(__va(address)) == address);
	return __va(address);
}