xref: /kvm-unit-tests/lib/riscv/mmu.c (revision ad435a714fb07cd4bfd5af479b177e245ec48c56)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <memregions.h>
#include <asm/csr.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>

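/*
 * Root page table used by setup_mmu() and by ioremap() when no page
 * table is active yet.
 */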
static pgd_t *__initial_pgtable;

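/* Return the index into the page table at @level for @vaddr, i.e. VPN[level]. */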
static int pte_index(uintptr_t vaddr, int level)
{
	return (vaddr >> (PGDIR_BITS * level + PAGE_SHIFT)) & PGDIR_MASK;
}

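/* Convert a PTE's PPN field to a pointer to the page table page it references. */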
static pte_t *pteval_to_ptep(pteval_t pteval)
{
	return (pte_t *)(((pteval & PTE_PPN) >> PPN_SHIFT) << PAGE_SHIFT);
}

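/* Convert a page table page pointer to the PPN field of a PTE. */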
static pteval_t ptep_to_pteval(pte_t *ptep)
{
	return ((pteval_t)ptep >> PAGE_SHIFT) << PPN_SHIFT;
}

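/*
 * Walk @pgtable to the leaf PTE for @vaddr, allocating and linking
 * intermediate page tables (non-leaf PTEs with only _PAGE_PRESENT set)
 * as needed, and return a pointer to the leaf PTE.
 */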
pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next)) {
			void *page = alloc_page();
			*next = __pte(ptep_to_pteval(page) | _PAGE_PRESENT);
		}
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	return ptep;
}

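/*
 * Install a leaf PTE mapping @vaddr to @paddr with @prot, also setting
 * the present, accessed and dirty bits, and optionally flush the local
 * TLB entry for @vaddr.
 */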
static pteval_t *__install_page(pgd_t *pgtable, phys_addr_t paddr,
				uintptr_t vaddr, pgprot_t prot, bool flush)
{
	phys_addr_t ppn = (paddr >> PAGE_SHIFT) << PPN_SHIFT;
	pteval_t pte = (pteval_t)ppn;
	pte_t *ptep;

	assert(!(ppn & ~PTE_PPN));

	ptep = get_pte(pgtable, vaddr);
	*ptep = __pte(pte | pgprot_val(prot) | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);

	if (flush)
		local_flush_tlb_page(vaddr);

	return (pteval_t *)ptep;
}

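/*
 * Map the physical range [phys_start, phys_end) page by page with @prot,
 * starting at virtual address @virt_offset.
 */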
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot, bool flush)
{
	phys_addr_t paddr = phys_start & PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	assert(phys_start < phys_end);

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		__install_page(pgtable, paddr, vaddr, prot, flush);
}

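/* Turn translation off by zeroing satp, then flush the local TLB. */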
void mmu_disable(void)
{
	__asm__ __volatile__ (
	"	csrw	" xstr(CSR_SATP) ", zero\n"
	"	sfence.vma\n"
	: : : "memory");
}

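/* Flush the local TLB, then write @satp to switch to the new page table. */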
void __mmu_enable(unsigned long satp)
{
	__asm__ __volatile__ (
	"	sfence.vma\n"
	"	csrw	" xstr(CSR_SATP) ", %0\n"
	: : "r" (satp) : "memory");
}

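/* Compose a satp value from @mode and the root page table's PPN, then install it. */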
void mmu_enable(unsigned long mode, pgd_t *pgtable)
{
	unsigned long ppn = (unsigned long)pgtable >> PAGE_SHIFT;
	unsigned long satp = mode | ppn;

	assert(!(ppn & ~SATP_PPN));
	__mmu_enable(satp);
}

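/*
 * Identity map all RAM regions, code regions read/execute and all other
 * RAM read/write, into the initial page table, then enable translation
 * with the default satp mode.
 */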
void setup_mmu(void)
{
	struct mem_region *r;
	pgd_t *pgtable;

	if (!__initial_pgtable)
		__initial_pgtable = alloc_page();
	pgtable = __initial_pgtable;

	for (r = mem_regions; r->end; ++r) {
		if (r->flags & (MR_F_IO | MR_F_RESERVED))
			continue;
		if (r->flags & MR_F_CODE) {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_EXEC), false);
		} else {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_WRITE), false);
		}
	}

	mmu_enable(SATP_MODE_DEFAULT, pgtable);
}

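/*
 * Identity map an MMIO region read/write. If translation is not enabled
 * yet, the mapping goes into the initial page table and no TLB flush is
 * needed.
 */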
void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t start = phys_addr & PAGE_MASK;
	phys_addr_t end = PAGE_ALIGN(phys_addr + size);
	pgd_t *pgtable = current_pgtable();
	bool flush = true;

	assert(sizeof(long) == 8 || !(phys_addr >> 32));

	if (!pgtable) {
		if (!__initial_pgtable)
			__initial_pgtable = alloc_page();
		pgtable = __initial_pgtable;
		flush = false;
	}

	mmu_set_range_ptes(pgtable, start, start, end,
			   __pgprot(_PAGE_READ | _PAGE_WRITE), flush);

	return (void __iomem *)(unsigned long)phys_addr;
}