// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <memregions.h>
#include <vmalloc.h>
#include <asm/csr.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>

static pgd_t *__initial_pgtable;

/* Index of the PTE for @vaddr within the page table at @level */
static int pte_index(uintptr_t vaddr, int level)
{
	return (vaddr >> (PGDIR_BITS * level + PAGE_SHIFT)) & PGDIR_MASK;
}

/* Extract the physical address encoded in a PTE's PPN field */
static phys_addr_t pteval_to_phys_addr(pteval_t pteval)
{
	return (phys_addr_t)((pteval & PTE_PPN) >> PPN_SHIFT) << PAGE_SHIFT;
}

/* Return a dereferenceable pointer to the page table a PTE points at */
static pte_t *pteval_to_ptep(pteval_t pteval)
{
	phys_addr_t paddr = pteval_to_phys_addr(pteval);
	assert(paddr == __pa(paddr));
	return (pte_t *)__va(paddr);
}

/* Encode the physical address of a page table into a PTE's PPN field */
static pteval_t ptep_to_pteval(pte_t *ptep)
{
	return ((pteval_t)__pa(ptep) >> PAGE_SHIFT) << PPN_SHIFT;
}

/*
 * Walk @pgtable for @vaddr, allocating missing intermediate tables as
 * needed, and return a pointer to the leaf PTE.
 */
pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next)) {
			void *page = alloc_page();
			*next = __pte(ptep_to_pteval(page) | _PAGE_PRESENT);
		}
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	return ptep;
}

static pteval_t *__install_page(pgd_t *pgtable, phys_addr_t paddr,
				uintptr_t vaddr, pgprot_t prot, bool flush)
{
	phys_addr_t ppn = (paddr >> PAGE_SHIFT) << PPN_SHIFT;
	pteval_t pte = (pteval_t)ppn;
	pte_t *ptep;

	assert(!(ppn & ~PTE_PPN));

	ptep = get_pte(pgtable, vaddr);
	*ptep = __pte(pte | pgprot_val(prot) | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);

	if (flush)
		local_flush_tlb_page(vaddr);

	return (pteval_t *)ptep;
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
{
	phys_addr_t paddr = phys & PHYS_PAGE_MASK;
	uintptr_t vaddr = (uintptr_t)virt & PAGE_MASK;

	assert(phys == (phys & PHYS_MASK));

	return __install_page(pgtable, paddr, vaddr,
			      __pgprot(_PAGE_READ | _PAGE_WRITE), true);
}

void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot, bool flush)
{
	phys_addr_t paddr = phys_start & PHYS_PAGE_MASK;
	uintptr_t vaddr = virt_offset & PAGE_MASK;
	uintptr_t virt_end = phys_end - paddr + vaddr;

	assert(phys_start == (phys_start & PHYS_MASK));
	assert(phys_end == (phys_end & PHYS_MASK));
	assert(phys_start < phys_end);

	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
		__install_page(pgtable, paddr, vaddr, prot, flush);
}

void mmu_disable(void)
{
	__asm__ __volatile__ (
	"	csrw	" xstr(CSR_SATP) ", zero\n"
	"	sfence.vma\n"
	: : : "memory");
}

void __mmu_enable(unsigned long satp)
{
	__asm__ __volatile__ (
	"	sfence.vma\n"
	"	csrw	" xstr(CSR_SATP) ", %0\n"
	: : "r" (satp) : "memory");
}

void mmu_enable(unsigned long mode, pgd_t *pgtable)
{
	unsigned long ppn = __pa(pgtable) >> PAGE_SHIFT;
	unsigned long satp = mode | ppn;

	assert(!(ppn & ~SATP_PPN));
	__mmu_enable(satp);
}

void *setup_mmu(phys_addr_t top, void *opaque)
{
	struct mem_region *r;
	pgd_t *pgtable;

	/* The initial page table uses an identity mapping. */
	assert(top == __pa(top));

	if (!__initial_pgtable)
		__initial_pgtable = alloc_page();
	pgtable = __initial_pgtable;

	for (r = mem_regions; r->end; ++r) {
		if (r->flags & (MR_F_IO | MR_F_RESERVED))
			continue;
		if (r->flags & MR_F_CODE) {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_EXEC), false);
		} else {
			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
					   __pgprot(_PAGE_READ | _PAGE_WRITE), false);
		}
	}

	mmu_enable(SATP_MODE_DEFAULT, pgtable);

	return pgtable;
}

void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t start = phys_addr & PHYS_PAGE_MASK;
	phys_addr_t end = PAGE_ALIGN(phys_addr + size);
	pgd_t *pgtable = current_pgtable();
	bool flush = true;

	/* I/O is always identity mapped. */
	assert(end == __pa(end));

	if (!pgtable) {
		if (!__initial_pgtable)
			__initial_pgtable = alloc_page();
		pgtable = __initial_pgtable;
		flush = false;
	}

	mmu_set_range_ptes(pgtable, start, start, end,
			   __pgprot(_PAGE_READ | _PAGE_WRITE), flush);

	return (void __iomem *)__va(phys_addr);
}

/*
 * Walk @pgtable for @virt and return the physical address it maps to,
 * or 0 if no mapping is present.
 */
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *virt)
{
	uintptr_t vaddr = (uintptr_t)virt;
	pte_t *ptep = (pte_t *)pgtable;

	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));

	for (int level = NR_LEVELS - 1; level > 0; --level) {
		pte_t *next = &ptep[pte_index(vaddr, level)];
		if (!pte_val(*next))
			return 0;
		ptep = pteval_to_ptep(pte_val(*next));
	}
	ptep = &ptep[pte_index(vaddr, 0)];

	if (!pte_val(*ptep))
		return 0;

	return pteval_to_phys_addr(pte_val(*ptep)) | offset_in_page(virt);
}

phys_addr_t virt_to_phys(volatile void *address)
{
	unsigned long satp = csr_read(CSR_SATP);
	pgd_t *pgtable = (pgd_t *)((satp & SATP_PPN) << PAGE_SHIFT);

	/* With the MMU off (satp.MODE == Bare), addresses are already physical. */
	if ((satp >> SATP_MODE_SHIFT) == 0)
		return __pa(address);

	return virt_to_pte_phys(pgtable, (void *)address);
}

void *phys_to_virt(phys_addr_t address)
{
	/* @address must have an identity mapping for this to work. */
	assert(address == __pa(address));
	assert(virt_to_phys(__va(address)) == address);
	return __va(address);
}