xref: /kvm-unit-tests/lib/riscv/asm/mmu.h (revision 12e0faac079872cb66fd71cec6a86354907aee9f)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef _ASMRISCV_MMU_H_
3 #define _ASMRISCV_MMU_H_
4 #include <libcflat.h>
5 #include <asm/csr.h>
6 #include <asm/page.h>
7 #include <asm/pgtable.h>
8 
/*
 * Mask covering every legal physical-address bit: the satp PPN field
 * shifted up into address position, plus the in-page offset bits.
 */
#define PHYS_MASK	((phys_addr_t)SATP_PPN << PAGE_SHIFT | (PAGE_SIZE - 1))
10 
11 static inline pgd_t *current_pgtable(void)
12 {
13 	return (pgd_t *)((csr_read(CSR_SATP) & SATP_PPN) << PAGE_SHIFT);
14 }
15 
/*
 * Populate 'pgtable' with mappings for the physical range
 * [phys_start, phys_end) at virtual addresses offset by 'virt_offset'
 * with protection bits 'prot'.  NOTE(review): 'flush' presumably
 * requests a TLB flush for the touched pages — confirm against the
 * implementation in lib/riscv/mmu.c.
 */
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot, bool flush);
/* Low-level enable: 'satp' is the raw CSR value (mode | root PPN). */
void __mmu_enable(unsigned long satp);
/* Enable translation with 'pgtable' as root, using the given satp mode. */
void mmu_enable(unsigned long mode, pgd_t *pgtable);
/* Disable address translation (return to bare mode). */
void mmu_disable(void);
22 
/*
 * Invalidate cached translations for the page containing 'addr' on the
 * local hart only.  Per the RISC-V privileged spec, sfence.vma with
 * rs1 = addr and rs2 = x0 orders preceding page-table stores and
 * flushes entries for that virtual address across all address spaces.
 */
static inline void local_flush_tlb_page(unsigned long addr)
{
	/* Single-operand form: rs2 (ASID) defaults to x0, i.e. all ASIDs. */
	asm volatile("sfence.vma %0" : : "r" (addr) : "memory");
}
27 
28 /*
29  * Get the pte pointer for a virtual address, even if it's not mapped.
30  * Constructs upper levels of the table as necessary.
31  */
32 pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr);
33 
34 #endif /* _ASMRISCV_MMU_H_ */
35