/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASMRISCV_MMU_H_
#define _ASMRISCV_MMU_H_
#include <libcflat.h>
#include <asm/csr.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define PHYS_MASK	((phys_addr_t)SATP_PPN << PAGE_SHIFT | (PAGE_SIZE - 1))
#define PHYS_PAGE_MASK	(~((phys_addr_t)PAGE_SIZE - 1))

/* Return the root page table currently installed in the satp CSR. */
static inline pgd_t *current_pgtable(void)
{
	return (pgd_t *)((csr_read(CSR_SATP) & SATP_PPN) << PAGE_SHIFT);
}
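
/*
 * Example (illustrative sketch, not part of the original header): satp
 * stores the root table's physical page number in its PPN field, so
 * current_pgtable() just shifts that PPN back into a byte address. The
 * 'pgtable' variable below is hypothetical.
 *
 *	pgd_t *pgtable = current_pgtable();
 *
 *	assert((csr_read(CSR_SATP) & SATP_PPN) ==
 *	       ((unsigned long)pgtable >> PAGE_SHIFT));
 */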

/*
 * Map the physical range [phys_start, phys_end) at virtual addresses
 * offset from the physical addresses by virt_offset, with protection
 * bits prot, optionally flushing the TLB afterwards.
 */
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
			phys_addr_t phys_start, phys_addr_t phys_end,
			pgprot_t prot, bool flush);
/* __mmu_enable() takes a fully composed satp value. */
void __mmu_enable(unsigned long satp);
/* mmu_enable() takes the satp mode and the root page table separately. */
void mmu_enable(unsigned long mode, pgd_t *pgtable);
void mmu_disable(void);

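/*
 * Example usage (illustrative sketch only, not part of the original
 * header): identity-map a physical range and enable translation.
 * SATP_MODE_39, the __pgprot() wrapper, the _PAGE_* bits, and
 * alloc_page() are assumptions here, not guaranteed by this header.
 *
 *	pgd_t *pgtable = alloc_page();
 *
 *	mmu_set_range_ptes(pgtable, 0, base, base + size,
 *			   __pgprot(_PAGE_READ | _PAGE_WRITE), false);
 *	mmu_enable(SATP_MODE_39, pgtable);
 *	...
 *	mmu_disable();
 */
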
/* Flush the local hart's TLB entries for the page containing addr. */
static inline void local_flush_tlb_page(unsigned long addr)
{
	asm volatile("sfence.vma %0" : : "r" (addr) : "memory");
}

/*
 * Get the pte pointer for a virtual address, even if it's not mapped.
 * Constructs upper levels of the table as necessary.
 */
pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr);
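
/*
 * Example (illustrative sketch only): install a single 4K mapping by
 * writing the leaf PTE that get_pte() returns. In Sv39/Sv48 PTEs the
 * PPN field starts at bit 10; the __pte() wrapper and _PAGE_* flag
 * names are assumed to come from asm/pgtable.h, and vaddr/paddr are
 * hypothetical.
 *
 *	pte_t *ptep = get_pte(pgtable, vaddr);
 *
 *	*ptep = __pte(((paddr >> PAGE_SHIFT) << 10) |
 *		      _PAGE_READ | _PAGE_WRITE | _PAGE_PRESENT);
 *	local_flush_tlb_page(vaddr);
 */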

#endif /* _ASMRISCV_MMU_H_ */