/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __RISCV_KVM_GSTAGE_H_
#define __RISCV_KVM_GSTAGE_H_

#include <linux/kvm_types.h>

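/*
 * A single g-stage (guest-physical to host-physical) page table.
 * Roughly, the fields are:
 *
 * @kvm:   VM that owns this page table
 * @flags: KVM_GSTAGE_FLAGS_LOCAL appears to mark a table used only on the
 *         local hart, so TLB maintenance can stay local
 * @vmid:  VMID used when this table is programmed into hgatp
 * @pgd:   root of the page table
 */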
struct kvm_gstage {
	struct kvm *kvm;
	unsigned long flags;
#define KVM_GSTAGE_FLAGS_LOCAL		BIT(0)
	unsigned long vmid;
	pgd_t *pgd;
};

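/*
 * Describes one leaf mapping: the guest physical address, the leaf PTE
 * value, and the page-table level at which the leaf sits.
 */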
struct kvm_gstage_mapping {
	gpa_t addr;
	pte_t pte;
	u32 level;
};

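/* GPA bits translated per page-table level (512 or 1024 entries per table) */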
#ifdef CONFIG_64BIT
#define kvm_riscv_gstage_index_bits	9
#else
#define kvm_riscv_gstage_index_bits	10
#endif

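/* hgatp mode and page-table depth, set up by kvm_riscv_gstage_mode_detect() */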
extern unsigned long kvm_riscv_gstage_mode;
extern unsigned long kvm_riscv_gstage_pgd_levels;

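/*
 * The root table of the "x4" hgatp modes has 2 extra index bits, i.e. it
 * spans four pages (16 KiB) instead of one. For example, with Sv39x4
 * (HGATP_PAGE_SHIFT = 12, 3 levels of 9 index bits each) the macros below
 * give 12 + 3 * 9 + 2 = 41 guest physical address bits.
 */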
#define kvm_riscv_gstage_pgd_xbits	2
#define kvm_riscv_gstage_pgd_size	(1UL << (HGATP_PAGE_SHIFT + kvm_riscv_gstage_pgd_xbits))
#define kvm_riscv_gstage_gpa_bits	(HGATP_PAGE_SHIFT + \
					 (kvm_riscv_gstage_pgd_levels * \
					  kvm_riscv_gstage_index_bits) + \
					 kvm_riscv_gstage_pgd_xbits)
#define kvm_riscv_gstage_gpa_size	((gpa_t)(1ULL << kvm_riscv_gstage_gpa_bits))

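/*
 * Walk the page table and, if a leaf mapping covers @addr, return its PTE
 * pointer and level through @ptepp and @ptep_level (true on success).
 */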
bool kvm_riscv_gstage_get_leaf(struct kvm_gstage *gstage, gpa_t addr,
			       pte_t **ptepp, u32 *ptep_level);

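/*
 * Install the leaf PTE described by @map, allocating any missing
 * intermediate tables from @pcache.
 */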
int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
			     struct kvm_mmu_memory_cache *pcache,
			     const struct kvm_gstage_mapping *map);

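/*
 * Map @page_size bytes at @gpa to host physical address @hpa with the
 * requested permissions and report the resulting leaf through @out_map.
 *
 * Minimal usage sketch (assumes @gstage, @pcache, @gpa and @hpa were
 * prepared by the caller), mapping one writable, non-executable page:
 *
 *	struct kvm_gstage_mapping map;
 *	int rc;
 *
 *	rc = kvm_riscv_gstage_map_page(gstage, pcache, gpa, hpa, PAGE_SIZE,
 *				       false, false, &map);
 *	if (rc)
 *		return rc;
 */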
int kvm_riscv_gstage_map_page(struct kvm_gstage *gstage,
			      struct kvm_mmu_memory_cache *pcache,
			      gpa_t gpa, phys_addr_t hpa, unsigned long page_size,
			      bool page_rdonly, bool page_exec,
			      struct kvm_gstage_mapping *out_map);

enum kvm_riscv_gstage_op {
	GSTAGE_OP_NOP = 0,	/* Nothing */
	GSTAGE_OP_CLEAR,	/* Clear/Unmap */
	GSTAGE_OP_WP,		/* Write-protect */
};

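/*
 * Apply @op to the PTE at @ptep (which maps @addr at level @ptep_level)
 * and flush the affected g-stage translations.
 */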
void kvm_riscv_gstage_op_pte(struct kvm_gstage *gstage, gpa_t addr,
			     pte_t *ptep, u32 ptep_level, enum kvm_riscv_gstage_op op);

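/*
 * Unmap the guest physical range [@start, @start + @size); when
 * @may_block is true the walk may reschedule between PTEs.
 */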
void kvm_riscv_gstage_unmap_range(struct kvm_gstage *gstage,
				  gpa_t start, gpa_t size, bool may_block);

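/* Write-protect all leaf mappings in [@start, @end), e.g. for dirty logging */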
void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end);

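/*
 * Probe the hgatp translation modes supported by the hardware and pick
 * the g-stage mode and page-table depth to use.
 */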
void kvm_riscv_gstage_mode_detect(void);

#endif