// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_host.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

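/*
 * Host-side entry points for setting up and tearing down the hypervisor
 * ("hyp") side of a VM and its vCPUs when pKVM is enabled.
 */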
int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
bool pkvm_hyp_vm_is_created(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);

/*
 * Check whether the specific capability is allowed in pKVM.
 *
 * Certain features are allowed only for non-protected VMs in pKVM, which is why
 * this takes the VM (kvm) as a parameter.
 */
static inline bool kvm_pkvm_ext_allowed(struct kvm *kvm, long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_SVE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	case KVM_CAP_ARM_MTE:
		return false;
	default:
		return !kvm || !kvm_vm_is_protected(kvm);
	}
}
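
/*
 * Minimal usage sketch (illustrative only, not defined by this header): a
 * KVM_CHECK_EXTENSION handler could filter capabilities under pKVM with
 * something like:
 *
 *	if (is_protected_kvm_enabled() && !kvm_pkvm_ext_allowed(kvm, ext))
 *		return 0;
 */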

/*
 * Check whether the KVM VM IOCTL is allowed in pKVM.
 *
 * Certain features are allowed only for non-protected VMs in pKVM, which is why
 * this takes the VM (kvm) as a parameter.
 */
static inline bool kvm_pkvm_ioctl_allowed(struct kvm *kvm, unsigned int ioctl)
{
	long ext;
	int r;

	r = kvm_get_cap_for_kvm_ioctl(ioctl, &ext);

	if (WARN_ON_ONCE(r < 0))
		return false;

	return kvm_pkvm_ext_allowed(kvm, ext);
}

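/*
 * The host memory map (memblock regions) recorded for the nVHE hypervisor at
 * init time; the sizing helpers below iterate over it to reserve memory for
 * hypervisor data structures.
 */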
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

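/*
 * Bytes of hypervisor vmemmap needed to cover one memblock region: one
 * vmemmap_entry_size entry per page in the region, rounded out to whole
 * pages. As an illustration (assuming 4 KiB pages and an 8-byte entry,
 * neither of which is mandated by this header), a 1 GiB region has 262144
 * pages and thus needs 2 MiB of vmemmap, i.e. 512 pages once
 * hyp_vmemmap_pages() converts the total back to pages.
 */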
static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

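/*
 * Total number of pages needed for the hypervisor's vmemmap, i.e. one
 * vmemmap_entry_size metadata entry per page of every hyp memblock region.
 */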
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

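/*
 * Pages needed for the hypervisor's VM table: one pointer slot per possible
 * protected VM (KVM_MAX_PVMS), rounded up to a whole number of pages.
 */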
static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

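/*
 * Upper bound on the number of page-table pages needed to map nr_pages of
 * memory at page granularity, accounting for every level of the walk.
 */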
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

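/*
 * Worst-case number of page-table pages needed to map every hyp memblock
 * region at page granularity.
 */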
static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

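/*
 * Pages reserved for the hypervisor's own stage-1 page-table: enough to map
 * all of memory, plus 1 GiB of private (EL2-only) mappings.
 */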
static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

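/*
 * Pages reserved for the host's stage-2 page-table, which the hypervisor
 * uses to control the host's access to memory: all of memory at page
 * granularity, 1 GiB of MMIO, plus slack for concatenated PGDs.
 */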
static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

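/*
 * Scratch pages handed to EL2 for the pKVM selftests; only needed when
 * CONFIG_NVHE_EL2_DEBUG is enabled.
 */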
#ifdef CONFIG_NVHE_EL2_DEBUG
static inline unsigned long pkvm_selftest_pages(void) { return 32; }
#else
static inline unsigned long pkvm_selftest_pages(void) { return 0; }
#endif

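/* Number of pages backing each of the hypervisor's FF-A RX/TX mailboxes. */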
#define KVM_FFA_MBOX_NR_PAGES	1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}

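/*
 * Size of the per-CPU buffer used to save the host's SVE state: the
 * cpu_sve_state header plus the SVE register payload for the maximum host
 * vector length. Zero when the system has no SVE support.
 */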
static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}

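/*
 * One contiguous run of guest pages (gfn, nr_pages) backed by host pages
 * starting at pfn. Nodes are kept in a per-VM interval tree, hence the
 * rb_node and the internal __subtree_last member.
 */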
struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
	u64 nr_pages;
	u64 __subtree_last; /* Internal member for interval tree */
};

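/*
 * pKVM counterparts of the kvm_pgtable stage-2 API. With pKVM enabled the
 * guest stage-2 tables are owned by the hypervisor at EL2, so these host-side
 * helpers track mappings (see struct pkvm_mapping above) and hand the actual
 * page-table work off to the hypervisor.
 */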
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				       u64 addr, u64 size);
void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				 enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      struct kvm_mmu_memory_cache *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);
#endif /* __ARM64_KVM_PKVM_H__ */