// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *selftest_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

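/*
 * Carve the host-donated memory pool into the fixed-size regions needed
 * before the buddy allocator is up: the selftest pages, hyp_vmemmap, the
 * VM table, the hyp stage-1 and host stage-2 page-tables, and the FF-A
 * proxy buffers.
 */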
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = pkvm_selftest_pages();
	selftest_base = hyp_early_alloc_contig(nr_pages);
	if (nr_pages && !selftest_base)
		return -ENOMEM;

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}

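/* Map each CPU's host SVE state into the hyp stage-1, if SVE is in use. */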
static int pkvm_create_host_sve_mappings(void)
{
	void *start, *end;
	int ret, i;

	if (!system_supports_sve())
		return 0;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
		struct cpu_sve_state *sve_state = host_data->sve_state;

		start = kern_hyp_va(sve_state);
		end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	return 0;
}

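/*
 * Rebuild the hyp stage-1 from scratch using the early page allocator:
 * idmap, vectors, vmemmap backing, the hyp text/data/rodata/bss sections,
 * the donated pool itself, and each CPU's per-cpu area and stack.
 */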
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
		if (ret)
			return ret;
	}

	return pkvm_create_host_sve_mappings();
}

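/*
 * Point each CPU's boot parameters at the new hyp PGD, cleaning the
 * structures to the PoC so they can be read with the MMU off.
 */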
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

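/* Thin wrappers around the hyp buddy allocator, used as mm_ops callbacks. */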
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

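/*
 * Leaf walker for the hyp stage-1: derive each page's ownership state from
 * the PTE's software bits and mirror it into the hyp_vmemmap and the host
 * stage-2.
 */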
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum pkvm_page_state state;
	struct hyp_page *page;
	phys_addr_t phys;
	enum kvm_pgtable_prot prot;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	page = hyp_phys_to_page(phys);

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1, and make sure to propagate them
	 * to the hyp_vmemmap state.
	 */
	prot = kvm_pgtable_hyp_pte_prot(ctx->old);
	state = pkvm_getstate(prot);
	switch (state) {
	case PKVM_PAGE_OWNED:
		set_hyp_state(page, PKVM_PAGE_OWNED);
		/* hyp text is RO in the host stage-2 to be inspected on panic. */
		if (prot == PAGE_HYP_EXEC) {
			set_host_state(page, PKVM_NOPAGE);
			return host_stage2_idmap_locked(phys, PAGE_SIZE, KVM_PGTABLE_PROT_R);
		} else {
			return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
		}
	case PKVM_PAGE_SHARED_OWNED:
		set_hyp_state(page, PKVM_PAGE_SHARED_OWNED);
		set_host_state(page, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		set_hyp_state(page, PKVM_PAGE_SHARED_BORROWED);
		set_host_state(page, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

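/* Walk the hyp stage-1 over every memblock to synchronise host ownership. */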
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

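/*
 * Walk the whole hyp VA space to take a reference on the page-table page
 * backing each valid PTE, which the early allocator could not track.
 */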
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

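/*
 * Second init stage, entered once __pkvm_init_switch_pgd() has installed
 * the new page-tables: bring up the buddy allocator, prepare the host
 * stage-2, fix up ownership and refcounts, then return to the host.
 */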
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);

	pkvm_ownership_selftest(selftest_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

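/*
 * First init stage, reached from handle___pkvm_init() while still running
 * on the boot page-tables. Does not return on success: the rest of the
 * work happens in __pkvm_init_finalise() on the new stack.
 */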
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	typeof(__pkvm_init_switch_pgd) *fn;
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);

	unreachable();
}