// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

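/*
 * Each pvm_init_traps_*() helper below follows the same pattern: read the
 * vCPU's restricted view of an ID register via pvm_read_id_reg(),
 * accumulate the trap bits to set and clear for each feature that is not
 * exposed to the protected guest, then apply them to the vCPU's shadow
 * HCR_EL2/CPTR_EL2/MDCR_EL2 values in one step.
 */
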
/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
        u64 hcr_set = HCR_RW;
        u64 hcr_clear = 0;
        u64 cptr_set = 0;
        u64 cptr_clear = 0;

        /* Protected KVM does not support AArch32 guests. */
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
                PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
                PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

        /*
         * Linux guests assume support for floating-point and Advanced SIMD. Do
         * not change the trapping behavior for these from the KVM default.
         */
        BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
                                PVM_ID_AA64PFR0_ALLOW));
        BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
                                PVM_ID_AA64PFR0_ALLOW));

        if (has_hvhe())
                hcr_set |= HCR_E2H;

        /* Trap RAS unless all current versions are supported */
        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
            ID_AA64PFR0_EL1_RAS_V1P1) {
                hcr_set |= HCR_TERR | HCR_TEA;
                hcr_clear |= HCR_FIEN;
        }

        /* Trap AMU */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
                hcr_clear |= HCR_AMVOFFEN;
                cptr_set |= CPTR_EL2_TAM;
        }

        /* Trap SVE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
                if (has_hvhe())
                        cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
                else
                        cptr_set |= CPTR_EL2_TZ;
        }

        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
        vcpu->arch.cptr_el2 &= ~cptr_clear;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
        u64 hcr_set = 0;
        u64 hcr_clear = 0;

        /* Memory Tagging: Trap and Treat as Untagged if not supported. */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
                hcr_set |= HCR_TID5;
                hcr_clear |= HCR_DCT | HCR_ATA;
        }

        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
        u64 mdcr_set = 0;
        u64 mdcr_clear = 0;
        u64 cptr_set = 0;

        /* Trap/constrain PMU */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
                mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
                mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
                              MDCR_EL2_HPMN_MASK;
        }

        /* Trap Debug */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
                mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

        /* Trap OS Double Lock */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
                mdcr_set |= MDCR_EL2_TDOSA;

        /* Trap SPE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
                mdcr_set |= MDCR_EL2_TPMS;
                mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
        }

        /* Trap Trace Filter */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
                mdcr_set |= MDCR_EL2_TTRF;

        /* Trap Trace */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
                if (has_hvhe())
                        cptr_set |= CPACR_EL1_TTA;
                else
                        cptr_set |= CPTR_EL2_TTA;
        }

        /* Trap External Trace */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
                mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;

        vcpu->arch.mdcr_el2 |= mdcr_set;
        vcpu->arch.mdcr_el2 &= ~mdcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
        u64 mdcr_set = 0;

        /* Trap Debug Communications Channel registers */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
                mdcr_set |= MDCR_EL2_TDCC;

        vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
        u64 hcr_set = 0;

        /* Trap LOR */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
                hcr_set |= HCR_TLOR;

        vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
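/*
 * HCR_TID3 traps guest reads of the feature ID register group, which is
 * what allows pvm_read_id_reg() to present the restricted feature view
 * relied upon by the helpers above. The impdef traps keep
 * implementation-defined registers under hypervisor control.
 */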
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
        const u64 hcr_trap_feat_regs = HCR_TID3;
        const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

        /*
         * Always trap:
         * - Feature id registers: to control features exposed to guests
         * - Implementation-defined features
         */
        vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

        /* Clear res0 and set res1 bits to trap potential new features. */
        vcpu->arch.hcr_el2 &= ~(HCR_RES0);
        vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
        if (!has_hvhe()) {
                vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
                vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
        }
}

/*
 * Initialize trap register values for protected VMs.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
        pvm_init_trap_regs(vcpu);
        pvm_init_traps_aa64pfr0(vcpu);
        pvm_init_traps_aa64pfr1(vcpu);
        pvm_init_traps_aa64dfr0(vcpu);
        pvm_init_traps_aa64mmfr0(vcpu);
        pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000
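
/*
 * Illustratively: table index 0 maps to handle 0x1000, index 1 to 0x1001,
 * and so on. A bogus handle below HANDLE_OFFSET wraps around to a large
 * unsigned index and is caught by the bounds check in get_vm_by_handle().
 */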

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
        return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
        return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table', as well as lookups of entries via get_vm_by_handle().
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
        WARN_ON(vm_table);
        vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
        unsigned int idx = vm_handle_to_idx(handle);

        if (unlikely(idx >= KVM_MAX_PVMS))
                return NULL;

        return vm_table[idx];
}
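
/*
 * Look up a vCPU by VM handle and vCPU index. On success this takes a
 * reference on the page backing the VM's hyp state, so a concurrent
 * __pkvm_teardown_vm() fails with -EBUSY until pkvm_put_hyp_vcpu() drops
 * that reference.
 */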
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
                                         unsigned int vcpu_idx)
{
        struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
        struct pkvm_hyp_vm *hyp_vm;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
                goto unlock;

        hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
        hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
        hyp_spin_unlock(&vm_table_lock);
        return hyp_vcpu;
}
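
/* Drop the VM reference taken by pkvm_load_hyp_vcpu(). */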
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

        hyp_spin_lock(&vm_table_lock);
        hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
        hyp_spin_unlock(&vm_table_lock);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
        if (host_vcpu)
                hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
                             unsigned int nr_vcpus)
{
        int i;

        for (i = 0; i < nr_vcpus; i++)
                unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
                             unsigned int nr_vcpus)
{
        hyp_vm->host_kvm = host_kvm;
        hyp_vm->kvm.created_vcpus = nr_vcpus;
        hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
}

static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
                              struct pkvm_hyp_vm *hyp_vm,
                              struct kvm_vcpu *host_vcpu,
                              unsigned int vcpu_idx)
{
        int ret = 0;

        if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
                return -EBUSY;

        if (host_vcpu->vcpu_idx != vcpu_idx) {
                ret = -EINVAL;
                goto done;
        }

        hyp_vcpu->host_vcpu = host_vcpu;

        hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
        hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
        hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

        hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
        hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
        if (ret)
                unpin_host_vcpu(host_vcpu);
        return ret;
}

static int find_free_vm_table_entry(struct kvm *host_kvm)
{
        int i;

        for (i = 0; i < KVM_MAX_PVMS; ++i) {
                if (!vm_table[i])
                        return i;
        }

        return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
                                           struct pkvm_hyp_vm *hyp_vm)
{
        struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
        int idx;

        hyp_assert_lock_held(&vm_table_lock);

        /*
         * Initializing protected state might have failed, yet a malicious
         * host could trigger this function. Thus, ensure that 'vm_table'
         * exists.
         */
        if (unlikely(!vm_table))
                return -EINVAL;

        idx = find_free_vm_table_entry(host_kvm);
        if (idx < 0)
                return idx;

        hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

        /* VMID 0 is reserved for the host */
        atomic64_set(&mmu->vmid.id, idx + 1);

        mmu->arch = &hyp_vm->kvm.arch;
        mmu->pgt = &hyp_vm->pgt;

        vm_table[idx] = hyp_vm;
        return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
        hyp_assert_lock_held(&vm_table_lock);
        vm_table[vm_handle_to_idx(handle)] = NULL;
}
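
/*
 * Size of the hyp VM state: the fixed part of 'struct pkvm_hyp_vm' plus a
 * trailing array of 'nr_vcpus' vCPU pointers. size_add() and size_mul()
 * saturate at SIZE_MAX instead of wrapping on overflow.
 */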
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
        return size_add(sizeof(struct pkvm_hyp_vm),
                        size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}
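
/*
 * Map a page-aligned donation from the host at hyp. Donation transfers
 * ownership of the pages to hyp and unmaps them from the host's stage-2
 * tables; if the donation is refused, NULL is returned and the memory
 * stays with the host.
 */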
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
        void *va = (void *)kern_hyp_va(host_va);

        if (!PAGE_ALIGNED(va))
                return NULL;

        if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
                                   PAGE_ALIGN(size) >> PAGE_SHIFT))
                return NULL;

        return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
        void *va = map_donated_memory_noclear(host_va, size);

        if (va)
                memset(va, 0, size);

        return va;
}
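
/*
 * unmap_donated_memory() scrubs the memory before handing it back so that
 * no hyp state leaks to the host; the _noclear variant is for callers
 * that have already scrubbed the contents themselves, such as
 * teardown_donated_memory().
 */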
static void __unmap_donated_memory(void *va, size_t size)
{
        WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
                                       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
        if (!va)
                return;

        memset(va, 0, size);
        __unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
        if (!va)
                return;

        __unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *         Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *          the VM. Must be page aligned. Its size is implied by the VM's
 *          VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
                   unsigned long pgd_hva)
{
        struct pkvm_hyp_vm *hyp_vm = NULL;
        size_t vm_size, pgd_size;
        unsigned int nr_vcpus;
        void *pgd = NULL;
        int ret;

        ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
        if (ret)
                return ret;

        nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
        if (nr_vcpus < 1) {
                ret = -EINVAL;
                goto err_unpin_kvm;
        }

        vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

        ret = -ENOMEM;

        hyp_vm = map_donated_memory(vm_hva, vm_size);
        if (!hyp_vm)
                goto err_remove_mappings;

        pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
        if (!pgd)
                goto err_remove_mappings;

        init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

        hyp_spin_lock(&vm_table_lock);
        ret = insert_vm_table_entry(host_kvm, hyp_vm);
        if (ret < 0)
                goto err_unlock;

        ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
        if (ret)
                goto err_remove_vm_table_entry;
        hyp_spin_unlock(&vm_table_lock);

        return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
        remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
        hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
        unmap_donated_memory(hyp_vm, vm_size);
        unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *           Must be page aligned. The size of the area must be equal to
 *           the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
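/*
 * vCPUs must be donated in index order: the next expected index is
 * 'hyp_vm->nr_vcpus', and a host vcpu whose vcpu_idx does not match it is
 * rejected with -EINVAL by init_pkvm_hyp_vcpu().
 */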
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
                     unsigned long vcpu_hva)
{
        struct pkvm_hyp_vcpu *hyp_vcpu;
        struct pkvm_hyp_vm *hyp_vm;
        unsigned int idx;
        int ret;

        hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
        if (!hyp_vcpu)
                return -ENOMEM;

        hyp_spin_lock(&vm_table_lock);

        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                ret = -ENOENT;
                goto unlock;
        }

        idx = hyp_vm->nr_vcpus;
        if (idx >= hyp_vm->kvm.created_vcpus) {
                ret = -EINVAL;
                goto unlock;
        }

        ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
        if (ret)
                goto unlock;

        hyp_vm->vcpus[idx] = hyp_vcpu;
        hyp_vm->nr_vcpus++;
unlock:
        hyp_spin_unlock(&vm_table_lock);

        if (ret)
                unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

        return ret;
}
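
/*
 * Scrub a span of donated memory and push its pages onto the teardown
 * memcache so the host can reclaim them; the hyp mapping is then removed
 * without a second clear, since the memset here has already scrubbed it.
 */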
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
        size = PAGE_ALIGN(size);
        memset(addr, 0, size);

        for (void *start = addr; start < addr + size; start += PAGE_SIZE)
                push_hyp_memcache(mc, start, hyp_virt_to_phys);

        unmap_donated_memory_noclear(addr, size);
}

int __pkvm_teardown_vm(pkvm_handle_t handle)
{
        struct kvm_hyp_memcache *mc;
        struct pkvm_hyp_vm *hyp_vm;
        struct kvm *host_kvm;
        unsigned int idx;
        size_t vm_size;
        int err;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                err = -ENOENT;
                goto err_unlock;
        }

        if (WARN_ON(hyp_page_count(hyp_vm))) {
                err = -EBUSY;
                goto err_unlock;
        }

        host_kvm = hyp_vm->host_kvm;

        /* Ensure the VMID is clean before it can be reallocated */
        __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
        remove_vm_table_entry(handle);
        hyp_spin_unlock(&vm_table_lock);

        /* Reclaim guest pages (including page-table pages) */
        mc = &host_kvm->arch.pkvm.teardown_mc;
        reclaim_guest_pages(hyp_vm, mc);
        unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

        /* Push the metadata pages to the teardown memcache */
        for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
                struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

                teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
        }

        vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
        teardown_donated_memory(mc, hyp_vm, vm_size);
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return 0;

err_unlock:
        hyp_spin_unlock(&vm_table_lock);
        return err;
}