// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"

bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	return !!kvm_s390_pv_get_handle(kvm);
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_is_protected);

bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
	lockdep_assert_held(&vcpu->mutex);
	return !!kvm_s390_pv_cpu_get_handle(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);
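
/*
 * Example (an illustrative sketch, not taken from any caller): both helpers
 * assert their locking context, so a caller must hold the respective lock,
 * e.g.:
 *
 *	mutex_lock(&kvm->lock);
 *	if (kvm_s390_pv_is_protected(kvm))
 *		handle = kvm_s390_pv_get_handle(kvm);
 *	mutex_unlock(&kvm->lock);
 */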

/**
 * kvm_s390_pv_make_secure() - make one guest page secure
 * @kvm: the guest
 * @gaddr: the guest address that needs to be made secure
 * @uvcb: the UVCB specifying which operation needs to be performed
 *
 * Context: needs to be called with kvm->srcu held.
 * Return: 0 on success, < 0 in case of error.
 */
int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb)
{
	unsigned long vmaddr;

	lockdep_assert_held(&kvm->srcu);

	vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return -EFAULT;
	return make_hva_secure(kvm->mm, vmaddr, uvcb);
}

int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = gaddr,
	};

	return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb);
}

/**
 * kvm_s390_pv_destroy_page() - Destroy a guest page.
 * @kvm: the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: may sleep.
 *
 * Return: 0 on success, an error code otherwise.
 */
int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr)
{
	struct page *page;
	int rc = 0;

	mmap_read_lock(kvm->mm);
	page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
	if (page)
		rc = __kvm_s390_pv_destroy_page(page);
	kvm_release_page_clean(page);
	mmap_read_unlock(kvm->mm);
	return rc;
}

/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed
 *
 * @list: list head for the list of leftover VMs
 * @old_gmap_table: the gmap table of the leftover protected VM
 * @handle: the handle of the leftover protected VM
 * @stor_var: pointer to the variable storage of the leftover protected VM
 * @stor_base: address of the base storage of the leftover protected VM
 *
 * Represents a protected VM that is still registered with the Ultravisor,
 * but which no longer corresponds to an active KVM VM. It should
 * be destroyed at some point later, either asynchronously or when the
 * process terminates.
 */
struct pv_vm_to_be_destroyed {
	struct list_head list;
	unsigned long old_gmap_table;
	u64 handle;
	void *stor_var;
	unsigned long stor_base;
};

static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
	kvm->arch.pv.handle = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.stor_var = NULL;
}

int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/*
	 * The base storage is only freed if the destroy succeeded; on
	 * failure it is intentionally leaked, since the memory is no
	 * longer usable. This should never happen in practice.
	 */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}

int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	void *sida_addr;
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	/* Alloc Secure Instruction Data Area Designation */
	sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!sida_addr) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}
	vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	kvm_s390_clear_pv_state(kvm);
}

static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate the current guest storage length for the allocation
	 * of the variable storage, whose size depends on the number of
	 * megabytes of guest storage.
	 *
	 * Slots are sorted by GFN, so the end of the last slot determines
	 * the guest storage length.
	 */
	mutex_lock(&kvm->slots_lock);
	npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
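	/*
	 * Worked example with illustrative numbers: a 4 GiB guest spans
	 * 4096 HPAGE_SIZE (1 MB) blocks. Assuming a hypothetical
	 * guest_virt_var_stor_len of 0x200 bytes per block, this gives
	 * vlen = ALIGN(0x200 * 4096, PAGE_SIZE) + guest_virt_base_stor_len
	 *      = 0x200000 + guest_virt_base_stor_len bytes.
	 */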
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}

/**
 * kvm_s390_pv_dispose_one_leftover() - Clean up one leftover protected VM.
 * @kvm: the KVM that was associated with this leftover protected VM
 * @leftover: details about the leftover protected VM that needs a clean up
 * @rc: the RC code of the Destroy Secure Configuration UVC
 * @rrc: the RRC code of the Destroy Secure Configuration UVC
 *
 * Destroy one leftover protected VM.
 * On success, kvm->mm->context.protected_count will be decremented atomically
 * and all other resources used by the VM will be freed.
 *
 * Return: 0 in case of success, otherwise the nonzero condition code of the
 * Destroy Secure Configuration UVC
 */
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
					    struct pv_vm_to_be_destroyed *leftover,
					    u16 *rc, u16 *rrc)
{
	int cc;

	/* It used the destroy-fast UVC, nothing left to do here */
	if (!leftover->handle)
		goto done_fast;
	cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
	if (cc)
		return cc;
	/*
	 * Intentionally leak unusable memory. If the UVC fails, the memory
	 * used for the VM and its metadata is permanently unusable.
	 * This can only happen in case of a serious KVM or hardware bug; it
	 * is not expected to happen in normal operation.
	 */
	free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
	free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
	vfree(leftover->stor_var);
done_fast:
	atomic_dec(&kvm->mm->context.protected_count);
	return 0;
}

/**
 * kvm_s390_destroy_lower_2g() - Destroy the first 2GB of protected guest memory.
 * @kvm: the VM whose memory is to be cleared.
 *
 * Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
 * The CPUs of the protected VM need to be destroyed beforehand.
 */
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
{
	const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
	struct kvm_memory_slot *slot;
	unsigned long len;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* Take the memslot containing guest absolute address 0 */
	slot = gfn_to_memslot(kvm, 0);
	/* Clear all slots or parts thereof that are below 2GB */
	while (slot && slot->base_gfn < pages_2g) {
		len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
		s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
		/* Take the next memslot */
		slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_destroy_fast uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
		.header.len = sizeof(uvcb),
		.handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	if (rc)
		*rc = uvcb.header.rc;
	if (rrc)
		*rrc = uvcb.header.rrc;
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
		     uvcb.header.rc, uvcb.header.rrc);
	WARN_ONCE(cc && uvcb.header.rc != 0x104,
		  "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
		  kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}

static inline bool is_destroy_fast_available(void)
{
	return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
}

/**
 * kvm_s390_pv_set_aside() - Set aside a protected VM for later teardown.
 * @kvm: the VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Set aside the protected VM for a subsequent teardown. The VM will be able
 * to continue immediately as a non-secure VM, and the information needed to
 * properly tear down the protected VM is set aside. If another protected VM
 * was already set aside without starting its teardown, this function will
 * fail.
 * The CPUs of the protected VM need to be destroyed beforehand.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, -EINVAL if another protected VM was already set
 * aside, -ENOMEM if the system ran out of memory.
 */
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *priv;
	int res = 0;

	lockdep_assert_held(&kvm->lock);
	/*
	 * If another protected VM was already prepared for teardown, refuse.
	 * A normal deinitialization has to be performed instead.
	 */
	if (kvm->arch.pv.set_aside)
		return -EINVAL;

	/* Guest with segment type ASCE, refuse to destroy asynchronously */
	if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (is_destroy_fast_available()) {
		res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
	} else {
		priv->stor_var = kvm->arch.pv.stor_var;
		priv->stor_base = kvm->arch.pv.stor_base;
		priv->handle = kvm_s390_pv_get_handle(kvm);
		priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
		WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
		if (s390_replace_asce(kvm->arch.gmap))
			res = -ENOMEM;
	}

	if (res) {
		kfree(priv);
		return res;
	}

	kvm_s390_destroy_lower_2g(kvm);
	kvm_s390_clear_pv_state(kvm);
	kvm->arch.pv.set_aside = priv;

	*rc = UVC_RC_EXECUTED;
	*rrc = 42;
	return 0;
}

/**
 * kvm_s390_pv_deinit_vm() - Deinitialize the current protected VM
 * @kvm: the KVM whose protected VM needs to be deinitialized
 * @rc: the RC code of the UVC
 * @rrc: the RRC code of the UVC
 *
 * Deinitialize the current protected VM. This function will destroy and
 * cleanup the current protected VM, but it will not cleanup the guest
 * memory. This function should only be called when the protected VM has
 * just been created and therefore does not have any guest memory, or when
 * the caller cleans up the guest memory separately.
 *
 * This function should not fail, but if it does, the donated memory must
 * not be freed.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	if (!cc) {
		atomic_dec(&kvm->mm->context.protected_count);
		kvm_s390_pv_dealloc_vm(kvm);
	} else {
		/* Intended memory leak on "impossible" error */
		s390_replace_asce(kvm->arch.gmap);
	}
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);

	return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_cleanup_all() - Clean up all protected VMs associated
 * with a specific KVM.
 * @kvm: the KVM to be cleaned up
 * @rc: the RC code of the first failing UVC
 * @rrc: the RRC code of the first failing UVC
 *
 * This function will clean up all protected VMs associated with a KVM.
 * This includes the active one, the one prepared for deinitialization with
 * kvm_s390_pv_set_aside, and any still pending in the need_cleanup list.
 *
 * Context: kvm->lock needs to be held unless being called from
 * kvm_arch_destroy_vm.
 *
 * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
 */
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *cur;
	bool need_zap = false;
	u16 _rc, _rrc;
	int cc = 0;

	/*
	 * Nothing to do if the counter was already 0. Otherwise make sure
	 * the counter does not reach 0 before calling s390_uv_destroy_range.
	 */
	if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
		return 0;

	*rc = UVC_RC_EXECUTED;
	/* If the current VM is protected, destroy it */
	if (kvm_s390_pv_get_handle(kvm)) {
		cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
		need_zap = true;
	}

	/* If a previous protected VM was set aside, put it in the need_cleanup list */
	if (kvm->arch.pv.set_aside) {
		list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
		kvm->arch.pv.set_aside = NULL;
	}

	/* Cleanup all protected VMs in the need_cleanup list */
	while (!list_empty(&kvm->arch.pv.need_cleanup)) {
		cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
		need_zap = true;
		if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
			cc = 1;
			/*
			 * Only return the first error rc and rrc, so make
			 * sure it is not overwritten. All destroys will
			 * additionally be reported via KVM_UV_EVENT().
			 */
			if (*rc == UVC_RC_EXECUTED) {
				*rc = _rc;
				*rrc = _rrc;
			}
		}
		list_del(&cur->list);
		kfree(cur);
	}

	/*
	 * If the mm still has a mapping, try to mark all its pages as
	 * accessible. The counter should not reach zero before this
	 * cleanup has been performed.
	 */
	if (need_zap && mmget_not_zero(kvm->mm)) {
		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
		mmput(kvm->mm);
	}

	/* Now the counter can safely reach 0 */
	atomic_dec(&kvm->mm->context.protected_count);
	return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_aside_vm() - Teardown a previously set aside protected VM.
 * @kvm: the VM previously associated with the protected VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Tear down the protected VM that had been previously prepared for teardown
 * using kvm_s390_pv_set_aside(). Ideally this should be called by
 * userspace asynchronously from a separate thread.
 *
 * Context: kvm->lock must not be held.
 *
 * Return: 0 in case of success, -EINVAL if no protected VM had been
 * prepared for asynchronous teardown, -EIO in case of other errors.
 */
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *p;
	int ret = 0;

	lockdep_assert_not_held(&kvm->lock);
	mutex_lock(&kvm->lock);
	p = kvm->arch.pv.set_aside;
	kvm->arch.pv.set_aside = NULL;
	mutex_unlock(&kvm->lock);
	if (!p)
		return -EINVAL;

	/* When a fatal signal is received, stop immediately */
	if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
		goto done;
	if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
		ret = -EIO;
	kfree(p);
	p = NULL;
done:
	/*
	 * p is not NULL if we aborted because of a fatal signal, in which
	 * case queue the leftover for later cleanup.
	 */
	if (p) {
		mutex_lock(&kvm->lock);
		list_add(&p->list, &kvm->arch.pv.need_cleanup);
		mutex_unlock(&kvm->lock);
		/* Did not finish, but pretend things went well */
		*rc = UVC_RC_EXECUTED;
		*rrc = 42;
	}
	return ret;
}
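
/*
 * A sketch of the expected userspace flow for the asynchronous teardown
 * above (assuming the KVM_PV_ASYNC_CLEANUP_PREPARE and
 * KVM_PV_ASYNC_CLEANUP_PERFORM subcommands of the KVM_S390_PV_COMMAND
 * ioctl are the entry points; see Documentation/virt/kvm/api.rst):
 *
 *	KVM_PV_ASYNC_CLEANUP_PREPARE -> kvm_s390_pv_set_aside()
 *	... the VM keeps running as a non-secure VM ...
 *	KVM_PV_ASYNC_CLEANUP_PERFORM -> kvm_s390_pv_deinit_aside_vm()
 *	   (called from a separate userspace thread, without kvm->lock)
 */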

static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
					     struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
	u16 dummy;
	int r;

	/*
	 * No locking is needed since this is the last thread of the last user of this
	 * struct mm.
	 * When the struct kvm gets deinitialized, this notifier is also
	 * unregistered. This means that if this notifier runs, then the
	 * struct kvm is still valid.
	 */
	r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
		kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = kvm_s390_pv_mmu_notifier_release,
};

int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
	uvcb.conf_base_stor_origin =
		virt_to_phys((void *)kvm->arch.pv.stor_base);
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
	uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
	uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc, uvcb.flags.raw);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
	return 0;
}
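
/*
 * A sketch of how secure guest creation is expected to be driven from
 * userspace through the KVM_S390_PV_COMMAND ioctl (subcommand names taken
 * from the uapi headers; error handling omitted):
 *
 *	KVM_PV_ENABLE        -> kvm_s390_pv_init_vm()
 *	KVM_PV_SET_SEC_PARMS -> kvm_s390_pv_set_sec_parms()
 *	KVM_PV_UNPACK        -> kvm_s390_pv_unpack()  (once per image component)
 *	KVM_PV_VERIFY        -> Verify UVC; on success the guest may run
 */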

int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
}

static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = kvm_s390_pv_make_secure(kvm, addr, &uvcb);
	unsigned long vmaddr;
	bool unlocked;

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret == -ENXIO) {
		mmap_read_lock(kvm->mm);
		vmaddr = gfn_to_hva(kvm, gpa_to_gfn(addr));
		if (kvm_is_error_hva(vmaddr)) {
			ret = -EFAULT;
		} else {
			ret = fixup_user_fault(kvm->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
			if (!ret)
				ret = __gmap_link(kvm->arch.gmap, addr, vmaddr);
		}
		mmap_read_unlock(kvm->mm);
		if (!ret)
			return -EAGAIN;
		return ret;
	}

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}

int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	guard(srcu)(&kvm->srcu);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
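
/*
 * Illustrative userspace invocation of the unpack path above (the values
 * are made up; struct kvm_s390_pv_unp comes from the uapi headers):
 *
 *	struct kvm_s390_pv_unp unp = {
 *		.addr  = 0x10000,	// page-aligned guest address
 *		.size  = 0x20000,	// 128 KiB, i.e. 32 pages
 *		.tweak = 0,		// tweak prefix for decryption
 *	};
 *
 * passed as the data of a KVM_PV_UNPACK subcommand of KVM_S390_PV_COMMAND.
 */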

int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd	= UVC_CMD_CPU_SET_STATE,
		.header.len	= sizeof(uvcb),
		.cpu_handle	= kvm_s390_pv_cpu_get_handle(vcpu),
		.state		= state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}
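
/*
 * Example (illustrative, assuming the PV_CPU_STATE_* constants from
 * kvm-s390.h): a SIGP START handler would put the target VCPU into the
 * operating state with
 *
 *	rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
 */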

int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_cpu uvcb = {
		.header.cmd = UVC_CMD_DUMP_CPU,
		.header.len = sizeof(uvcb),
		.cpu_handle = vcpu->arch.pv.handle,
		.dump_area_origin = (u64)buff,
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc;
}

/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE

/**
 * kvm_s390_pv_dump_stor_state() - Dump the storage state of a guest range.
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @gaddr: Starting absolute guest address for which the storage state
 *	   is requested.
 * @buff_user_len: Length of the buff_user buffer
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Stores buff_user_len bytes of tweak component values to buff_user,
 * starting with the 1MB block specified by the absolute guest address
 * (gaddr). On return, *gaddr is updated to point past the last block for
 * which data was written, so that userspace can resume from there.
 * buff_user might be written to even if an error rc is returned, for
 * instance if we encounter a fault after writing the first page of data.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_stor_state uvcb = {
		.header.cmd = UVC_CMD_DUMP_CONF_STOR_STATE,
		.header.len = sizeof(uvcb),
		.config_handle = kvm->arch.pv.handle,
		.gaddr = *gaddr,
		.dump_area_origin = 0,
	};
	const u64 increment_len = uv_info.conf_dump_storage_state_len;
	size_t buff_kvm_size;
	size_t size_done = 0;
	u8 *buff_kvm = NULL;
	int cc, ret;

	ret = -EINVAL;
	/* UV call processes 1MB guest storage chunks at a time */
	if (!IS_ALIGNED(*gaddr, HPAGE_SIZE))
		goto out;

	/*
	 * We provide the storage state for 1MB chunks of guest
	 * storage. The buffer will need to be aligned to
	 * conf_dump_storage_state_len so we don't end on a partial
	 * chunk.
	 */
	if (!buff_user_len ||
	    !IS_ALIGNED(buff_user_len, increment_len))
		goto out;
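	/*
	 * Worked example: each increment_len chunk of output covers 1MB of
	 * guest storage, so dumping 1 GiB of guest storage in one call
	 * requires buff_user_len to be 1024 * increment_len bytes.
	 */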

	/*
	 * Allocate a buffer from which we will later copy to the user
	 * process. We don't want userspace to dictate our buffer size
	 * so we limit it to DUMP_BUFF_LEN.
	 */
	ret = -ENOMEM;
	buff_kvm_size = min_t(u64, buff_user_len, DUMP_BUFF_LEN);
	buff_kvm = vzalloc(buff_kvm_size);
	if (!buff_kvm)
		goto out;

	ret = 0;
	uvcb.dump_area_origin = (u64)buff_kvm;
	/* We will loop until the user buffer is filled or an error occurs */
	do {
		/* Get 1MB worth of guest storage state data */
		cc = uv_call_sched(0, (u64)&uvcb);

		/* All or nothing */
		if (cc) {
			ret = -EINVAL;
			break;
		}

		size_done += increment_len;
		uvcb.dump_area_origin += increment_len;
		buff_user_len -= increment_len;
		uvcb.gaddr += HPAGE_SIZE;

		/* KVM Buffer full, time to copy to the process */
		if (!buff_user_len || size_done == DUMP_BUFF_LEN) {
			if (copy_to_user(buff_user, buff_kvm, size_done)) {
				ret = -EFAULT;
				break;
			}

			buff_user += size_done;
			size_done = 0;
			uvcb.dump_area_origin = (u64)buff_kvm;
		}
	} while (buff_user_len);

	/* Report back where we ended dumping */
	*gaddr = uvcb.gaddr;

	/* Only log errors, we don't want to spam the log */
out:
	if (ret)
		KVM_UV_EVENT(kvm, 3,
			     "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
			     uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	vfree(buff_kvm);

	return ret;
}

/**
 * kvm_s390_pv_dump_complete() - Complete the dump of a protected VM.
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Completes the dumping operation and writes the completion data to
 * user space.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_complete complete = {
		.header.len = sizeof(complete),
		.header.cmd = UVC_CMD_DUMP_COMPLETE,
		.config_handle = kvm_s390_pv_get_handle(kvm),
	};
	u64 *compl_data;
	int ret;

	/* Allocate dump area */
	compl_data = vzalloc(uv_info.conf_dump_finalize_len);
	if (!compl_data)
		return -ENOMEM;
	complete.dump_area_origin = (u64)compl_data;

	ret = uv_call_sched(0, (u64)&complete);
	*rc = complete.header.rc;
	*rrc = complete.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
		     complete.header.rc, complete.header.rrc);

	if (!ret) {
		/*
		 * kvm_s390_pv_dealloc_vm() will also (mem)set
		 * this to false on a reboot or other destroy
		 * operation for this vm.
		 */
		kvm->arch.pv.dumping = false;
		kvm_s390_vcpu_unblock_all(kvm);
		ret = copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len);
		if (ret)
			ret = -EFAULT;
	}
	vfree(compl_data);
	/* If the UVC returned an error, translate it to -EINVAL */
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}