1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
5 * AMD SVM-SEV support
16 #include <linux/psp-sev.h>
22 #include <uapi/linux/sev-guest.h>
36 #include "trace.h"
44 /* enable/disable SEV support */
48 /* enable/disable SEV-ES support */
52 /* enable/disable SEV-SNP support */
56 /* enable/disable SEV-ES DebugSwap support */
65 /* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
111 return -EBUSY; in sev_flush_asids()
130 sev_snp_enabled ? "-SNP" : "", ret, error); in sev_flush_asids()
137 return !!to_kvm_sev_info(kvm)->enc_context_owner; in is_mirroring_enc_context()
142 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_vcpu_has_debug_swap()
143 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_vcpu_has_debug_swap()
145 return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP; in sev_vcpu_has_debug_swap()
154 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */ in __sev_recycle_asids()
164 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; in sev_misc_cg_try_charge()
165 return misc_cg_try_charge(type, sev->misc_cg, 1); in sev_misc_cg_try_charge()
170 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; in sev_misc_cg_uncharge()
171 misc_cg_uncharge(type, sev->misc_cg, 1); in sev_misc_cg_uncharge()
177  * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid. in sev_asid_new()
178  * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1. in sev_asid_new()
182 unsigned int min_asid = sev->es_active ? 1 : min_sev_asid; in sev_asid_new()
183 unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid; in sev_asid_new()
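/*
 * Editor's note (illustrative numbers, not from the source): if the firmware
 * reports min_sev_asid = 100 and max_sev_asid = 509, plain SEV guests
 * allocate from ASIDs 100..509 while SEV-ES/SEV-SNP guests allocate from the
 * low range 1..99.
 */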
189 return -ENOTTY; in sev_asid_new()
191 WARN_ON(sev->misc_cg); in sev_asid_new()
192 sev->misc_cg = get_current_misc_cg(); in sev_asid_new()
195 put_misc_cg(sev->misc_cg); in sev_asid_new()
196 sev->misc_cg = NULL; in sev_asid_new()
210 ret = -EBUSY; in sev_asid_new()
218 sev->asid = asid; in sev_asid_new()
222 put_misc_cg(sev->misc_cg); in sev_asid_new()
223 sev->misc_cg = NULL; in sev_asid_new()
229 return to_kvm_sev_info(kvm)->asid; in sev_get_asid()
239 __set_bit(sev->asid, sev_reclaim_asid_bitmap); in sev_asid_free()
243 sd->sev_vmcbs[sev->asid] = NULL; in sev_asid_free()
249 put_misc_cg(sev->misc_cg); in sev_asid_free()
250 sev->misc_cg = NULL; in sev_asid_free()
265 * Transition a page to hypervisor-owned/shared state in the RMP table. This
273 return -EIO; in kvm_rmp_make_shared()
280 * Certain page-states, such as Pre-Guest and Firmware pages (as documented
281 * in Chapter 5 of the SEV-SNP Firmware ABI under "Page States") cannot be
282 * directly transitioned back to normal/hypervisor-owned state via RMPUPDATE
302 return -EIO; in snp_page_reclaim()
306 return -EIO; in snp_page_reclaim()
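/*
 * Editor's note: as the surrounding code shows, reclaim is a two-step
 * sequence: the firmware page-reclaim command (SEV_CMD_SNP_PAGE_RECLAIM) is
 * issued first, and only then is the RMP entry flipped back to a shared,
 * hypervisor-owned state via kvm_rmp_make_shared(); a failure in either step
 * is reported as -EIO above rather than handing the page back for reuse.
 */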
338 * - Both pages are from shared guest memory, so they need to be protected
342 * on what type of memory backends userspace can use for shared guest
345 * - The response page needs to be switched to Firmware-owned[1] state
351 * Both of these issues can be avoided completely by using separately-allocated
361 * [1] See the "Page States" section of the SEV-SNP Firmware ABI for more
362 * details on Firmware-owned pages, along with "RMP and VMPL Access Checks"
372 return -ENOMEM; in snp_guest_req_init()
374 sev->guest_resp_buf = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); in snp_guest_req_init()
375 if (!sev->guest_resp_buf) { in snp_guest_req_init()
377 return -EIO; in snp_guest_req_init()
380 sev->guest_req_buf = page_address(req_page); in snp_guest_req_init()
381 mutex_init(&sev->guest_req_mutex); in snp_guest_req_init()
390 if (sev->guest_resp_buf) in snp_guest_req_cleanup()
391 snp_free_firmware_page(sev->guest_resp_buf); in snp_guest_req_cleanup()
393 if (sev->guest_req_buf) in snp_guest_req_cleanup()
394 __free_page(virt_to_page(sev->guest_req_buf)); in snp_guest_req_cleanup()
396 sev->guest_req_buf = NULL; in snp_guest_req_cleanup()
397 sev->guest_resp_buf = NULL; in snp_guest_req_cleanup()
410 if (kvm->created_vcpus) in __sev_guest_init()
411 return -EINVAL; in __sev_guest_init()
413 if (data->flags) in __sev_guest_init()
414 return -EINVAL; in __sev_guest_init()
416 if (data->vmsa_features & ~valid_vmsa_features) in __sev_guest_init()
417 return -EINVAL; in __sev_guest_init()
419 if (data->ghcb_version > GHCB_VERSION_MAX || (!es_active && data->ghcb_version)) in __sev_guest_init()
420 return -EINVAL; in __sev_guest_init()
422 if (unlikely(sev->active)) in __sev_guest_init()
423 return -EINVAL; in __sev_guest_init()
425 sev->active = true; in __sev_guest_init()
426 sev->es_active = es_active; in __sev_guest_init()
427 sev->vmsa_features = data->vmsa_features; in __sev_guest_init()
428 sev->ghcb_version = data->ghcb_version; in __sev_guest_init()
432 * by version 2 of the GHCB protocol, so default to that for SEV-ES in __sev_guest_init()
435 if (sev->es_active && !sev->ghcb_version) in __sev_guest_init()
436 sev->ghcb_version = GHCB_VERSION_DEFAULT; in __sev_guest_init()
439 sev->vmsa_features |= SVM_SEV_FEAT_SNP_ACTIVE; in __sev_guest_init()
457 INIT_LIST_HEAD(&sev->regions_list); in __sev_guest_init()
458 INIT_LIST_HEAD(&sev->mirror_vms); in __sev_guest_init()
459 sev->need_init = false; in __sev_guest_init()
466 argp->error = init_args.error; in __sev_guest_init()
468 sev->asid = 0; in __sev_guest_init()
470 sev->vmsa_features = 0; in __sev_guest_init()
471 sev->es_active = false; in __sev_guest_init()
472 sev->active = false; in __sev_guest_init()
484 if (kvm->arch.vm_type != KVM_X86_DEFAULT_VM) in sev_guest_init()
485 return -EINVAL; in sev_guest_init()
487 vm_type = (argp->id == KVM_SEV_INIT ? KVM_X86_SEV_VM : KVM_X86_SEV_ES_VM); in sev_guest_init()
503 if (!to_kvm_sev_info(kvm)->need_init) in sev_guest_init2()
504 return -EINVAL; in sev_guest_init2()
506 if (kvm->arch.vm_type != KVM_X86_SEV_VM && in sev_guest_init2()
507 kvm->arch.vm_type != KVM_X86_SEV_ES_VM && in sev_guest_init2()
508 kvm->arch.vm_type != KVM_X86_SNP_VM) in sev_guest_init2()
509 return -EINVAL; in sev_guest_init2()
511 if (copy_from_user(&data, u64_to_user_ptr(argp->data), sizeof(data))) in sev_guest_init2()
512 return -EFAULT; in sev_guest_init2()
514 return __sev_guest_init(kvm, argp, &data, kvm->arch.vm_type); in sev_guest_init2()
536 return -EBADF; in __sev_issue_cmd()
545 return __sev_issue_cmd(sev->fd, id, data, error); in sev_issue_cmd()
554 int *error = &argp->error; in sev_launch_start()
558 return -ENOTTY; in sev_launch_start()
560 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_launch_start()
561 return -EFAULT; in sev_launch_start()
591 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error); in sev_launch_start()
604 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) { in sev_launch_start()
606 ret = -EFAULT; in sev_launch_start()
610 sev->handle = start.handle; in sev_launch_start()
611 sev->fd = argp->sev_fd; in sev_launch_start()
632 lockdep_assert_held(&kvm->lock); in sev_pin_memory()
635 return ERR_PTR(-EINVAL); in sev_pin_memory()
639 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; in sev_pin_memory()
640 npages = (last - first + 1); in sev_pin_memory()
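/*
 * Editor's note (worked example, assuming 4 KiB pages): uaddr = 0x10ff0 and
 * ulen = 0x20 span two pages even though ulen < PAGE_SIZE: first = 0x10,
 * last = 0x11, so npages = 2.
 */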
642 locked = sev->pages_locked + npages; in sev_pin_memory()
646 return ERR_PTR(-ENOMEM); in sev_pin_memory()
650 return ERR_PTR(-EINVAL); in sev_pin_memory()
660 return ERR_PTR(-ENOMEM); in sev_pin_memory()
666 ret = -ENOMEM; in sev_pin_memory()
671 sev->pages_locked = locked; in sev_pin_memory()
688 to_kvm_sev_info(kvm)->pages_locked -= npages; in sev_unpin_memory()
738 return -ENOTTY; in sev_launch_update_data()
740 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_launch_update_data()
741 return -EFAULT; in sev_launch_update_data()
753 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in in sev_launch_update_data()
759 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_update_data()
765 * If the user buffer is not page-aligned, calculate the offset in sev_launch_update_data()
768 offset = vaddr & (PAGE_SIZE - 1); in sev_launch_update_data()
773 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size); in sev_launch_update_data()
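/*
 * Editor's note (hypothetical values): if vaddr is 0x200 bytes into its page,
 * size = 0x3000 remains, and the current run of physically contiguous pinned
 * pages is 3 pages long, then len = 3 * 4096 - 0x200 = 0x2e00; the remaining
 * 0x200 bytes are encrypted on the next loop iteration.
 */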
777 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error); in sev_launch_update_data()
781 size -= len; in sev_launch_update_data()
798 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_vmsa()
799 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_es_sync_vmsa()
800 struct sev_es_save_area *save = svm->sev_es.vmsa; in sev_es_sync_vmsa()
807 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) in sev_es_sync_vmsa()
808 return -EINVAL; in sev_es_sync_vmsa()
811 * SEV-ES will use a VMSA that is pointed to by the VMCB, not in sev_es_sync_vmsa()
814 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state. in sev_es_sync_vmsa()
816 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); in sev_es_sync_vmsa()
819 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; in sev_es_sync_vmsa()
820 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; in sev_es_sync_vmsa()
821 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in sev_es_sync_vmsa()
822 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; in sev_es_sync_vmsa()
823 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; in sev_es_sync_vmsa()
824 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; in sev_es_sync_vmsa()
825 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; in sev_es_sync_vmsa()
826 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; in sev_es_sync_vmsa()
828 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; in sev_es_sync_vmsa()
829 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; in sev_es_sync_vmsa()
830 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; in sev_es_sync_vmsa()
831 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; in sev_es_sync_vmsa()
832 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; in sev_es_sync_vmsa()
833 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; in sev_es_sync_vmsa()
834 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; in sev_es_sync_vmsa()
835 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; in sev_es_sync_vmsa()
837 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; in sev_es_sync_vmsa()
839 /* Sync some non-GPR registers before encrypting */ in sev_es_sync_vmsa()
840 save->xcr0 = svm->vcpu.arch.xcr0; in sev_es_sync_vmsa()
841 save->pkru = svm->vcpu.arch.pkru; in sev_es_sync_vmsa()
842 save->xss = svm->vcpu.arch.ia32_xss; in sev_es_sync_vmsa()
843 save->dr6 = svm->vcpu.arch.dr6; in sev_es_sync_vmsa()
845 save->sev_features = sev->vmsa_features; in sev_es_sync_vmsa()
851 if (vcpu->kvm->arch.vm_type != KVM_X86_DEFAULT_VM) { in sev_es_sync_vmsa()
852 xsave = &vcpu->arch.guest_fpu.fpstate->regs.xsave; in sev_es_sync_vmsa()
853 save->x87_dp = xsave->i387.rdp; in sev_es_sync_vmsa()
854 save->mxcsr = xsave->i387.mxcsr; in sev_es_sync_vmsa()
855 save->x87_ftw = xsave->i387.twd; in sev_es_sync_vmsa()
856 save->x87_fsw = xsave->i387.swd; in sev_es_sync_vmsa()
857 save->x87_fcw = xsave->i387.cwd; in sev_es_sync_vmsa()
858 save->x87_fop = xsave->i387.fop; in sev_es_sync_vmsa()
859 save->x87_ds = 0; in sev_es_sync_vmsa()
860 save->x87_cs = 0; in sev_es_sync_vmsa()
861 save->x87_rip = xsave->i387.rip; in sev_es_sync_vmsa()
867 * an 8*8 bytes area with bytes 0-7, and an 8*2 bytes in sev_es_sync_vmsa()
868 * area with bytes 8-9 of each register. in sev_es_sync_vmsa()
870 d = save->fpreg_x87 + i * 8; in sev_es_sync_vmsa()
871 s = ((u8 *)xsave->i387.st_space) + i * 16; in sev_es_sync_vmsa()
873 save->fpreg_x87[64 + i * 2] = s[8]; in sev_es_sync_vmsa()
874 save->fpreg_x87[64 + i * 2 + 1] = s[9]; in sev_es_sync_vmsa()
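/*
 * Editor's note: in the XSAVE image each x87/MMX register occupies a 16-byte
 * slot of st_space[] with only the low 10 bytes populated, whereas the VMSA
 * packs them: bytes 0-7 of register i land at fpreg_x87[i * 8] and bytes 8-9
 * at fpreg_x87[64 + i * 2], which is what the copies above implement.
 */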
876 memcpy(save->fpreg_xmm, xsave->i387.xmm_space, 256); in sev_es_sync_vmsa()
880 memcpy(save->fpreg_ymm, s, 256); in sev_es_sync_vmsa()
882 memset(save->fpreg_ymm, 0, 256); in sev_es_sync_vmsa()
898 if (vcpu->guest_debug) { in __sev_launch_update_vmsa()
899 pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported"); in __sev_launch_update_vmsa()
900 return -EINVAL; in __sev_launch_update_vmsa()
903 /* Perform some pre-encryption checks against the VMSA */ in __sev_launch_update_vmsa()
909 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of in __sev_launch_update_vmsa()
913 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); in __sev_launch_update_vmsa()
916 vmsa.handle = to_kvm_sev_info(kvm)->handle; in __sev_launch_update_vmsa()
917 vmsa.address = __sme_pa(svm->sev_es.vmsa); in __sev_launch_update_vmsa()
924 * SEV-ES guests maintain an encrypted version of their FPU in __sev_launch_update_vmsa()
926 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't in __sev_launch_update_vmsa()
929 fpstate_set_confidential(&vcpu->arch.guest_fpu); in __sev_launch_update_vmsa()
930 vcpu->arch.guest_state_protected = true; in __sev_launch_update_vmsa()
933 * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it in __sev_launch_update_vmsa()
949 return -ENOTTY; in sev_launch_update_vmsa()
952 ret = mutex_lock_killable(&vcpu->mutex); in sev_launch_update_vmsa()
956 ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error); in sev_launch_update_vmsa()
958 mutex_unlock(&vcpu->mutex); in sev_launch_update_vmsa()
968 void __user *measure = u64_to_user_ptr(argp->data); in sev_launch_measure()
976 return -ENOTTY; in sev_launch_measure()
979 return -EFAULT; in sev_launch_measure()
990 return -EINVAL; in sev_launch_measure()
994 return -ENOMEM; in sev_launch_measure()
1001 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_measure()
1002 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error); in sev_launch_measure()
1015 ret = -EFAULT; in sev_launch_measure()
1021 ret = -EFAULT; in sev_launch_measure()
1032 return -ENOTTY; in sev_launch_finish()
1034 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_finish()
1035 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error); in sev_launch_finish()
1045 return -ENOTTY; in sev_guest_status()
1049 data.handle = to_kvm_sev_info(kvm)->handle; in sev_guest_status()
1050 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error); in sev_guest_status()
1058 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) in sev_guest_status()
1059 ret = -EFAULT; in sev_guest_status()
1071 data.handle = to_kvm_sev_info(kvm)->handle; in __sev_issue_dbg_cmd()
1105 /* if inputs are not 16-byte then use intermediate buffer */ in __sev_dbg_decrypt_user()
1111 return -ENOMEM; in __sev_dbg_decrypt_user()
1123 ret = -EFAULT; in __sev_dbg_decrypt_user()
1147 return -ENOMEM; in __sev_dbg_encrypt_user()
1151 return -EFAULT; in __sev_dbg_encrypt_user()
1158 * If destination buffer or length is not aligned then do read-modify-write: in __sev_dbg_encrypt_user()
1159 * - decrypt destination in an intermediate buffer in __sev_dbg_encrypt_user()
1160 * - copy the source buffer in an intermediate buffer in __sev_dbg_encrypt_user()
1161 * - use the intermediate buffer as source buffer in __sev_dbg_encrypt_user()
1168 ret = -ENOMEM; in __sev_dbg_encrypt_user()
1189 ret = -EFAULT; in __sev_dbg_encrypt_user()
1220 return -ENOTTY; in sev_dbg_crypt()
1222 if (copy_from_user(&debug, u64_to_user_ptr(argp->data), sizeof(debug))) in sev_dbg_crypt()
1223 return -EFAULT; in sev_dbg_crypt()
1226 return -EINVAL; in sev_dbg_crypt()
1228 return -EINVAL; in sev_dbg_crypt()
1250 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify in sev_dbg_crypt()
1263 len = min_t(size_t, (PAGE_SIZE - s_off), size); in sev_dbg_crypt()
1270 len, &argp->error); in sev_dbg_crypt()
1277 len, &argp->error); in sev_dbg_crypt()
1287 size -= len; in sev_dbg_crypt()
1303 return -ENOTTY; in sev_launch_secret()
1305 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_launch_secret()
1306 return -EFAULT; in sev_launch_secret()
1313 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in in sev_launch_secret()
1323 ret = -EINVAL; in sev_launch_secret()
1329 offset = params.guest_uaddr & (PAGE_SIZE - 1); in sev_launch_secret()
1350 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_secret()
1351 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error); in sev_launch_secret()
1369 void __user *report = u64_to_user_ptr(argp->data); in sev_get_attestation_report()
1377 return -ENOTTY; in sev_get_attestation_report()
1379 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_get_attestation_report()
1380 return -EFAULT; in sev_get_attestation_report()
1391 return -EINVAL; in sev_get_attestation_report()
1395 return -ENOMEM; in sev_get_attestation_report()
1402 data.handle = to_kvm_sev_info(kvm)->handle; in sev_get_attestation_report()
1403 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error); in sev_get_attestation_report()
1415 ret = -EFAULT; in sev_get_attestation_report()
1421 ret = -EFAULT; in sev_get_attestation_report()
1436 data.handle = to_kvm_sev_info(kvm)->handle; in __sev_send_start_query_session_length()
1437 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); in __sev_send_start_query_session_length()
1439 params->session_len = data.session_len; in __sev_send_start_query_session_length()
1440 if (copy_to_user(u64_to_user_ptr(argp->data), params, in __sev_send_start_query_session_length()
1442 ret = -EFAULT; in __sev_send_start_query_session_length()
1456 return -ENOTTY; in sev_send_start()
1458 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_send_start()
1460 return -EFAULT; in sev_send_start()
1470 return -EINVAL; in sev_send_start()
1475 return -ENOMEM; in sev_send_start()
1509 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_start()
1511 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); in sev_send_start()
1515 ret = -EFAULT; in sev_send_start()
1521 if (copy_to_user(u64_to_user_ptr(argp->data), &params, in sev_send_start()
1523 ret = -EFAULT; in sev_send_start()
1545 data.handle = to_kvm_sev_info(kvm)->handle; in __sev_send_update_data_query_lengths()
1546 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); in __sev_send_update_data_query_lengths()
1548 params->hdr_len = data.hdr_len; in __sev_send_update_data_query_lengths()
1549 params->trans_len = data.trans_len; in __sev_send_update_data_query_lengths()
1551 if (copy_to_user(u64_to_user_ptr(argp->data), params, in __sev_send_update_data_query_lengths()
1553 ret = -EFAULT; in __sev_send_update_data_query_lengths()
1568 return -ENOTTY; in sev_send_update_data()
1570 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_send_update_data()
1572 return -EFAULT; in sev_send_update_data()
1580 return -EINVAL; in sev_send_update_data()
1583 offset = params.guest_uaddr & (PAGE_SIZE - 1); in sev_send_update_data()
1585 return -EINVAL; in sev_send_update_data()
1594 ret = -ENOMEM; in sev_send_update_data()
1609 /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ in sev_send_update_data()
1613 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_update_data()
1615 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); in sev_send_update_data()
1623 ret = -EFAULT; in sev_send_update_data()
1630 ret = -EFAULT; in sev_send_update_data()
1647 return -ENOTTY; in sev_send_finish()
1649 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_finish()
1650 return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error); in sev_send_finish()
1658 return -ENOTTY; in sev_send_cancel()
1660 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_cancel()
1661 return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error); in sev_send_cancel()
1669 int *error = &argp->error; in sev_receive_start()
1675 return -ENOTTY; in sev_receive_start()
1678 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_receive_start()
1680 return -EFAULT; in sev_receive_start()
1685 return -EINVAL; in sev_receive_start()
1707 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start, in sev_receive_start()
1720 if (copy_to_user(u64_to_user_ptr(argp->data), in sev_receive_start()
1722 ret = -EFAULT; in sev_receive_start()
1727 sev->handle = start.handle; in sev_receive_start()
1728 sev->fd = argp->sev_fd; in sev_receive_start()
1748 return -EINVAL; in sev_receive_update_data()
1750 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_receive_update_data()
1752 return -EFAULT; in sev_receive_update_data()
1757 return -EINVAL; in sev_receive_update_data()
1760 offset = params.guest_uaddr & (PAGE_SIZE - 1); in sev_receive_update_data()
1762 return -EINVAL; in sev_receive_update_data()
1789 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP in sev_receive_update_data()
1795 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ in sev_receive_update_data()
1799 data.handle = to_kvm_sev_info(kvm)->handle; in sev_receive_update_data()
1802 &argp->error); in sev_receive_update_data()
1819 return -ENOTTY; in sev_receive_finish()
1821 data.handle = to_kvm_sev_info(kvm)->handle; in sev_receive_finish()
1822 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); in sev_receive_finish()
1828 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES in is_cmd_allowed_from_mirror()
1843 int r = -EBUSY; in sev_lock_two_vms()
1846 return -EINVAL; in sev_lock_two_vms()
1852 if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1)) in sev_lock_two_vms()
1853 return -EBUSY; in sev_lock_two_vms()
1855 if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1)) in sev_lock_two_vms()
1858 r = -EINTR; in sev_lock_two_vms()
1859 if (mutex_lock_killable(&dst_kvm->lock)) in sev_lock_two_vms()
1861 if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING)) in sev_lock_two_vms()
1866 mutex_unlock(&dst_kvm->lock); in sev_lock_two_vms()
1868 atomic_set_release(&src_sev->migration_in_progress, 0); in sev_lock_two_vms()
1870 atomic_set_release(&dst_sev->migration_in_progress, 0); in sev_lock_two_vms()
1879 mutex_unlock(&dst_kvm->lock); in sev_unlock_two_vms()
1880 mutex_unlock(&src_kvm->lock); in sev_unlock_two_vms()
1881 atomic_set_release(&dst_sev->migration_in_progress, 0); in sev_unlock_two_vms()
1882 atomic_set_release(&src_sev->migration_in_progress, 0); in sev_unlock_two_vms()
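/*
 * Editor's note: the atomic_cmpxchg_acquire() calls in sev_lock_two_vms()
 * pair with the atomic_set_release() calls above, so a concurrent migration
 * attempt involving either VM fails fast with -EBUSY instead of blocking on
 * kvm->lock.
 */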
1899 if (mutex_lock_killable_nested(&vcpu->mutex, role)) in sev_lock_vcpus_for_migration()
1910 mutex_release(&vcpu->mutex.dep_map, _THIS_IP_); in sev_lock_vcpus_for_migration()
1924 mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_); in sev_lock_vcpus_for_migration()
1927 mutex_unlock(&vcpu->mutex); in sev_lock_vcpus_for_migration()
1929 return -EINTR; in sev_lock_vcpus_for_migration()
1942 mutex_acquire(&vcpu->mutex.dep_map, in sev_unlock_vcpus_for_migration()
1945 mutex_unlock(&vcpu->mutex); in sev_unlock_vcpus_for_migration()
1958 dst->active = true; in sev_migrate_from()
1959 dst->asid = src->asid; in sev_migrate_from()
1960 dst->handle = src->handle; in sev_migrate_from()
1961 dst->pages_locked = src->pages_locked; in sev_migrate_from()
1962 dst->enc_context_owner = src->enc_context_owner; in sev_migrate_from()
1963 dst->es_active = src->es_active; in sev_migrate_from()
1964 dst->vmsa_features = src->vmsa_features; in sev_migrate_from()
1966 src->asid = 0; in sev_migrate_from()
1967 src->active = false; in sev_migrate_from()
1968 src->handle = 0; in sev_migrate_from()
1969 src->pages_locked = 0; in sev_migrate_from()
1970 src->enc_context_owner = NULL; in sev_migrate_from()
1971 src->es_active = false; in sev_migrate_from()
1973 list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list); in sev_migrate_from()
1978 * to the source, so there's no danger of use-after-free. in sev_migrate_from()
1980 list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms); in sev_migrate_from()
1981 list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) { in sev_migrate_from()
1984 mirror->enc_context_owner = dst_kvm; in sev_migrate_from()
1992 struct kvm_sev_info *owner_sev_info = to_kvm_sev_info(dst->enc_context_owner); in sev_migrate_from()
1994 list_del(&src->mirror_entry); in sev_migrate_from()
1995 list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms); in sev_migrate_from()
2003 if (!dst->es_active) in sev_migrate_from()
2018 memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es)); in sev_migrate_from()
2019 dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa; in sev_migrate_from()
2020 dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa; in sev_migrate_from()
2021 dst_vcpu->arch.guest_state_protected = true; in sev_migrate_from()
2023 memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es)); in sev_migrate_from()
2024 src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE; in sev_migrate_from()
2025 src_svm->vmcb->control.vmsa_pa = INVALID_PAGE; in sev_migrate_from()
2026 src_vcpu->arch.guest_state_protected = false; in sev_migrate_from()
2038 if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) in sev_check_source_vcpus()
2039 return -EINVAL; in sev_check_source_vcpus()
2042 if (!src_vcpu->arch.guest_state_protected) in sev_check_source_vcpus()
2043 return -EINVAL; in sev_check_source_vcpus()
2059 return -EBADF; in sev_vm_move_enc_context_from()
2062 return -EBADF; in sev_vm_move_enc_context_from()
2064 source_kvm = fd_file(f)->private_data; in sev_vm_move_enc_context_from()
2069 if (kvm->arch.vm_type != source_kvm->arch.vm_type || in sev_vm_move_enc_context_from()
2071 ret = -EINVAL; in sev_vm_move_enc_context_from()
2077 dst_sev->misc_cg = get_current_misc_cg(); in sev_vm_move_enc_context_from()
2079 if (dst_sev->misc_cg != src_sev->misc_cg) { in sev_vm_move_enc_context_from()
2110 put_misc_cg(cg_cleanup_sev->misc_cg); in sev_vm_move_enc_context_from()
2111 cg_cleanup_sev->misc_cg = NULL; in sev_vm_move_enc_context_from()
2120 return -ENXIO; in sev_dev_get_attr()
2128 return -ENXIO; in sev_dev_get_attr()
2150 rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_GCTX_CREATE, &data, &argp->error); in snp_context_create()
2152 pr_warn("Failed to create SEV-SNP context, rc %d fw_error %d", in snp_context_create()
2153 rc, argp->error); in snp_context_create()
2166 data.gctx_paddr = __psp_pa(sev->snp_context); in snp_bind_asid()
2179 return -ENOTTY; in snp_launch_start()
2181 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in snp_launch_start()
2182 return -EFAULT; in snp_launch_start()
2185 if (sev->snp_context) in snp_launch_start()
2186 return -EINVAL; in snp_launch_start()
2189 return -EINVAL; in snp_launch_start()
2192 return -EINVAL; in snp_launch_start()
2197 return -EINVAL; in snp_launch_start()
2200 return -EINVAL; in snp_launch_start()
2202 sev->snp_context = snp_context_create(kvm, argp); in snp_launch_start()
2203 if (!sev->snp_context) in snp_launch_start()
2204 return -ENOTTY; in snp_launch_start()
2206 start.gctx_paddr = __psp_pa(sev->snp_context); in snp_launch_start()
2209 rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_START, &start, &argp->error); in snp_launch_start()
2216 sev->fd = argp->sev_fd; in snp_launch_start()
2217 rc = snp_bind_asid(kvm, &argp->error); in snp_launch_start()
2219 pr_debug("%s: Failed to bind ASID to SEV-SNP context, rc %d\n", in snp_launch_start()
2247 if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src)) in sev_gmem_post_populate()
2248 return -EINVAL; in sev_gmem_post_populate()
2259 ret = ret ? -EINVAL : -EEXIST; in sev_gmem_post_populate()
2267 ret = -EFAULT; in sev_gmem_post_populate()
2280 fw_args.gctx_paddr = __psp_pa(sev->snp_context); in sev_gmem_post_populate()
2283 fw_args.page_type = sev_populate_args->type; in sev_gmem_post_populate()
2285 ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE, in sev_gmem_post_populate()
2286 &fw_args, &sev_populate_args->fw_error); in sev_gmem_post_populate()
2301 * unencrypted so it can be used for debugging and error-reporting. in sev_gmem_post_populate()
2308 sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID && in sev_gmem_post_populate()
2309 sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) { in sev_gmem_post_populate()
2318 /* pfn + i is hypervisor-owned now, so skip below cleanup for it. */ in sev_gmem_post_populate()
2319 n_private--; in sev_gmem_post_populate()
2323 __func__, ret, sev_populate_args->fw_error, n_private); in sev_gmem_post_populate()
2340 if (!sev_snp_guest(kvm) || !sev->snp_context) in snp_launch_update()
2341 return -EINVAL; in snp_launch_update()
2343 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in snp_launch_update()
2344 return -EFAULT; in snp_launch_update()
2355 return -EINVAL; in snp_launch_update()
2361 * state, the following pre-conditions are verified: in snp_launch_update()
2369 * The KVM MMU relies on kvm->mmu_invalidate_seq to retry nested page in snp_launch_update()
2372 * here. However, kvm->slots_lock guards against both this as well as in snp_launch_update()
2378 mutex_lock(&kvm->slots_lock); in snp_launch_update()
2382 ret = -EINVAL; in snp_launch_update()
2386 sev_populate_args.sev_fd = argp->sev_fd; in snp_launch_update()
2393 argp->error = sev_populate_args.fw_error; in snp_launch_update()
2395 __func__, count, argp->error); in snp_launch_update()
2396 ret = -EIO; in snp_launch_update()
2399 params.len -= count * PAGE_SIZE; in snp_launch_update()
2404 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) in snp_launch_update()
2405 ret = -EFAULT; in snp_launch_update()
2409 mutex_unlock(&kvm->slots_lock); in snp_launch_update()
2422 data.gctx_paddr = __psp_pa(sev->snp_context); in snp_launch_update_vmsa()
2427 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT; in snp_launch_update_vmsa()
2434 ret = rmp_make_private(pfn, INITIAL_VMSA_GPA, PG_LEVEL_4K, sev->asid, true); in snp_launch_update_vmsa()
2439 data.address = __sme_pa(svm->sev_es.vmsa); in snp_launch_update_vmsa()
2440 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE, in snp_launch_update_vmsa()
2441 &data, &argp->error); in snp_launch_update_vmsa()
2448 svm->vcpu.arch.guest_state_protected = true; in snp_launch_update_vmsa()
2450 * SEV-ES (and thus SNP) guest mandates LBR Virtualization to in snp_launch_update_vmsa()
2451 * be _always_ ON. Enable it only after setting in snp_launch_update_vmsa()
2471 return -ENOTTY; in snp_launch_finish()
2473 if (!sev->snp_context) in snp_launch_finish()
2474 return -EINVAL; in snp_launch_finish()
2476 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in snp_launch_finish()
2477 return -EFAULT; in snp_launch_finish()
2480 return -EINVAL; in snp_launch_finish()
2489 return -ENOMEM; in snp_launch_finish()
2498 data->id_block_en = 1; in snp_launch_finish()
2499 data->id_block_paddr = __sme_pa(id_block); in snp_launch_finish()
2507 data->id_auth_paddr = __sme_pa(id_auth); in snp_launch_finish()
2510 data->auth_key_en = 1; in snp_launch_finish()
2513 data->vcek_disabled = params.vcek_disabled; in snp_launch_finish()
2515 memcpy(data->host_data, params.host_data, KVM_SEV_SNP_FINISH_DATA_SIZE); in snp_launch_finish()
2516 data->gctx_paddr = __psp_pa(sev->snp_context); in snp_launch_finish()
2517 ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error); in snp_launch_finish()
2525 kvm->arch.pre_fault_allowed = true; in snp_launch_finish()
2544 return -ENOTTY; in sev_mem_enc_ioctl()
2550 return -EFAULT; in sev_mem_enc_ioctl()
2552 mutex_lock(&kvm->lock); in sev_mem_enc_ioctl()
2557 r = -EINVAL; in sev_mem_enc_ioctl()
2563 * allow the use of SNP-specific commands. in sev_mem_enc_ioctl()
2566 r = -EPERM; in sev_mem_enc_ioctl()
2573 r = -ENOTTY; in sev_mem_enc_ioctl()
2644 r = -EINVAL; in sev_mem_enc_ioctl()
2649 r = -EFAULT; in sev_mem_enc_ioctl()
2652 mutex_unlock(&kvm->lock); in sev_mem_enc_ioctl()
2664 return -ENOTTY; in sev_mem_enc_register_region()
2668 return -EINVAL; in sev_mem_enc_register_region()
2670 if (range->addr > ULONG_MAX || range->size > ULONG_MAX) in sev_mem_enc_register_region()
2671 return -EINVAL; in sev_mem_enc_register_region()
2675 return -ENOMEM; in sev_mem_enc_register_region()
2677 mutex_lock(&kvm->lock); in sev_mem_enc_register_region()
2678 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, in sev_mem_enc_register_region()
2680 if (IS_ERR(region->pages)) { in sev_mem_enc_register_region()
2681 ret = PTR_ERR(region->pages); in sev_mem_enc_register_region()
2682 mutex_unlock(&kvm->lock); in sev_mem_enc_register_region()
2687 * The guest may change the memory encryption attribute from C=0 -> C=1 in sev_mem_enc_register_region()
2690 * correct C-bit. Note, this must be done before dropping kvm->lock, in sev_mem_enc_register_region()
2692 * once kvm->lock is released. in sev_mem_enc_register_region()
2694 sev_clflush_pages(region->pages, region->npages); in sev_mem_enc_register_region()
2696 region->uaddr = range->addr; in sev_mem_enc_register_region()
2697 region->size = range->size; in sev_mem_enc_register_region()
2699 list_add_tail(&region->list, &sev->regions_list); in sev_mem_enc_register_region()
2700 mutex_unlock(&kvm->lock); in sev_mem_enc_register_region()
2713 struct list_head *head = &sev->regions_list; in find_enc_region()
2717 if (i->uaddr == range->addr && in find_enc_region()
2718 i->size == range->size) in find_enc_region()
2728 sev_unpin_memory(kvm, region->pages, region->npages); in __unregister_enc_region_locked()
2729 list_del(&region->list); in __unregister_enc_region_locked()
2741 return -EINVAL; in sev_mem_enc_unregister_region()
2743 mutex_lock(&kvm->lock); in sev_mem_enc_unregister_region()
2746 ret = -ENOTTY; in sev_mem_enc_unregister_region()
2752 ret = -EINVAL; in sev_mem_enc_unregister_region()
2765 mutex_unlock(&kvm->lock); in sev_mem_enc_unregister_region()
2769 mutex_unlock(&kvm->lock); in sev_mem_enc_unregister_region()
2781 return -EBADF; in sev_vm_copy_enc_context_from()
2784 return -EBADF; in sev_vm_copy_enc_context_from()
2786 source_kvm = fd_file(f)->private_data; in sev_vm_copy_enc_context_from()
2793 * disallow out-of-band SEV/SEV-ES init if the target is already an in sev_vm_copy_enc_context_from()
2795 * created after SEV/SEV-ES initialization, e.g. to init intercepts. in sev_vm_copy_enc_context_from()
2798 is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) { in sev_vm_copy_enc_context_from()
2799 ret = -EINVAL; in sev_vm_copy_enc_context_from()
2810 list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms); in sev_vm_copy_enc_context_from()
2813 mirror_sev->enc_context_owner = source_kvm; in sev_vm_copy_enc_context_from()
2814 mirror_sev->active = true; in sev_vm_copy_enc_context_from()
2815 mirror_sev->asid = source_sev->asid; in sev_vm_copy_enc_context_from()
2816 mirror_sev->fd = source_sev->fd; in sev_vm_copy_enc_context_from()
2817 mirror_sev->es_active = source_sev->es_active; in sev_vm_copy_enc_context_from()
2818 mirror_sev->need_init = false; in sev_vm_copy_enc_context_from()
2819 mirror_sev->handle = source_sev->handle; in sev_vm_copy_enc_context_from()
2820 INIT_LIST_HEAD(&mirror_sev->regions_list); in sev_vm_copy_enc_context_from()
2821 INIT_LIST_HEAD(&mirror_sev->mirror_vms); in sev_vm_copy_enc_context_from()
2827 * memory-views. in sev_vm_copy_enc_context_from()
2842 if (!sev->snp_context) in snp_decommission_context()
2846 data.address = __sme_pa(sev->snp_context); in snp_decommission_context()
2854 snp_free_firmware_page(sev->snp_context); in snp_decommission_context()
2855 sev->snp_context = NULL; in snp_decommission_context()
2863 struct list_head *head = &sev->regions_list; in sev_vm_destroy()
2869 WARN_ON(!list_empty(&sev->mirror_vms)); in sev_vm_destroy()
2873 struct kvm *owner_kvm = sev->enc_context_owner; in sev_vm_destroy()
2875 mutex_lock(&owner_kvm->lock); in sev_vm_destroy()
2876 list_del(&sev->mirror_entry); in sev_vm_destroy()
2877 mutex_unlock(&owner_kvm->lock); in sev_vm_destroy()
2911 sev_unbind_asid(kvm, sev->handle); in sev_vm_destroy()
2959 * PSP SEV driver is initialized before proceeding if KVM is built-in, in sev_hardware_setup()
2968 /* Set encryption bit location for SEV-ES guests */ in sev_hardware_setup()
2998 sev_asid_count = max_sev_asid - min_sev_asid + 1; in sev_hardware_setup()
3003 /* SEV-ES support requested? */ in sev_hardware_setup()
3008 * SEV-ES requires MMIO caching as KVM doesn't have access to the guest in sev_hardware_setup()
3016 /* Does the CPU support SEV-ES? */ in sev_hardware_setup()
3022 "LBRV must be present for SEV-ES support"); in sev_hardware_setup()
3026 /* Has the system been allocated ASIDs for SEV-ES? */ in sev_hardware_setup()
3030 sev_es_asid_count = min_sev_asid - 1; in sev_hardware_setup()
3037 pr_info("SEV %s (ASIDs %u - %u)\n", in sev_hardware_setup()
3043 pr_info("SEV-ES %s (ASIDs %u - %u)\n", in sev_hardware_setup()
3045 min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1); in sev_hardware_setup()
3047 pr_info("SEV-SNP %s (ASIDs %u - %u)\n", in sev_hardware_setup()
3049 min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1); in sev_hardware_setup()
3084 sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL); in sev_cpu_init()
3085 if (!sd->sev_vmcbs) in sev_cpu_init()
3086 return -ENOMEM; in sev_cpu_init()
3097 unsigned int asid = sev_get_asid(vcpu->kvm); in sev_flush_encrypted_page()
3102 * address is non-deterministic and unsafe. This function deliberately in sev_flush_encrypted_page()
3135 * hva-based mmu notifiers, so these events are only actually in sev_guest_memory_reclaimed()
3149 if (!sev_es_guest(vcpu->kvm)) in sev_free_vcpu()
3156 * a guest-owned page. Transition the page to hypervisor state before in sev_free_vcpu()
3159 if (sev_snp_guest(vcpu->kvm)) { in sev_free_vcpu()
3160 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT; in sev_free_vcpu()
3162 if (kvm_rmp_make_shared(vcpu->kvm, pfn, PG_LEVEL_4K)) in sev_free_vcpu()
3166 if (vcpu->arch.guest_state_protected) in sev_free_vcpu()
3167 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa); in sev_free_vcpu()
3169 __free_page(virt_to_page(svm->sev_es.vmsa)); in sev_free_vcpu()
3172 if (svm->sev_es.ghcb_sa_free) in sev_free_vcpu()
3173 kvfree(svm->sev_es.ghcb_sa); in sev_free_vcpu()
3178 return (((u64)control->exit_code_hi) << 32) | control->exit_code; in kvm_ghcb_get_sw_exit_code()
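/*
 * Editor's note: the GHCB's 64-bit SW_EXITCODE is stashed across two 32-bit
 * VMCB fields, so the helper above stitches exit_code_hi (bits 63:32) and
 * exit_code (bits 31:0) back together.
 */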
3183 struct vmcb_control_area *control = &svm->vmcb->control; in dump_ghcb()
3186 /* Re-use the dump_invalid_vmcb module parameter */ in dump_ghcb()
3192 nbits = sizeof(svm->sev_es.valid_bitmap) * 8; in dump_ghcb()
3200 pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa); in dump_ghcb()
3201 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code", in dump_ghcb()
3203 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1", in dump_ghcb()
3204 control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm)); in dump_ghcb()
3205 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2", in dump_ghcb()
3206 control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm)); in dump_ghcb()
3207 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch", in dump_ghcb()
3208 svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm)); in dump_ghcb()
3209 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap); in dump_ghcb()
3214 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_to_ghcb()
3215 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_to_ghcb()
3223 * VM-Exit. It's the guest's responsibility to not consume random data. in sev_es_sync_to_ghcb()
3225 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]); in sev_es_sync_to_ghcb()
3226 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]); in sev_es_sync_to_ghcb()
3227 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]); in sev_es_sync_to_ghcb()
3228 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]); in sev_es_sync_to_ghcb()
3233 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_sync_from_ghcb()
3234 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_from_ghcb()
3235 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_from_ghcb()
3250 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in sev_es_sync_from_ghcb()
3252 BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3253 memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3255 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3256 vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3257 vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3258 vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3259 vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3261 svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3264 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb); in sev_es_sync_from_ghcb()
3265 vcpu->arch.cpuid_dynamic_bits_dirty = true; in sev_es_sync_from_ghcb()
3270 control->exit_code = lower_32_bits(exit_code); in sev_es_sync_from_ghcb()
3271 control->exit_code_hi = upper_32_bits(exit_code); in sev_es_sync_from_ghcb()
3272 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb); in sev_es_sync_from_ghcb()
3273 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb); in sev_es_sync_from_ghcb()
3274 svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
3277 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3282 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_validate_vmgexit()
3283 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_validate_vmgexit()
3294 if (svm->sev_es.ghcb->ghcb_usage) { in sev_es_validate_vmgexit()
3323 if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd) in sev_es_validate_vmgexit()
3330 if (control->exit_info_1 & SVM_IOIO_STR_MASK) { in sev_es_validate_vmgexit()
3334 if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK)) in sev_es_validate_vmgexit()
3342 if (control->exit_info_1) { in sev_es_validate_vmgexit()
3374 if (!sev_snp_guest(vcpu->kvm)) in sev_es_validate_vmgexit()
3376 if (lower_32_bits(control->exit_info_1) != SVM_VMGEXIT_AP_DESTROY) in sev_es_validate_vmgexit()
3388 if (!sev_snp_guest(vcpu->kvm) || !kvm_ghcb_sw_scratch_is_valid(svm)) in sev_es_validate_vmgexit()
3393 if (!sev_snp_guest(vcpu->kvm) || in sev_es_validate_vmgexit()
3394 !PAGE_ALIGNED(control->exit_info_1) || in sev_es_validate_vmgexit()
3395 !PAGE_ALIGNED(control->exit_info_2) || in sev_es_validate_vmgexit()
3396 control->exit_info_1 == control->exit_info_2) in sev_es_validate_vmgexit()
3409 svm->sev_es.ghcb->ghcb_usage); in sev_es_validate_vmgexit()
3428 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE; in sev_es_unmap_ghcb()
3430 if (!svm->sev_es.ghcb) in sev_es_unmap_ghcb()
3433 if (svm->sev_es.ghcb_sa_free) { in sev_es_unmap_ghcb()
3439 if (svm->sev_es.ghcb_sa_sync) { in sev_es_unmap_ghcb()
3440 kvm_write_guest(svm->vcpu.kvm, in sev_es_unmap_ghcb()
3441 svm->sev_es.sw_scratch, in sev_es_unmap_ghcb()
3442 svm->sev_es.ghcb_sa, in sev_es_unmap_ghcb()
3443 svm->sev_es.ghcb_sa_len); in sev_es_unmap_ghcb()
3444 svm->sev_es.ghcb_sa_sync = false; in sev_es_unmap_ghcb()
3447 kvfree(svm->sev_es.ghcb_sa); in sev_es_unmap_ghcb()
3448 svm->sev_es.ghcb_sa = NULL; in sev_es_unmap_ghcb()
3449 svm->sev_es.ghcb_sa_free = false; in sev_es_unmap_ghcb()
3452 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb); in sev_es_unmap_ghcb()
3456 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map); in sev_es_unmap_ghcb()
3457 svm->sev_es.ghcb = NULL; in sev_es_unmap_ghcb()
3463 struct kvm *kvm = svm->vcpu.kvm; in pre_sev_run()
3471 if (sev_es_guest(kvm) && !VALID_PAGE(svm->vmcb->control.vmsa_pa)) in pre_sev_run()
3472 return -EINVAL; in pre_sev_run()
3475 svm->asid = asid; in pre_sev_run()
3483 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
3484 svm->vcpu.arch.last_vmentry_cpu == cpu) in pre_sev_run()
3487 sd->sev_vmcbs[asid] = svm->vmcb; in pre_sev_run()
3488 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in pre_sev_run()
3489 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in pre_sev_run()
3496 struct vmcb_control_area *control = &svm->vmcb->control; in setup_vmgexit_scratch()
3501 scratch_gpa_beg = svm->sev_es.sw_scratch; in setup_vmgexit_scratch()
3514 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) { in setup_vmgexit_scratch()
3516 ghcb_scratch_beg = control->ghcb_gpa + in setup_vmgexit_scratch()
3518 ghcb_scratch_end = control->ghcb_gpa + in setup_vmgexit_scratch()
3527 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n", in setup_vmgexit_scratch()
3532 scratch_va = (void *)svm->sev_es.ghcb; in setup_vmgexit_scratch()
3533 scratch_va += (scratch_gpa_beg - control->ghcb_gpa); in setup_vmgexit_scratch()
3546 return -ENOMEM; in setup_vmgexit_scratch()
3548 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { in setup_vmgexit_scratch()
3553 return -EFAULT; in setup_vmgexit_scratch()
3562 svm->sev_es.ghcb_sa_sync = sync; in setup_vmgexit_scratch()
3563 svm->sev_es.ghcb_sa_free = true; in setup_vmgexit_scratch()
3566 svm->sev_es.ghcb_sa = scratch_va; in setup_vmgexit_scratch()
3567 svm->sev_es.ghcb_sa_len = len; in setup_vmgexit_scratch()
3580 svm->vmcb->control.ghcb_gpa &= ~(mask << pos); in set_ghcb_msr_bits()
3581 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; in set_ghcb_msr_bits()
3586 return (svm->vmcb->control.ghcb_gpa >> pos) & mask; in get_ghcb_msr_bits()
3591 svm->vmcb->control.ghcb_gpa = value; in set_ghcb_msr()
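/*
 * Editor's note (usage sketch; the mask/position constants are assumed from
 * KVM's GHCB MSR protocol definitions, not shown in this excerpt): a typical
 * response packs a payload into the data bits and then stamps the response
 * code into the low "info" bits, e.g. for a CPUID request:
 *
 *	set_ghcb_msr_bits(svm, cpuid_value,
 *			  GHCB_MSR_CPUID_VALUE_MASK, GHCB_MSR_CPUID_VALUE_POS);
 *	set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
 *			  GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);
 */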
3598 pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1); in snp_rmptable_psmash()
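/*
 * Editor's note: KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) is 512, so the mask above
 * clears the low 9 bits of the pfn, i.e. rounds it down to the 2M-aligned
 * head of the huge page before the PSMASH is issued.
 */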
3615 if (vcpu->run->hypercall.ret) in snp_complete_psc_msr()
3627 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_begin_psc_msr()
3634 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) { in snp_begin_psc_msr()
3639 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in snp_begin_psc_msr()
3640 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in snp_begin_psc_msr()
3642 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2) in snp_begin_psc_msr()
3643 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that in snp_begin_psc_msr()
3645 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU. in snp_begin_psc_msr()
3647 vcpu->run->hypercall.ret = 0; in snp_begin_psc_msr()
3648 vcpu->run->hypercall.args[0] = gpa; in snp_begin_psc_msr()
3649 vcpu->run->hypercall.args[1] = 1; in snp_begin_psc_msr()
3650 vcpu->run->hypercall.args[2] = (op == SNP_PAGE_STATE_PRIVATE) in snp_begin_psc_msr()
3653 vcpu->run->hypercall.args[2] |= KVM_MAP_GPA_RANGE_PAGE_SZ_4K; in snp_begin_psc_msr()
3655 vcpu->arch.complete_userspace_io = snp_complete_psc_msr; in snp_begin_psc_msr()
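/*
 * Editor's note (assumed from the KVM_HC_MAP_GPA_RANGE hypercall ABI):
 * args[0] carries the starting GPA, args[1] the page count (always 1 for the
 * MSR-protocol PSC), and args[2] combines the encryption attribute
 * (KVM_MAP_GPA_RANGE_ENCRYPTED vs. KVM_MAP_GPA_RANGE_DECRYPTED) with a
 * page-size flag; userspace performs the conversion and the
 * complete_userspace_io callback above finishes the guest-visible reply.
 */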
3669 svm->sev_es.psc_inflight = 0; in snp_complete_psc()
3670 svm->sev_es.psc_idx = 0; in snp_complete_psc()
3671 svm->sev_es.psc_2m = false; in snp_complete_psc()
3675 * a PSC-specific return code in SW_EXITINFO2 that provides the "real" in snp_complete_psc()
3684 struct psc_buffer *psc = svm->sev_es.ghcb_sa; in __snp_complete_one_psc()
3685 struct psc_entry *entries = psc->entries; in __snp_complete_one_psc()
3686 struct psc_hdr *hdr = &psc->hdr; in __snp_complete_one_psc()
3690 * Everything in-flight has been processed successfully. Update the in __snp_complete_one_psc()
3692 * count of in-flight PSC entries. in __snp_complete_one_psc()
3694 for (idx = svm->sev_es.psc_idx; svm->sev_es.psc_inflight; in __snp_complete_one_psc()
3695 svm->sev_es.psc_inflight--, idx++) { in __snp_complete_one_psc()
3698 entry->cur_page = entry->pagesize ? 512 : 1; in __snp_complete_one_psc()
3701 hdr->cur_entry = idx; in __snp_complete_one_psc()
3707 struct psc_buffer *psc = svm->sev_es.ghcb_sa; in snp_complete_one_psc()
3709 if (vcpu->run->hypercall.ret) { in snp_complete_one_psc()
3722 struct psc_entry *entries = psc->entries; in snp_begin_psc()
3723 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_begin_psc()
3724 struct psc_hdr *hdr = &psc->hdr; in snp_begin_psc()
3731 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) { in snp_begin_psc()
3737 /* There should be no other PSCs in-flight at this point. */ in snp_begin_psc()
3738 if (WARN_ON_ONCE(svm->sev_es.psc_inflight)) { in snp_begin_psc()
3748 idx_start = hdr->cur_entry; in snp_begin_psc()
3749 idx_end = hdr->end_entry; in snp_begin_psc()
3757 for (idx = idx_start; idx <= idx_end; idx++, hdr->cur_entry++) { in snp_begin_psc()
3771 * If this is a partially-completed 2M range, force 4K handling in snp_begin_psc()
3777 npages -= entry_start.cur_page; in snp_begin_psc()
3792 svm->sev_es.psc_2m = huge; in snp_begin_psc()
3793 svm->sev_es.psc_idx = idx; in snp_begin_psc()
3794 svm->sev_es.psc_inflight = 1; in snp_begin_psc()
3809 svm->sev_es.psc_inflight++; in snp_begin_psc()
3816 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in snp_begin_psc()
3817 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in snp_begin_psc()
3819 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2) in snp_begin_psc()
3820 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that in snp_begin_psc()
3822 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU. in snp_begin_psc()
3824 vcpu->run->hypercall.ret = 0; in snp_begin_psc()
3825 vcpu->run->hypercall.args[0] = gfn_to_gpa(gfn); in snp_begin_psc()
3826 vcpu->run->hypercall.args[1] = npages; in snp_begin_psc()
3827 vcpu->run->hypercall.args[2] = entry_start.operation == VMGEXIT_PSC_OP_PRIVATE in snp_begin_psc()
3830 vcpu->run->hypercall.args[2] |= entry_start.pagesize in snp_begin_psc()
3833 vcpu->arch.complete_userspace_io = snp_complete_one_psc; in snp_begin_psc()
3862 if (!sev_snp_guest(vcpu->kvm)) in sev_snp_init_protected_guest_state()
3865 guard(mutex)(&svm->sev_es.snp_vmsa_mutex); in sev_snp_init_protected_guest_state()
3867 if (!svm->sev_es.snp_ap_waiting_for_reset) in sev_snp_init_protected_guest_state()
3870 svm->sev_es.snp_ap_waiting_for_reset = false; in sev_snp_init_protected_guest_state()
3873 vcpu->arch.pv.pv_unhalted = false; in sev_snp_init_protected_guest_state()
3877 svm->vmcb->control.vmsa_pa = INVALID_PAGE; in sev_snp_init_protected_guest_state()
3880 * When replacing the VMSA during SEV-SNP AP creation, in sev_snp_init_protected_guest_state()
3883 vmcb_mark_all_dirty(svm->vmcb); in sev_snp_init_protected_guest_state()
3885 if (!VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) in sev_snp_init_protected_guest_state()
3888 gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa); in sev_snp_init_protected_guest_state()
3889 svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; in sev_snp_init_protected_guest_state()
3891 slot = gfn_to_memslot(vcpu->kvm, gfn); in sev_snp_init_protected_guest_state()
3899 if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL)) in sev_snp_init_protected_guest_state()
3903 * From this point forward, the VMSA will always be a guest-mapped page in sev_snp_init_protected_guest_state()
3904 * rather than the initial one allocated by KVM in svm->sev_es.vmsa. In in sev_snp_init_protected_guest_state()
3905 * theory, svm->sev_es.vmsa could be free'd and cleaned up here, but in sev_snp_init_protected_guest_state()
3908 * also allows the existing logic for SEV-ES VMSAs to be re-used with in sev_snp_init_protected_guest_state()
3909 * minimal SNP-specific changes. in sev_snp_init_protected_guest_state()
3911 svm->sev_es.snp_has_guest_vmsa = true; in sev_snp_init_protected_guest_state()
3914 svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn); in sev_snp_init_protected_guest_state()
3921 * then care should be taken to ensure svm->sev_es.vmsa is pinned in sev_snp_init_protected_guest_state()
3929 struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm); in sev_snp_ap_creation()
3930 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_snp_ap_creation()
3936 request = lower_32_bits(svm->vmcb->control.exit_info_1); in sev_snp_ap_creation()
3937 apic_id = upper_32_bits(svm->vmcb->control.exit_info_1); in sev_snp_ap_creation()
3940 target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, apic_id); in sev_snp_ap_creation()
3944 return -EINVAL; in sev_snp_ap_creation()
3949 guard(mutex)(&target_svm->sev_es.snp_vmsa_mutex); in sev_snp_ap_creation()
3954 if (vcpu->arch.regs[VCPU_REGS_RAX] != sev->vmsa_features) { in sev_snp_ap_creation()
3956 vcpu->arch.regs[VCPU_REGS_RAX], sev->vmsa_features); in sev_snp_ap_creation()
3957 return -EINVAL; in sev_snp_ap_creation()
3960 if (!page_address_valid(vcpu, svm->vmcb->control.exit_info_2)) { in sev_snp_ap_creation()
3962 svm->vmcb->control.exit_info_2); in sev_snp_ap_creation()
3963 return -EINVAL; in sev_snp_ap_creation()
3973 if (IS_ALIGNED(svm->vmcb->control.exit_info_2, PMD_SIZE)) { in sev_snp_ap_creation()
3976 svm->vmcb->control.exit_info_2); in sev_snp_ap_creation()
3977 return -EINVAL; in sev_snp_ap_creation()
3980 target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2; in sev_snp_ap_creation()
3983 target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; in sev_snp_ap_creation()
3988 return -EINVAL; in sev_snp_ap_creation()
3991 target_svm->sev_es.snp_ap_waiting_for_reset = true; in sev_snp_ap_creation()
4008 struct kvm *kvm = svm->vcpu.kvm; in snp_handle_guest_req()
4014 return -EINVAL; in snp_handle_guest_req()
4016 mutex_lock(&sev->guest_req_mutex); in snp_handle_guest_req()
4018 if (kvm_read_guest(kvm, req_gpa, sev->guest_req_buf, PAGE_SIZE)) { in snp_handle_guest_req()
4019 ret = -EIO; in snp_handle_guest_req()
4023 data.gctx_paddr = __psp_pa(sev->snp_context); in snp_handle_guest_req()
4024 data.req_paddr = __psp_pa(sev->guest_req_buf); in snp_handle_guest_req()
4025 data.res_paddr = __psp_pa(sev->guest_resp_buf); in snp_handle_guest_req()
4036 if (kvm_write_guest(kvm, resp_gpa, sev->guest_resp_buf, PAGE_SIZE)) { in snp_handle_guest_req()
4037 ret = -EIO; in snp_handle_guest_req()
4047 mutex_unlock(&sev->guest_req_mutex); in snp_handle_guest_req()
4053 struct kvm *kvm = svm->vcpu.kvm; in snp_handle_ext_guest_req()
4057 return -EINVAL; in snp_handle_ext_guest_req()
4061 return -EIO; in snp_handle_ext_guest_req()
4066 * report via the guest-provided data pages indicated by RAX/RBX. The in snp_handle_ext_guest_req()
4071 * certificate table in the guest-provided data pages. in snp_handle_ext_guest_req()
4074 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_handle_ext_guest_req()
4081 data_gpa = vcpu->arch.regs[VCPU_REGS_RAX]; in snp_handle_ext_guest_req()
4082 data_npages = vcpu->arch.regs[VCPU_REGS_RBX]; in snp_handle_ext_guest_req()
4089 * certificate table is terminated by 24-bytes of zeroes. in snp_handle_ext_guest_req()
4092 return -EIO; in snp_handle_ext_guest_req()
4104 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit_msr_protocol()
4105 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_handle_vmgexit_msr_protocol()
4106 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_handle_vmgexit_msr_protocol()
4110 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK; in sev_handle_vmgexit_msr_protocol()
4112 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
4113 control->ghcb_gpa); in sev_handle_vmgexit_msr_protocol()
4117 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version, in sev_handle_vmgexit_msr_protocol()
4129 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn; in sev_handle_vmgexit_msr_protocol()
4130 vcpu->arch.regs[VCPU_REGS_RCX] = 0; in sev_handle_vmgexit_msr_protocol()
4134 /* Error, keep GHCB MSR value as-is */ in sev_handle_vmgexit_msr_protocol()
4142 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX]; in sev_handle_vmgexit_msr_protocol()
4144 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX]; in sev_handle_vmgexit_msr_protocol()
4146 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX]; in sev_handle_vmgexit_msr_protocol()
4148 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX]; in sev_handle_vmgexit_msr_protocol()
4160 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO; in sev_handle_vmgexit_msr_protocol()
4161 ret = kvm_emulate_ap_reset_hold(&svm->vcpu); in sev_handle_vmgexit_msr_protocol()
4164 * Preset the result to a non-SIPI return and then only set in sev_handle_vmgexit_msr_protocol()
4165 * the result to non-zero when delivering a SIPI. in sev_handle_vmgexit_msr_protocol()
4182 if (!sev_snp_guest(vcpu->kvm)) in sev_handle_vmgexit_msr_protocol()
4193 if (!sev_snp_guest(vcpu->kvm)) in sev_handle_vmgexit_msr_protocol()
4199 svm->sev_es.ghcb_registered_gpa = gfn_to_gpa(gfn); in sev_handle_vmgexit_msr_protocol()
4208 if (!sev_snp_guest(vcpu->kvm)) in sev_handle_vmgexit_msr_protocol()
4211 ret = snp_begin_psc_msr(svm, control->ghcb_gpa); in sev_handle_vmgexit_msr_protocol()
4222 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n", in sev_handle_vmgexit_msr_protocol()
4228 /* Error, keep GHCB MSR value as-is */ in sev_handle_vmgexit_msr_protocol()
4232 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
4233 control->ghcb_gpa, ret); in sev_handle_vmgexit_msr_protocol()
4238 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in sev_handle_vmgexit_msr_protocol()
4239 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM; in sev_handle_vmgexit_msr_protocol()
4240 vcpu->run->system_event.ndata = 1; in sev_handle_vmgexit_msr_protocol()
4241 vcpu->run->system_event.data[0] = control->ghcb_gpa; in sev_handle_vmgexit_msr_protocol()
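sev_handle_vmgexit_msr_protocol() above dispatches on the low bits of the GHCB MSR: the MSR protocol packs a request/response code into the low 12 bits and request-specific data into the upper bits; for a CPUID request, the target register index sits in bits 31:30 and the CPUID function in bits 63:32, which is why the handler copies the result from RAX/RBX/RCX/RDX as shown. The constants below follow my reading of the GHCB spec and are an illustration only; the kernel's authoritative definitions live in its SEV headers.

/*
 * Hedged sketch of GHCB MSR protocol encoding for a CPUID request.
 * Values are taken from the GHCB spec as understood here, not copied
 * from kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

#define GHCB_MSR_INFO_MASK   0xfffULL   /* low 12 bits: request/response code */
#define GHCB_MSR_CPUID_REQ   0x004ULL
#define GHCB_MSR_CPUID_RESP  0x005ULL

static uint64_t ghcb_msr_cpuid_req(uint32_t fn, unsigned int reg /* 0=EAX..3=EDX */)
{
	return GHCB_MSR_CPUID_REQ |
	       ((uint64_t)(reg & 0x3) << 30) |
	       ((uint64_t)fn << 32);
}

int main(void)
{
	uint64_t msr = ghcb_msr_cpuid_req(0x8000001f, 0 /* EAX */);

	printf("info=%#llx fn=%#x reg=%u\n",
	       (unsigned long long)(msr & GHCB_MSR_INFO_MASK),
	       (uint32_t)(msr >> 32), (unsigned int)((msr >> 30) & 0x3));
	return 0;
}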
4249 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit()
4254 ghcb_gpa = control->ghcb_gpa; in sev_handle_vmgexit()
4265 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { in sev_handle_vmgexit()
4274 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
4276 trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb); in sev_handle_vmgexit()
4280	 /* An SEV-SNP guest requires that the GHCB GPA be registered */ in sev_handle_vmgexit()
4281 if (sev_snp_guest(svm->vcpu.kvm) && !ghcb_gpa_is_registered(svm, ghcb_gpa)) { in sev_handle_vmgexit()
4282 vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB GPA [%#llx] is not registered.\n", ghcb_gpa); in sev_handle_vmgexit()
4283 return -EINVAL; in sev_handle_vmgexit()
4295 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
4300 control->exit_info_1, in sev_handle_vmgexit()
4301 control->exit_info_2, in sev_handle_vmgexit()
4302 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4305 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2); in sev_handle_vmgexit()
4310 control->exit_info_1, in sev_handle_vmgexit()
4311 control->exit_info_2, in sev_handle_vmgexit()
4312 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4315 ++vcpu->stat.nmi_window_exits; in sev_handle_vmgexit()
4316 svm->nmi_masked = false; in sev_handle_vmgexit()
4321 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT; in sev_handle_vmgexit()
4325 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_handle_vmgexit()
4327 switch (control->exit_info_1) { in sev_handle_vmgexit()
4330 sev->ap_jump_table = control->exit_info_2; in sev_handle_vmgexit()
4334 svm_vmgexit_success(svm, sev->ap_jump_table); in sev_handle_vmgexit()
4337 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", in sev_handle_vmgexit()
4338 control->exit_info_1); in sev_handle_vmgexit()
4350 pr_info("SEV-ES guest requested termination: reason %#llx info %#llx\n", in sev_handle_vmgexit()
4351 control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4352 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in sev_handle_vmgexit()
4353 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM; in sev_handle_vmgexit()
4354 vcpu->run->system_event.ndata = 1; in sev_handle_vmgexit()
4355 vcpu->run->system_event.data[0] = control->ghcb_gpa; in sev_handle_vmgexit()
4358 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
4362 ret = snp_begin_psc(svm, svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4373 ret = snp_handle_guest_req(svm, control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4376 ret = snp_handle_ext_guest_req(svm, control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4380 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", in sev_handle_vmgexit()
4381 control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4382 ret = -EINVAL; in sev_handle_vmgexit()
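Both termination paths above (the MSR-protocol request and the GHCB termination event) bail out to userspace with a KVM_EXIT_SYSTEM_EVENT of type KVM_SYSTEM_EVENT_SEV_TERM and stash the guest's GHCB MSR value in data[0]. A minimal sketch of the VMM side is below; it assumes the vCPU's kvm_run area is already mmap'ed and that the uapi headers are new enough to define KVM_SYSTEM_EVENT_SEV_TERM and the system_event data[] array.

/*
 * Sketch of consuming the SEV termination exit in a userspace VMM.
 * The KVM_RUN loop, fd setup and error handling are elided.
 */
#include <stdio.h>
#include <linux/kvm.h>

int handle_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
		return 0;	/* not a system event */

	if (run->system_event.type == KVM_SYSTEM_EVENT_SEV_TERM) {
		/* data[0] carries the guest's GHCB MSR value at termination. */
		fprintf(stderr, "SEV guest requested termination: %#llx\n",
			(unsigned long long)run->system_event.data[0]);
		return -1;	/* tear the VM down */
	}

	return 0;
}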
4397 if (svm->vmcb->control.exit_info_2 > INT_MAX) in sev_es_string_io()
4398 return -EINVAL; in sev_es_string_io()
4400 count = svm->vmcb->control.exit_info_2; in sev_es_string_io()
4402 return -EINVAL; in sev_es_string_io()
4408 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, in sev_es_string_io()
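The checks in sev_es_string_io() above bound the guest-supplied repetition count (exit_info_2) before it is multiplied by the I/O element size; the multiplication itself must also be overflow-checked so a huge count cannot wrap the scratch-buffer length. A small self-contained example of that pattern using the compiler's overflow builtin follows; the kernel expresses the same check via its check_mul_overflow() helper.

/*
 * Overflow-safe validation of a guest-controlled count * size product,
 * mirroring the bounds checks in sev_es_string_io() above.
 */
#include <stdint.h>
#include <limits.h>
#include <errno.h>

int validate_io_bytes(uint64_t exit_info_2, int size, int *bytes)
{
	int count;

	if (exit_info_2 > INT_MAX)
		return -EINVAL;

	count = (int)exit_info_2;
	if (__builtin_mul_overflow(count, size, bytes))
		return -EINVAL;

	return 0;
}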
4414 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_vcpu_after_set_cpuid()
4420 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux); in sev_es_vcpu_after_set_cpuid()
4424 * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if in sev_es_vcpu_after_set_cpuid()
4437 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1); in sev_es_vcpu_after_set_cpuid()
4439 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0); in sev_es_vcpu_after_set_cpuid()
4444 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_vcpu_after_set_cpuid()
4450 vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); in sev_vcpu_after_set_cpuid()
4452 if (sev_es_guest(svm->vcpu.kvm)) in sev_vcpu_after_set_cpuid()
4458 struct vmcb *vmcb = svm->vmcb01.ptr; in sev_es_init_vmcb()
4459 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_init_vmcb()
4461 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; in sev_es_init_vmcb()
4464	 * An SEV-ES guest requires a VMSA area that is separate from the in sev_es_init_vmcb()
4470 if (svm->sev_es.vmsa && !svm->sev_es.snp_has_guest_vmsa) in sev_es_init_vmcb()
4471 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); in sev_es_init_vmcb()
4489 vmcb->control.intercepts[INTERCEPT_DR] = 0; in sev_es_init_vmcb()
4491 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); in sev_es_init_vmcb()
4492 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); in sev_es_init_vmcb()
4497 * allow debugging SEV-ES guests, and enables DebugSwap iff in sev_es_init_vmcb()
4511 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); in sev_es_init_vmcb()
4512 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); in sev_es_init_vmcb()
4517 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in sev_init_vmcb()
4526 if (sev_es_guest(svm->vcpu.kvm)) in sev_init_vmcb()
4532 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_vcpu_reset()
4533 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_es_vcpu_reset()
4537 * vCPU RESET for an SEV-ES guest. in sev_es_vcpu_reset()
4539 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version, in sev_es_vcpu_reset()
4543 mutex_init(&svm->sev_es.snp_vmsa_mutex); in sev_es_vcpu_reset()
4548 struct kvm *kvm = svm->vcpu.kvm; in sev_es_prepare_switch_to_guest()
4551 * All host state for SEV-ES guests is categorized into three swap types in sev_es_prepare_switch_to_guest()
4563 * Manually save type-B state, i.e. state that is loaded by VMEXIT but in sev_es_prepare_switch_to_guest()
4567 hostsa->xcr0 = kvm_host.xcr0; in sev_es_prepare_switch_to_guest()
4568 hostsa->pkru = read_pkru(); in sev_es_prepare_switch_to_guest()
4569 hostsa->xss = kvm_host.xss; in sev_es_prepare_switch_to_guest()
4573 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does in sev_es_prepare_switch_to_guest()
4589 hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0); in sev_es_prepare_switch_to_guest()
4590 hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1); in sev_es_prepare_switch_to_guest()
4591 hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2); in sev_es_prepare_switch_to_guest()
4592 hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3); in sev_es_prepare_switch_to_guest()
4601 if (!svm->sev_es.received_first_sipi) { in sev_vcpu_deliver_sipi_vector()
4602 svm->sev_es.received_first_sipi = true; in sev_vcpu_deliver_sipi_vector()
4607 switch (svm->sev_es.ap_reset_hold_type) { in sev_vcpu_deliver_sipi_vector()
4611 * set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value. in sev_vcpu_deliver_sipi_vector()
4618 * set the CS and RIP. Set GHCB data field to a non-zero value. in sev_vcpu_deliver_sipi_vector()
4642	 * Allocate an SNP-safe page to work around the SNP erratum where in snp_safe_alloc_page_node()
4645 * 2MB-aligned VMCB, VMSA, or AVIC backing page. in snp_safe_alloc_page_node()
4648 * 2MB-aligned, and free the other. in snp_safe_alloc_page_node()
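The comment above describes the workaround implemented by snp_safe_alloc_page_node(): over-allocate by one page, hand out whichever 4K page of the pair is not 2MB-aligned, and free the other half, so a VMCB/VMSA/AVIC backing page can never coincide with a hugepage-covered RMP boundary. A toy illustration of the pick-the-unaligned-half arithmetic on PFNs (pure arithmetic, not the real allocator) is shown below.

/*
 * Toy illustration: given an order-1 (two-page) allocation starting at
 * @pfn, return the PFN of the half that is NOT 2MB-aligned.  At most one
 * of two consecutive 4K pages can sit on a 2MB boundary.
 */
#include <stdint.h>
#include <assert.h>

#define PAGES_PER_2M 512ULL   /* 4K pages per 2MB hugepage */

static uint64_t pick_snp_safe_pfn(uint64_t pfn)
{
	return (pfn % PAGES_PER_2M) ? pfn : pfn + 1;
}

int main(void)
{
	assert(pick_snp_safe_pfn(0x200) == 0x201);  /* 0x200 is 2MB-aligned */
	assert(pick_snp_safe_pfn(0x201) == 0x201);  /* already unaligned */
	return 0;
}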
4668 struct kvm *kvm = vcpu->kvm; in sev_handle_rmp_fault()
4679 * triggering an RMP fault for an implicit page-state change from in sev_handle_rmp_fault()
4680 * shared->private. Implicit page-state changes are forwarded to in sev_handle_rmp_fault()
4685 pr_warn_ratelimited("SEV: Unexpected RMP fault for non-private GPA 0x%llx\n", in sev_handle_rmp_fault()
4692 pr_warn_ratelimited("SEV: Unexpected RMP fault, non-private slot for GPA 0x%llx\n", in sev_handle_rmp_fault()
4717 * what is indicated by the page-size bit in the 2MB RMP entry for in sev_handle_rmp_fault()
4728	 * GPA range that is backed by a 2MB-aligned PFN whose RMP entry is in in sev_handle_rmp_fault()
4799 * PFN is currently shared, then the entire 2M-aligned range can be in is_large_rmp_possible()
4824 return -ENOENT; in sev_gmem_prepare()
4843 rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false); in sev_gmem_prepare()
4847 return -EINVAL; in sev_gmem_prepare()
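The rmp_make_private() call above takes pfn_aligned and gfn_aligned when a 2MB RMP entry is being installed: both values are aligned down to a 512-page (2MB) boundary, and a 2MB mapping is only geometrically possible when the gfn and pfn share the same offset within that region (on top of the "entire range still shared" condition described in is_large_rmp_possible() above). The arithmetic sketch below uses hypothetical helper names to spell that out.

/*
 * Illustrative arithmetic behind the 2MB-aligned gfn/pfn values used for
 * a 2MB RMP entry.  Helper names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT   12
#define PAGES_PER_2M 512ULL

static inline uint64_t align_down_2m(uint64_t pfn_or_gfn)
{
	return pfn_or_gfn & ~(PAGES_PER_2M - 1);
}

static inline uint64_t gfn_to_gpa_example(uint64_t gfn)
{
	return gfn << PAGE_SHIFT;
}

static inline bool gfn_pfn_share_2m_offset(uint64_t gfn, uint64_t pfn)
{
	/* Same offset within the 2MB region => a single 2MB mapping can cover both. */
	return (gfn & (PAGES_PER_2M - 1)) == (pfn & (PAGES_PER_2M - 1));
}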
4881 * 4K RMP entries before attempting to convert a 4K sub-page. in sev_gmem_invalidate()
4900 * SEV-ES avoids host/guest cache coherency issues through in sev_gmem_invalidate()
4901 * WBINVD hooks issued via MMU notifiers during run-time, and in sev_gmem_invalidate()
4906 * userspace may also free gmem pages during run-time via in sev_gmem_invalidate()
4907 * hole-punching operations on the guest_memfd, so flush the in sev_gmem_invalidate()