// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Oracle and/or its affiliates.
 *
 * Based on:
 *   svm_int_ctl_test
 *
 *   Copyright (C) 2021, Red Hat, Inc.
 *
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include "apic.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "test_util.h"

#define INT_NR			0x20

static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");

static unsigned int bp_fired;
static void guest_bp_handler(struct ex_regs *regs)
{
	bp_fired++;
}

static unsigned int int_fired;
static void l2_guest_code_int(void);

static void guest_int_handler(struct ex_regs *regs)
{
	int_fired++;
	GUEST_ASSERT_EQ(regs->rip, (unsigned long)l2_guest_code_int);
}

static void l2_guest_code_int(void)
{
	GUEST_ASSERT_EQ(int_fired, 1);

	/*
	 * Same as the vmmcall() function, but with a ud2 sneaked after the
	 * vmmcall. The caller injects an exception with the return address
	 * increased by 2, so the "pop rbp" must be after the ud2 and we cannot
	 * use vmmcall() directly.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; ud2; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");

	GUEST_ASSERT_EQ(bp_fired, 1);
	hlt();
}

static atomic_int nmi_stage;
#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)
static void guest_nmi_handler(struct ex_regs *regs)
{
	nmi_stage_inc();

	if (nmi_stage_get() == 1) {
		vmmcall();
		GUEST_FAIL("Unexpected resume after VMMCALL");
	} else {
		GUEST_ASSERT_EQ(nmi_stage_get(), 3);
		GUEST_DONE();
	}
}

static void l2_guest_code_nmi(void)
{
	ud2();
}

static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	if (is_nmi)
		x2apic_enable();

	/* Prepare for L2 execution. */
	generic_svm_setup(svm,
			  is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
	vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);

	if (is_nmi) {
		vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	} else {
		vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
		/* The return address pushed on stack */
		vmcb->control.next_rip = vmcb->save.rip;
	}

	run_guest(vmcb, svm->vmcb_gpa);
	__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
		       "Expected VMMCALL #VMEXIT, got '0x%x', info1 = '0x%lx', info2 = '0x%lx'",
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

	if (is_nmi) {
		clgi();
		x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);

		GUEST_ASSERT_EQ(nmi_stage_get(), 1);
		nmi_stage_inc();

		stgi();
		/* self-NMI happens here */
		while (true)
			cpu_relax();
	}

	/* Skip over VMMCALL */
	vmcb->save.rip += 3;

	/* Switch to alternate IDT to cause intervening NPF again */
	vmcb->save.idtr.base = idt_alt;
	vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */

	vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
	/* The return address pushed on stack, skip over UD2 */
	vmcb->control.next_rip = vmcb->save.rip + 2;

	run_guest(vmcb, svm->vmcb_gpa);
	__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT,
		       "Expected HLT #VMEXIT, got '0x%x', info1 = '0x%lx', info2 = '0x%lx'",
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

	GUEST_DONE();
}

static void run_test(bool is_nmi)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t svm_gva;
	vm_vaddr_t idt_alt_vm;
	struct kvm_guest_debug debug;

	pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
	vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
	vm_install_exception_handler(vm, INT_NR, guest_int_handler);

	vcpu_alloc_svm(vm, &svm_gva);

	if (!is_nmi) {
		void *idt, *idt_alt;

		idt_alt_vm = vm_vaddr_alloc_page(vm);
		idt_alt = addr_gva2hva(vm, idt_alt_vm);
		idt = addr_gva2hva(vm, vm->arch.idt);
		memcpy(idt_alt, idt, getpagesize());
	} else {
		idt_alt_vm = 0;
	}
	vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);

	memset(&debug, 0, sizeof(debug));
	vcpu_guest_debug_set(vcpu, &debug);

	struct ucall uc;

	alarm(2);
	vcpu_run(vcpu);
	alarm(0);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
		/* NOT REACHED */
	case UCALL_DONE:
		goto done;
	default:
		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
	}
done:
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

	TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
		    "KVM with nSVM is supposed to unconditionally advertise nRIP Save");

	atomic_init(&nmi_stage, 0);

	run_test(false);
	run_test(true);

	return 0;
}