// SPDX-License-Identifier: GPL-2.0-only
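/*
 * Verify that KVM synthesizes nested VM-Exits with the correct exit reason
 * and exit instruction length when emulating intercepted instructions on
 * behalf of L2, using the forced emulation prefix to route each instruction
 * through KVM's emulator.
 */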
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

enum {
	SVM_F,
	VMX_F,
	NR_VIRTUALIZATION_FLAVORS,
};

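/*
 * Each entry pairs an instruction's raw opcode bytes (zero-padded) with the
 * expected exit reason for each virtualization flavor.
 */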
struct emulated_instruction {
	const char name[32];
	uint8_t opcode[15];
	uint32_t exit_reason[NR_VIRTUALIZATION_FLAVORS];
};

static struct emulated_instruction instructions[] = {
	{
		.name = "pause",
		.opcode = { 0xf3, 0x90 },
		.exit_reason = { SVM_EXIT_PAUSE,
				 EXIT_REASON_PAUSE_INSTRUCTION, }
	},
	{
		.name = "hlt",
		.opcode = { 0xf4 },
		.exit_reason = { SVM_EXIT_HLT,
				 EXIT_REASON_HLT, }
	},
};

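/*
 * KVM's forced emulation prefix (FEP).  When KVM is loaded with
 * force_emulation_prefix enabled, this ud2 + "kvm" sequence forces the
 * instruction that immediately follows it through KVM's emulator.
 */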
static uint8_t kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d };	/* ud2 ; .ascii "kvm" */
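/* L2's code stream: the FEP followed by one instruction of at most 15 bytes. */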
static uint8_t l2_guest_code[sizeof(kvm_fep) + 15];
static uint8_t *l2_instruction = &l2_guest_code[sizeof(kvm_fep)];

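/* An instruction's length is the number of opcode bytes before the first zero byte. */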
static uint32_t get_instruction_length(struct emulated_instruction *insn)
{
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(insn->opcode) && insn->opcode[i]; i++)
		;

	return i;
}

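/*
 * L1 guest code: set up SVM or VMX based on CPU support, then execute each
 * test instruction in L2 via the FEP and verify the resulting nested VM-Exit.
 */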
static void guest_code(void *test_data)
{
	int f = this_cpu_has(X86_FEATURE_SVM) ? SVM_F : VMX_F;
	int i;

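	/*
	 * Install the FEP at the head of the L2 code stream; the instruction
	 * under test is copied in immediately after it on each iteration.
	 */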
	memcpy(l2_guest_code, kvm_fep, sizeof(kvm_fep));

	if (f == SVM_F) {
		struct svm_test_data *svm = test_data;
		struct vmcb *vmcb = svm->vmcb;

		generic_svm_setup(svm, NULL, NULL);
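		/*
		 * Zero L2's IDT limit so that any unexpected exception, e.g.
		 * the #BP from the INT3 fill below, escalates to a triple
		 * fault and thus a SHUTDOWN exit.
		 */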
		vmcb->save.idtr.limit = 0;
		vmcb->save.rip = (u64)l2_guest_code;

		vmcb->control.intercept |= BIT_ULL(INTERCEPT_SHUTDOWN) |
					   BIT_ULL(INTERCEPT_PAUSE) |
					   BIT_ULL(INTERCEPT_HLT);
		vmcb->control.intercept_exceptions = 0;
	} else {
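		/*
		 * Enter VMX operation and load the VMCS, then mirror the SVM
		 * setup: zeroed IDT limit, L2 RIP at the code stream, no
		 * exception intercepts, and PAUSE/HLT exiting.
		 */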
		GUEST_ASSERT(prepare_for_vmx_operation(test_data));
		GUEST_ASSERT(load_vmcs(test_data));

		prepare_vmcs(test_data, NULL, NULL);
		GUEST_ASSERT(!vmwrite(GUEST_IDTR_LIMIT, 0));
		GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_guest_code));
		GUEST_ASSERT(!vmwrite(EXCEPTION_BITMAP, 0));

		vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmreadz(CPU_BASED_VM_EXEC_CONTROL) |
						   CPU_BASED_PAUSE_EXITING |
						   CPU_BASED_HLT_EXITING);
	}

	for (i = 0; i < ARRAY_SIZE(instructions); i++) {
		struct emulated_instruction *insn = &instructions[i];
		uint32_t insn_len = get_instruction_length(insn);
		uint32_t exit_insn_len;
		u32 exit_reason;

		/*
		 * Copy the target instruction to the L2 code stream, and fill
		 * the remaining bytes with INT3s so that a missed intercept
		 * results in a consistent failure mode (SHUTDOWN).
		 */
		memcpy(l2_instruction, insn->opcode, insn_len);
		memset(l2_instruction + insn_len, 0xcc, sizeof(insn->opcode) - insn_len);

		if (f == SVM_F) {
			struct svm_test_data *svm = test_data;
			struct vmcb *vmcb = svm->vmcb;

			run_guest(vmcb, svm->vmcb_gpa);
			exit_reason = vmcb->control.exit_code;
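			/*
			 * SVM doesn't report an exit instruction length;
			 * derive it from the NextRIP (next_rip) saved on
			 * the intercept.
			 */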
			exit_insn_len = vmcb->control.next_rip - vmcb->save.rip;
			GUEST_ASSERT_EQ(vmcb->save.rip, (u64)l2_instruction);
		} else {
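			/*
			 * A VMCS can only be VMLAUNCH'd once; use VMRESUME
			 * for all iterations after the first.
			 */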
			GUEST_ASSERT_EQ(i ? vmresume() : vmlaunch(), 0);
			exit_reason = vmreadz(VM_EXIT_REASON);
			exit_insn_len = vmreadz(VM_EXIT_INSTRUCTION_LEN);
			GUEST_ASSERT_EQ(vmreadz(GUEST_RIP), (u64)l2_instruction);
		}

		__GUEST_ASSERT(exit_reason == insn->exit_reason[f],
			       "Wanted exit_reason '0x%x' for '%s', got '0x%x'",
			       insn->exit_reason[f], insn->name, exit_reason);

		__GUEST_ASSERT(exit_insn_len == insn_len,
			       "Wanted insn_len '%u' for '%s', got '%u'",
			       insn_len, insn->name, exit_insn_len);
	}

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_test_data_gva;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

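	/*
	 * The FEP is recognized if and only if KVM was loaded with
	 * force_emulation_prefix enabled.
	 */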
	TEST_REQUIRE(is_forced_emulation_enabled);
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);

	if (kvm_cpu_has(X86_FEATURE_SVM))
		vcpu_alloc_svm(vm, &nested_test_data_gva);
	else
		vcpu_alloc_vmx(vm, &nested_test_data_gva);

	vcpu_args_set(vcpu, 1, nested_test_data_gva);

	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);

	kvm_vm_free(vm);
}