// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V HvCallSendSyntheticClusterIpi{,Ex} tests
 *
 * Copyright (C) 2022, Red Hat, Inc.
 *
 */
#include <pthread.h>
#include <inttypes.h>

#include "kvm_util.h"
#include "hyperv.h"
#include "test_util.h"
#include "vmx.h"

#define RECEIVER_VCPU_ID_1 2
#define RECEIVER_VCPU_ID_2 65

#define IPI_VECTOR	 0xfe

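/*
 * Per-vCPU IPI counters, indexed by Hyper-V VP index. Receivers also use
 * their slot as a ready flag by writing (u64)-1 before parking.
 */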
static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];

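/*
 * Sparse VP set as defined by the Hyper-V TLFS: valid_bank_mask selects
 * which 64-VP banks are present, and bank_contents[] carries one bit per
 * VP within each selected bank.
 */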
struct hv_vpset {
	u64 format;
	u64 valid_bank_mask;
	u64 bank_contents[2];
};

enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,
	HV_GENERIC_SET_ALL,
};

/* HvCallSendSyntheticClusterIpi hypercall */
struct hv_send_ipi {
	u32 vector;
	u32 reserved;
	u64 cpu_mask;
};

/* HvCallSendSyntheticClusterIpiEx hypercall */
struct hv_send_ipi_ex {
	u32 vector;
	u32 reserved;
	struct hv_vpset vp_set;
};

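/* Minimal Hyper-V guest setup: report a Linux guest OS ID and program the hypercall page MSR. */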
static inline void hv_init(vm_vaddr_t pgs_gpa)
{
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
}

static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
{
	u32 vcpu_id;

	x2apic_enable();
	hv_init(pgs_gpa);

	vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

	/* Signal sender vCPU we're ready */
	ipis_rcvd[vcpu_id] = (u64)-1;

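	/*
	 * Park with interrupts enabled so IPIs can be delivered: safe_halt()
	 * halts with interrupts on, and cli() re-disables them once an IPI
	 * has woken the vCPU and its handler has run.
	 */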
	for (;;) {
		safe_halt();
		cli();
	}
}

static void guest_ipi_handler(struct ex_regs *regs)
{
	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

	ipis_rcvd[vcpu_id]++;
	wrmsr(HV_X64_MSR_EOI, 1);
}

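/* Crude delay loop: give the hypervisor time to deliver pending IPIs. */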
static inline void nop_loop(void)
{
	int i;

	for (i = 0; i < 100000000; i++)
		asm volatile("nop");
}

static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
{
	struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page;
	struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page;
	int stage = 1, ipis_expected[2] = {0};

	hv_init(pgs_gpa);
	GUEST_SYNC(stage++);

	/* Wait for receiver vCPUs to come up */
	while (!ipis_rcvd[RECEIVER_VCPU_ID_1] || !ipis_rcvd[RECEIVER_VCPU_ID_2])
		nop_loop();
	ipis_rcvd[RECEIVER_VCPU_ID_1] = ipis_rcvd[RECEIVER_VCPU_ID_2] = 0;

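	/*
	 * Memory-based ('slow') hypercalls take the control word plus input
	 * and output buffer GPAs: the first hypercall page doubles as the
	 * input page, the second (pgs_gpa + 4096) as the output page.
	 */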
	/* 'Slow' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	ipi->vector = IPI_VECTOR;
	ipi->cpu_mask = 1 << RECEIVER_VCPU_ID_1;
	hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'Fast' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	hyperv_hypercall(HVCALL_SEND_IPI | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, 1 << RECEIVER_VCPU_ID_1);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);

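	/*
	 * The Ex variant carries a variable-sized header (the bank_contents[]
	 * array); its size in 8-byte chunks is encoded into the control word
	 * at HV_HYPERCALL_VARHEAD_OFFSET.
	 */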
	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	ipi_ex->vp_set.valid_bank_mask = 1 << 0;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);
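	/*
	 * 'XMM fast' hypercalls pass the input in XMM registers instead of
	 * memory; hyperv_write_xmm_input() loads the vp_set payload into the
	 * XMM input registers beforehand.
	 */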
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	ipi_ex->vp_set.valid_bank_mask = 1 << 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	ipi_ex->vp_set.valid_bank_mask = 1 << 1 | 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	ipi_ex->vp_set.bank_contents[1] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_ALL;
	hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/*
	 * 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL.
	 */
	ipi_ex->vp_set.valid_bank_mask = 0;
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, HV_GENERIC_SET_ALL);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

	GUEST_DONE();
}

static void *vcpu_thread(void *arg)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
	int old, r;

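	/*
	 * Use asynchronous cancellation so the main thread can terminate
	 * receivers that are blocked in KVM_RUN and would never reach a
	 * cancellation point.
	 */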
	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
		    vcpu->id, r);

	vcpu_run(vcpu);

	TEST_FAIL("vCPU %u exited unexpectedly", vcpu->id);

	return NULL;
}

static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
	void *retval;
	int r;

	r = pthread_cancel(thread);
	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);

	r = pthread_join(thread, &retval);
	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);
	TEST_ASSERT(retval == PTHREAD_CANCELED,
		    "expected retval=%p, got %p", PTHREAD_CANCELED,
		    retval);
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu[3];
	vm_vaddr_t hcall_page;
	pthread_t threads[2];
	int stage = 1, r;
	struct ucall uc;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_SEND_IPI));

	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);

	/* Hypercall input/output */
	hcall_page = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

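	/*
	 * Give the receivers explicit VP indices on both sides of the 64-VP
	 * boundary (2 and 65) so the sparse-set tests exercise both the first
	 * and the second bank.
	 */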
	vcpu[1] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_1, receiver_code);
	vcpu_args_set(vcpu[1], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_1);
	vcpu_set_hv_cpuid(vcpu[1]);

	vcpu[2] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_2, receiver_code);
	vcpu_args_set(vcpu[2], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_2);
	vcpu_set_hv_cpuid(vcpu[2]);

	vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);

	vcpu_args_set(vcpu[0], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_hv_cpuid(vcpu[0]);

	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
	TEST_ASSERT(!r, "pthread_create failed errno=%d", r);

	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
	TEST_ASSERT(!r, "pthread_create failed errno=%d", r);

	while (true) {
		vcpu_run(vcpu[0]);

		TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);

		switch (get_ucall(vcpu[0], &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)",
				    uc.args[1], stage);
			break;
		case UCALL_DONE:
			goto done;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		stage++;
	}

done:
	cancel_join_vcpu_thread(threads[0], vcpu[1]);
	cancel_join_vcpu_thread(threads[1], vcpu[2]);
	kvm_vm_free(vm);

	return r;
}