xref: /kvm-unit-tests/x86/hyperv_connections.c (revision 1472a3843b122481866e805d40460f96819ade9d)
#include "libcflat.h"
#include "vm.h"
#include "smp.h"
#include "isr.h"
#include "atomic.h"
#include "hyperv.h"
#include "bitops.h"

#define MAX_CPUS 64

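/*
 * IDT vectors and SynIC synthetic interrupt numbers used for messages and
 * events; vCPU i owns connection IDs MSG_CONN_BASE + i and EVT_CONN_BASE + i
 * (see setup_cpu()).
 */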
#define MSG_VEC 0xb0
#define EVT_VEC 0xb1
#define MSG_SINT 0x8
#define EVT_SINT 0x9
#define MSG_CONN_BASE 0x10
#define EVT_CONN_BASE 0x20
#define MSG_TYPE 0x12345678

#define WAIT_CYCLES 10000000

static atomic_t ncpus_done;

struct hv_vcpu {
	struct hv_message_page *msg_page;
	struct hv_event_flags_page *evt_page;
	struct hv_input_post_message *post_msg;
	u8 msg_conn;
	u8 evt_conn;
	u64 hvcall_status;
	atomic_t sint_received;
};

static struct hv_vcpu hv_vcpus[MAX_CPUS];

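/* ISR shared by both SINT vectors: count deliveries on the current vCPU. */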
static void sint_isr(isr_regs_t *regs)
{
	atomic_inc(&hv_vcpus[smp_id()].sint_received);
}

static void *hypercall_page;

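/*
 * A non-zero guest OS ID has to be registered before hypercalls can be
 * enabled; the hypervisor then provides the hypercall trampoline through
 * the page pointed to by HV_X64_MSR_HYPERCALL.
 */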
static void setup_hypercall(void)
{
	u64 guestid = (0x8f00ull << 48);

	hypercall_page = alloc_page();
	if (!hypercall_page)
		report_abort("failed to allocate hypercall page");
	memset(hypercall_page, 0, PAGE_SIZE);

	wrmsr(HV_X64_MSR_GUEST_OS_ID, guestid);

	wrmsr(HV_X64_MSR_HYPERCALL,
	      (u64)virt_to_phys(hypercall_page) | HV_X64_MSR_HYPERCALL_ENABLE);
}

static void teardown_hypercall(void)
{
	wrmsr(HV_X64_MSR_HYPERCALL, 0);
	wrmsr(HV_X64_MSR_GUEST_OS_ID, 0);
	free_page(hypercall_page);
}

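/*
 * Issue a hypercall through the hypercall page, following the TLFS calling
 * convention: on x86_64 the control word goes in rcx, the input parameter
 * (a GPA, or the value itself for fast calls) in rdx, the output parameter
 * GPA in r8, and the status is returned in rax; on 32-bit the control word
 * is passed in edx:eax, input in ebx:ecx, output in edi:esi, and the status
 * comes back in edx:eax.
 */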
static u64 do_hypercall(u16 code, u64 arg, bool fast)
{
	u64 ret;
	u64 ctl = code;
	if (fast)
		ctl |= HV_HYPERCALL_FAST;

	asm volatile (
#ifdef __x86_64__
		      /* no output parameter: zero r8 before the call */
		      "mov $0, %%r8\n\t"
		      "call *%[hcall_page]"
		      : "=a"(ret)
		      : "c"(ctl), "d"(arg),
#else
		      "call *%[hcall_page]"
		      : "=A"(ret)
		      : "A"(ctl),
			"b" ((u32)(arg >> 32)), "c" ((u32)arg),
			"D"(0), "S"(0),
#endif
		      [hcall_page] "m" (hypercall_page)
#ifdef __x86_64__
		      : "r8"
#endif
		     );

	return ret;
}

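/*
 * Per-vCPU setup, run via on_cpu(): switch to the caller's page tables (ctx
 * is its CR3), enable the SynIC message and event flags pages, create the
 * message/event connections routed to this vCPU's SINTs, and pre-build a
 * PostMessage input block addressed to this vCPU's message connection.
 */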
static void setup_cpu(void *ctx)
{
	int vcpu;
	struct hv_vcpu *hv;

	write_cr3((ulong)ctx);
	irq_enable();

	vcpu = smp_id();
	hv = &hv_vcpus[vcpu];

	hv->msg_page = alloc_page();
	hv->evt_page = alloc_page();
	hv->post_msg = alloc_page();
	if (!hv->msg_page || !hv->evt_page || !hv->post_msg)
		report_abort("failed to allocate SynIC pages for vcpu %d", vcpu);
	memset(hv->msg_page, 0, sizeof(*hv->msg_page));
	memset(hv->evt_page, 0, sizeof(*hv->evt_page));
	memset(hv->post_msg, 0, sizeof(*hv->post_msg));
	hv->msg_conn = MSG_CONN_BASE + vcpu;
	hv->evt_conn = EVT_CONN_BASE + vcpu;

	wrmsr(HV_X64_MSR_SIMP,
	      (u64)virt_to_phys(hv->msg_page) | HV_SYNIC_SIMP_ENABLE);
	wrmsr(HV_X64_MSR_SIEFP,
	      (u64)virt_to_phys(hv->evt_page) | HV_SYNIC_SIEFP_ENABLE);
	wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);

	msg_conn_create(MSG_SINT, MSG_VEC, hv->msg_conn);
	evt_conn_create(EVT_SINT, EVT_VEC, hv->evt_conn);

	hv->post_msg->connectionid = hv->msg_conn;
	hv->post_msg->message_type = MSG_TYPE;
	hv->post_msg->payload_size = 8;
	hv->post_msg->payload[0] = (u64)vcpu << 16;
}

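/* Undo setup_cpu() in reverse order: destroy the connections, disable the
 * SynIC pages, and free the per-vCPU allocations. */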
static void teardown_cpu(void *ctx)
{
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];

	evt_conn_destroy(EVT_SINT, hv->evt_conn);
	msg_conn_destroy(MSG_SINT, hv->msg_conn);

	wrmsr(HV_X64_MSR_SCONTROL, 0);
	wrmsr(HV_X64_MSR_SIEFP, 0);
	wrmsr(HV_X64_MSR_SIMP, 0);

	free_page(hv->post_msg);
	free_page(hv->evt_page);
	free_page(hv->msg_page);
}

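/*
 * Post a message on the connection owned by vCPU (ulong)ctx; with
 * dst_add != 0 in run_test() this posts to another vCPU's connection.
 * The payload is bumped on every post so that msg_busy() can tell the
 * still-queued old message from a fresh delivery.
 */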
static void do_msg(void *ctx)
{
	int vcpu = (ulong)ctx;
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_input_post_message *msg = hv->post_msg;

	msg->payload[0]++;
	atomic_set(&hv->sint_received, 0);
	hv->hvcall_status = do_hypercall(HVCALL_POST_MESSAGE,
					 virt_to_phys(msg), false);
	atomic_inc(&ncpus_done);
}

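/*
 * Free the message slot by clearing message_type, then write
 * HV_X64_MSR_EOM so the hypervisor knows it may now deliver a message
 * that was left pending while the slot was busy.
 */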
static void clear_msg(void *ctx)
{
	/* should only be done on the current vcpu */
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];

	atomic_set(&hv->sint_received, 0);
	msg->header.message_type = 0;
	barrier();
	wrmsr(HV_X64_MSR_EOM, 0);
	atomic_inc(&ncpus_done);
}

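/* The message arrived intact: header and payload match what was posted,
 * no pending flag, successful hypercall, exactly one SINT delivered. */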
static bool msg_ok(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_input_post_message *post_msg = hv->post_msg;
	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];

	return msg->header.message_type == post_msg->message_type &&
		msg->header.payload_size == post_msg->payload_size &&
		msg->header.message_flags.msg_pending == 0 &&
		msg->u.payload[0] == post_msg->payload[0] &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 1;
}

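/* The message hit an occupied slot: the old payload is still in place,
 * msg_pending is set, and no additional SINT was delivered. */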
static bool msg_busy(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_input_post_message *post_msg = hv->post_msg;
	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];

	return msg->header.message_type == post_msg->message_type &&
		msg->header.payload_size == post_msg->payload_size &&
		msg->header.message_flags.msg_pending == 1 &&
		msg->u.payload[0] == post_msg->payload[0] - 1 &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 0;
}

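/*
 * Signal the event connection owned by vCPU (ulong)ctx.  SIGNAL_EVENT is
 * issued as a fast hypercall: the connection ID is passed directly in the
 * argument register instead of through memory.
 */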
static void do_evt(void *ctx)
{
	int vcpu = (ulong)ctx;
	struct hv_vcpu *hv = &hv_vcpus[vcpu];

	atomic_set(&hv->sint_received, 0);
	hv->hvcall_status = do_hypercall(HVCALL_SIGNAL_EVENT,
					 hv->evt_conn, true);
	atomic_inc(&ncpus_done);
}

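/* Clear this vCPU's event flag bit so that a later SIGNAL_EVENT sets it
 * anew and raises a fresh SINT. */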
static void clear_evt(void *ctx)
{
	/* should only be done on the current vcpu */
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;

	atomic_set(&hv->sint_received, 0);
	flags[BIT_WORD(hv->evt_conn)] &= ~BIT_MASK(hv->evt_conn);
	barrier();
	atomic_inc(&ncpus_done);
}

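/* evt_ok(): exactly our flag bit is set and one SINT arrived;
 * evt_busy(): the bit was already set, so no new SINT is expected. */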
static bool evt_ok(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;

	return flags[BIT_WORD(hv->evt_conn)] == BIT_MASK(hv->evt_conn) &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 1;
}

static bool evt_busy(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;

	return flags[BIT_WORD(hv->evt_conn)] == BIT_MASK(hv->evt_conn) &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 0;
}

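/*
 * Run func on all ncpus vCPUs in parallel, with vCPU i targeting vCPU
 * (i + dst_add) % ncpus, then spin for wait_cycles to give the resulting
 * interrupts time to arrive, and return the number of vCPUs that satisfy
 * is_ok.
 */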
static int run_test(int ncpus, int dst_add, ulong wait_cycles,
		    void (*func)(void *), bool (*is_ok)(int))
{
	int i, ret = 0;

	atomic_set(&ncpus_done, 0);
	for (i = 0; i < ncpus; i++) {
		ulong dst = (i + dst_add) % ncpus;
		on_cpu_async(i, func, (void *)dst);
	}
	while (atomic_read(&ncpus_done) != ncpus)
		pause();

	while (wait_cycles--)
		pause();

	if (is_ok)
		for (i = 0; i < ncpus; i++)
			ret += is_ok(i);
	return ret;
}

#define HV_STATUS_INVALID_HYPERCALL_CODE        2

int main(int ac, char **av)
{
	int ncpus, ncpus_ok, i;

	if (!synic_supported()) {
		report_skip("Hyper-V SynIC is not supported");
		goto summary;
	}

	setup_vm();
	smp_init();
	ncpus = cpu_count();
	if (ncpus > MAX_CPUS)
		report_abort("# cpus: %d > %d", ncpus, MAX_CPUS);

	handle_irq(MSG_VEC, sint_isr);
	handle_irq(EVT_VEC, sint_isr);

	setup_hypercall();

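	/*
	 * Probe for connection support: if the hypervisor doesn't implement
	 * SIGNAL_EVENT at all, a fast call on a bogus connection ID fails
	 * with HV_STATUS_INVALID_HYPERCALL_CODE, and the test is skipped.
	 */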
	if (do_hypercall(HVCALL_SIGNAL_EVENT, 0x1234, true) ==
	    HV_STATUS_INVALID_HYPERCALL_CODE) {
		report_skip("Hyper-V SynIC connections are not supported");
		goto summary;
	}

	for (i = 0; i < ncpus; i++)
		on_cpu(i, setup_cpu, (void *)read_cr3());

	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, do_msg, msg_ok);
	report("send message to self: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	run_test(ncpus, 0, 0, clear_msg, NULL);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_msg, msg_ok);
	report("send message to another cpu: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_msg, msg_busy);
	report("send message to busy slot: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, clear_msg, msg_ok);
	report("receive pending message: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, do_evt, evt_ok);
	report("signal event on self: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	run_test(ncpus, 0, 0, clear_evt, NULL);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_evt, evt_ok);
	report("signal event on another cpu: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_evt, evt_busy);
	report("signal event already set: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	for (i = 0; i < ncpus; i++)
		on_cpu(i, teardown_cpu, NULL);

	teardown_hypercall();

summary:
	return report_summary();
}