/* xref: /kvm-unit-tests/x86/hyperv_connections.c (revision ddbcb8f4ca8aedc9835851bd9acf2cd5221d7476) */
1 #include "libcflat.h"
2 #include "vm.h"
3 #include "smp.h"
4 #include "isr.h"
5 #include "atomic.h"
6 #include "hyperv.h"
7 #include "bitops.h"
8 #include "alloc_page.h"
9 
#define MAX_CPUS 64

/* Arbitrary (but distinct) IDT vectors for message and event SINT delivery. */
#define MSG_VEC 0xb0
#define EVT_VEC 0xb1
/* SynIC synthetic interrupt source numbers used for messages and events. */
#define MSG_SINT 0x8
#define EVT_SINT 0x9
/* Per-vcpu connection IDs: connection = base + vcpu index. */
#define MSG_CONN_BASE 0x10
#define EVT_CONN_BASE 0x20
/* Arbitrary message type stamped into every posted message. */
#define MSG_TYPE 0x12345678

/* Busy-wait budget for interrupts to arrive after all vcpus finish. */
#define WAIT_CYCLES 10000000

/* Count of vcpus that have completed the current test step. */
static atomic_t ncpus_done;
23 
/* Per-vcpu SynIC test state. */
struct hv_vcpu {
	struct hv_message_page *msg_page;	/* SIMP: message page */
	struct hv_event_flags_page *evt_page;	/* SIEFP: event flags page */
	struct hv_input_post_message *post_msg;	/* HVCALL_POST_MESSAGE input buffer */
	u8 msg_conn;				/* message connection id (MSG_CONN_BASE + vcpu) */
	u8 evt_conn;				/* event connection id (EVT_CONN_BASE + vcpu) */
	u64 hvcall_status;			/* status of the last hypercall issued */
	atomic_t sint_received;			/* SINT deliveries seen since last reset */
};

static struct hv_vcpu hv_vcpus[MAX_CPUS];
35 
/*
 * Shared ISR for both MSG_VEC and EVT_VEC: record one SINT delivery
 * on the vcpu that took the interrupt.
 */
static void sint_isr(isr_regs_t *regs)
{
	struct hv_vcpu *hv = &hv_vcpus[smp_id()];

	atomic_inc(&hv->sint_received);
}
40 
/* Guest page the hypervisor fills with the hypercall trampoline. */
static void *hypercall_page;

/*
 * Enable the Hyper-V hypercall interface: register a guest OS ID
 * (required before the hypercall MSR takes effect) and point the
 * hypercall MSR at a freshly allocated page.
 */
static void setup_hypercall(void)
{
	/* Vendor-neutral "open source" guest OS ID; value is arbitrary. */
	u64 guestid = (0x8f00ull << 48);

	hypercall_page = alloc_page();
	if (!hypercall_page)
		report_abort("failed to allocate hypercall page");

	wrmsr(HV_X64_MSR_GUEST_OS_ID, guestid);

	wrmsr(HV_X64_MSR_HYPERCALL,
	      (u64)virt_to_phys(hypercall_page) | HV_X64_MSR_HYPERCALL_ENABLE);
}
56 
/*
 * Undo setup_hypercall(): disable the hypercall MSR and clear the guest
 * OS ID *before* freeing the page, so the hypervisor never sees a stale
 * hypercall page mapping.
 */
static void teardown_hypercall(void)
{
	wrmsr(HV_X64_MSR_HYPERCALL, 0);
	wrmsr(HV_X64_MSR_GUEST_OS_ID, 0);
	free_page(hypercall_page);
}
63 
/*
 * Issue a Hyper-V hypercall through the hypercall page.
 *
 * x86_64 ABI: RCX = control word, RDX = input parameter (GPA, or the
 * value itself for fast calls), R8 = output parameter GPA, RAX = status.
 * i386 ABI: EDX:EAX = control, EBX:ECX = input, EDI:ESI = output.
 *
 * This test never uses an output parameter, so the output register(s)
 * must be zero at the time of the call.  NOTE(review): the previous
 * version zeroed %r8 *after* the call instruction, i.e. after the
 * hypervisor had already consumed it — R8 is now cleared before the
 * call, matching what the 32-bit path already did with "D"(0), "S"(0).
 *
 * @code: hypercall code (e.g. HVCALL_POST_MESSAGE)
 * @arg:  input parameter (GPA for slow calls, immediate for fast calls)
 * @fast: use the fast-call convention (input passed in registers)
 *
 * Returns the hypercall status (0 on success).
 */
static u64 do_hypercall(u16 code, u64 arg, bool fast)
{
	u64 ret;
	u64 ctl = code;

	if (fast)
		ctl |= HV_HYPERCALL_FAST;

	asm volatile (
#ifdef __x86_64__
		      "mov $0, %%r8\n\t"
		      "call *%[hcall_page]"
		      : "=a"(ret)
		      : "c"(ctl), "d"(arg),
#else
		      "call *%[hcall_page]"
		      : "=A"(ret)
		      : "A"(ctl),
			"b" ((u32)(arg >> 32)), "c" ((u32)arg),
			"D"(0), "S"(0),
#endif
		      [hcall_page] "m" (hypercall_page)
#ifdef __x86_64__
		      : "r8"
#endif
		     );

	return ret;
}
90 
/*
 * Per-vcpu SynIC setup, run on each vcpu via on_cpu().
 * @ctx: CR3 value of the boot cpu, so all vcpus share page tables.
 *
 * Allocates the message/event pages, enables the SynIC (SIMP and SIEFP
 * must be programmed before SCONTROL), creates this vcpu's message and
 * event connections, and pre-builds the post-message input buffer.
 */
static void setup_cpu(void *ctx)
{
	int vcpu;
	struct hv_vcpu *hv;

	write_cr3((ulong)ctx);
	sti();

	vcpu = smp_id();
	hv = &hv_vcpus[vcpu];

	hv->msg_page = alloc_page();
	hv->evt_page = alloc_page();
	hv->post_msg = alloc_page();
	if (!hv->msg_page || !hv->evt_page || !hv->post_msg)
		report_abort("failed to allocate synic pages for vcpu");
	/* Connection IDs are unique per vcpu. */
	hv->msg_conn = MSG_CONN_BASE + vcpu;
	hv->evt_conn = EVT_CONN_BASE + vcpu;

	/* Point SIMP/SIEFP at our pages, then turn the SynIC on. */
	wrmsr(HV_X64_MSR_SIMP,
	      (u64)virt_to_phys(hv->msg_page) | HV_SYNIC_SIMP_ENABLE);
	wrmsr(HV_X64_MSR_SIEFP,
	      (u64)virt_to_phys(hv->evt_page) | HV_SYNIC_SIEFP_ENABLE);
	wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);

	msg_conn_create(MSG_SINT, MSG_VEC, hv->msg_conn);
	evt_conn_create(EVT_SINT, EVT_VEC, hv->evt_conn);

	/* Prepare the message this vcpu's connection will receive. */
	hv->post_msg->connectionid = hv->msg_conn;
	hv->post_msg->message_type = MSG_TYPE;
	hv->post_msg->payload_size = 8;
	/* Payload encodes the vcpu id; low bits count do_msg() iterations. */
	hv->post_msg->payload[0] = (u64)vcpu << 16;
}
124 
/*
 * Per-vcpu SynIC teardown, run on each vcpu: destroy connections and
 * disable the SynIC (reverse of setup_cpu's ordering) before freeing
 * the pages the hypervisor was writing to.
 */
static void teardown_cpu(void *ctx)
{
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];

	evt_conn_destroy(EVT_SINT, hv->evt_conn);
	msg_conn_destroy(MSG_SINT, hv->msg_conn);

	wrmsr(HV_X64_MSR_SCONTROL, 0);
	wrmsr(HV_X64_MSR_SIEFP, 0);
	wrmsr(HV_X64_MSR_SIMP, 0);

	free_page(hv->post_msg);
	free_page(hv->evt_page);
	free_page(hv->msg_page);
}
141 
do_msg(void * ctx)142 static void do_msg(void *ctx)
143 {
144 	int vcpu = (ulong)ctx;
145 	struct hv_vcpu *hv = &hv_vcpus[vcpu];
146 	struct hv_input_post_message *msg = hv->post_msg;
147 
148 	msg->payload[0]++;
149 	atomic_set(&hv->sint_received, 0);
150 	hv->hvcall_status = do_hypercall(HVCALL_POST_MESSAGE,
151 					 virt_to_phys(msg), 0);
152 	atomic_inc(&ncpus_done);
153 }
154 
/*
 * Consume the pending message in this vcpu's SIMP slot and signal EOM
 * so the hypervisor may deliver a queued (pending) message.
 * Ordering matters: the slot must be marked free (message_type = 0)
 * and that store made visible (barrier) before writing the EOM MSR.
 */
static void clear_msg(void *ctx)
{
	/* should only be done on the current vcpu */
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];

	atomic_set(&hv->sint_received, 0);
	msg->header.message_type = 0;
	barrier();
	wrmsr(HV_X64_MSR_EOM, 0);
	atomic_inc(&ncpus_done);
}
168 
msg_ok(int vcpu)169 static bool msg_ok(int vcpu)
170 {
171 	struct hv_vcpu *hv = &hv_vcpus[vcpu];
172 	struct hv_input_post_message *post_msg = hv->post_msg;
173 	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];
174 
175 	return msg->header.message_type == post_msg->message_type &&
176 		msg->header.payload_size == post_msg->payload_size &&
177 		msg->header.message_flags.msg_pending == 0 &&
178 		msg->u.payload[0] == post_msg->payload[0] &&
179 		hv->hvcall_status == 0 &&
180 		atomic_read(&hv->sint_received) == 1;
181 }
182 
msg_busy(int vcpu)183 static bool msg_busy(int vcpu)
184 {
185 	struct hv_vcpu *hv = &hv_vcpus[vcpu];
186 	struct hv_input_post_message *post_msg = hv->post_msg;
187 	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];
188 
189 	return msg->header.message_type == post_msg->message_type &&
190 		msg->header.payload_size == post_msg->payload_size &&
191 		msg->header.message_flags.msg_pending == 1 &&
192 		msg->u.payload[0] == post_msg->payload[0] - 1 &&
193 		hv->hvcall_status == 0 &&
194 		atomic_read(&hv->sint_received) == 0;
195 }
196 
do_evt(void * ctx)197 static void do_evt(void *ctx)
198 {
199 	int vcpu = (ulong)ctx;
200 	struct hv_vcpu *hv = &hv_vcpus[vcpu];
201 
202 	atomic_set(&hv->sint_received, 0);
203 	hv->hvcall_status = do_hypercall(HVCALL_SIGNAL_EVENT,
204 					 hv->evt_conn, 1);
205 	atomic_inc(&ncpus_done);
206 }
207 
clear_evt(void * ctx)208 static void clear_evt(void *ctx)
209 {
210 	/* should only be done on the current vcpu */
211 	int vcpu = smp_id();
212 	struct hv_vcpu *hv = &hv_vcpus[vcpu];
213 	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;
214 
215 	atomic_set(&hv->sint_received, 0);
216 	flags[BIT_WORD(hv->evt_conn)] &= ~BIT_MASK(hv->evt_conn);
217 	barrier();
218 	atomic_inc(&ncpus_done);
219 }
220 
evt_ok(int vcpu)221 static bool evt_ok(int vcpu)
222 {
223 	struct hv_vcpu *hv = &hv_vcpus[vcpu];
224 	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;
225 
226 	return flags[BIT_WORD(hv->evt_conn)] == BIT_MASK(hv->evt_conn) &&
227 		hv->hvcall_status == 0 &&
228 		atomic_read(&hv->sint_received) == 1;
229 }
230 
evt_busy(int vcpu)231 static bool evt_busy(int vcpu)
232 {
233 	struct hv_vcpu *hv = &hv_vcpus[vcpu];
234 	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;
235 
236 	return flags[BIT_WORD(hv->evt_conn)] == BIT_MASK(hv->evt_conn) &&
237 		hv->hvcall_status == 0 &&
238 		atomic_read(&hv->sint_received) == 0;
239 }
240 
/*
 * Run one test step: asynchronously invoke @func on every vcpu (each
 * targeting vcpu (i + dst_add) % ncpus), wait for all to finish, give
 * interrupts @wait_cycles pause-loops to land, then count vcpus for
 * which @is_ok holds.  @is_ok may be NULL (no check, returns 0).
 */
static int run_test(int ncpus, int dst_add, ulong wait_cycles,
		    void (*func)(void *), bool (*is_ok)(int))
{
	int cpu, passed = 0;

	atomic_set(&ncpus_done, 0);
	for (cpu = 0; cpu < ncpus; cpu++) {
		ulong target = (cpu + dst_add) % ncpus;

		on_cpu_async(cpu, func, (void *)target);
	}

	/* First wait for every vcpu to run func ... */
	while (atomic_read(&ncpus_done) != ncpus)
		pause();
	/* ... then allow time for interrupt delivery. */
	while (wait_cycles--)
		pause();

	if (is_ok) {
		for (cpu = 0; cpu < ncpus; cpu++)
			passed += is_ok(cpu);
	}
	return passed;
}
262 
/* Status returned when the hypervisor does not implement a hypercall. */
#define HV_STATUS_INVALID_HYPERCALL_CODE        2

int main(int ac, char **av)
{
	int ncpus, ncpus_ok, i;

	if (!hv_synic_supported()) {
		report_skip("Hyper-V SynIC is not supported");
		goto summary;
	}

	setup_vm();
	ncpus = cpu_count();
	if (ncpus > MAX_CPUS)
		report_abort("# cpus: %d > %d", ncpus, MAX_CPUS);

	handle_irq(MSG_VEC, sint_isr);
	handle_irq(EVT_VEC, sint_isr);

	setup_hypercall();

	/* Probe: a bogus SIGNAL_EVENT tells us if connections exist at all. */
	if (do_hypercall(HVCALL_SIGNAL_EVENT, 0x1234, 1) ==
	    HV_STATUS_INVALID_HYPERCALL_CODE) {
		report_skip("Hyper-V SynIC connections are not supported");
		goto summary;
	}

	for (i = 0; i < ncpus; i++)
		on_cpu(i, setup_cpu, (void *)read_cr3());

	/* dst_add == 0: each vcpu posts to its own connection. */
	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, do_msg, msg_ok);
	report(ncpus_ok == ncpus, "send message to self: %d/%d", ncpus_ok,
	       ncpus);

	/* Free all message slots before the cross-cpu test. */
	run_test(ncpus, 0, 0, clear_msg, NULL);

	/* dst_add == 1: each vcpu posts to its neighbour's connection. */
	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_msg, msg_ok);
	report(ncpus_ok == ncpus, "send message to another cpu: %d/%d",
	       ncpus_ok, ncpus);

	/* Slots still occupied from the previous step: expect msg_pending. */
	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_msg, msg_busy);
	report(ncpus_ok == ncpus, "send message to busy slot: %d/%d",
	       ncpus_ok, ncpus);

	/* EOM should now deliver the queued (pending) messages. */
	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, clear_msg, msg_ok);
	report(ncpus_ok == ncpus, "receive pending message: %d/%d", ncpus_ok,
	       ncpus);

	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, do_evt, evt_ok);
	report(ncpus_ok == ncpus, "signal event on self: %d/%d", ncpus_ok,
	       ncpus);

	/* Reset all event flags before the cross-cpu test. */
	run_test(ncpus, 0, 0, clear_evt, NULL);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_evt, evt_ok);
	report(ncpus_ok == ncpus, "signal event on another cpu: %d/%d",
	       ncpus_ok, ncpus);

	/* Flags still set from the previous step: no new interrupt expected. */
	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_evt, evt_busy);
	report(ncpus_ok == ncpus, "signal event already set: %d/%d", ncpus_ok,
	       ncpus);

	for (i = 0; i < ncpus; i++)
		on_cpu(i, teardown_cpu, NULL);

	teardown_hypercall();

summary:
	return report_summary();
}
333