xref: /qemu/hw/hyperv/hyperv.c (revision 648fe157d33436f042d6b6434b9b88079f67fa33)
/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "system/address-spaces.h"
#include "system/memory.h"
#include "exec/target_page.h"
#include "linux/kvm.h"
#include "system/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"
#include "target/i386/kvm/hyperv-proto.h"

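/*
 * Per-vcpu SynIC (synthetic interrupt controller) state: the guest-visible
 * message page and event flags page are backed by RAM memory regions that
 * get mapped into guest physical memory at the addresses the guest
 * programs, plus the list of SINT routes used to deliver their interrupts.
 */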
struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool sctl_enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;

    QemuMutex sint_routes_mutex;
    QLIST_HEAD(, HvSintRoute) sint_routes;
};

#define TYPE_SYNIC "hyperv-synic"
OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}

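/*
 * (Re)map the overlay pages: tear down a previously mapped message or event
 * page and map it at the new address if one is given.  A zero address means
 * the page is disabled.
 */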
static void synic_update(SynICState *synic, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->sctl_enabled = sctl_enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, sctl_enable, msg_page_addr, event_page_addr);
}

static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);
    qemu_mutex_init(&synic->sint_routes_mutex);
    QLIST_INIT(&synic->sint_routes);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);
    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
    assert(QLIST_EMPTY(&synic->sint_routes));
}

static void synic_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    device_class_set_legacy_reset(dc, synic_reset);
    dc->user_creatable = false;
}

void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_cold_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers).  To guarantee
 * serialization with both KVM vcpu and the guest cpu, the messages are first
 * staged in an intermediate area and then posted to the SynIC message page in
 * the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy the staged msg into the
         * msg slot and notify the guest, records the status, marks the
         * posting done (BUSY -> POSTED), and schedules the sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;
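
/*
 * In short, the staged-message life cycle is:
 *
 *   FREE --hyperv_post_msg--> BUSY --cpu_post_msg--> POSTED --sint_msg_bh--> FREE
 */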

struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
    QLIST_ENTRY(HvSintRoute) link;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy,
     * we set the msg_pending flag in it, so it's the guest that will do EOM
     * and trigger the notification from KVM via sint_ack_notifier.
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to guest in the
 * vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                        HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}
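
/*
 * A minimal usage sketch (illustrative comment only, not part of the build;
 * the names and the message type are hypothetical):
 *
 *     static HvSintRoute *route;
 *     static struct hyperv_message msg = {
 *         .header.message_type = HV_MESSAGE_TIMER_EXPIRED,
 *     };
 *
 *     static void msg_done_cb(void *data, int status)
 *     {
 *         if (status == -EAGAIN) {
 *             // the slot was busy; the guest has now EOM'ed, so repost
 *             hyperv_post_msg(route, &msg);
 *         }
 *     }
 *
 *     ...
 *     route = hyperv_sint_route_new(vp_index, sint, msg_done_cb, NULL);
 *     if (route) {
 *         hyperv_post_msg(route, &msg);
 *     }
 */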

static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * The guest consumed the previous message, so complete the current one
     * with -EAGAIN and let the msg originator retry.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno > HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->sctl_enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

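    /* only signal the SINT if this call actually set the flag (0 -> 1) */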
    if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}

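/*
 * Allocate a KVM GSI and install a routing entry that directs it to the
 * given SINT on the given vcpu, so that signaling the GSI injects the SynIC
 * interrupt in the kernel.
 */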
static int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu,
                                         uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route = NULL;
    EventNotifier *ack_notifier = NULL;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;
    bool ack_event_initialized = false;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    if (!sint_route) {
        return NULL;
    }

    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        if (!sint_route->staged_msg) {
            goto cleanup_err_sint;
        }
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto cleanup_err_sint;
        }
        event_notifier_set_handler(ack_notifier, sint_ack_handler);
        ack_event_initialized = true;
    }

    /* See if we are done or we need to set up a GSI for this SintRoute */
    if (!synic->sctl_enabled) {
        goto cleanup;
    }

    /* We need to set up a GSI for this SintRoute */
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto cleanup_err_sint;
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto cleanup_err_sint_notifier;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto cleanup_err_irqfd;
    }
    sint_route->gsi = gsi;
cleanup:
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);
    return sint_route;

cleanup_err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);

cleanup_err_sint_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);

cleanup_err_sint:
    if (ack_notifier) {
        if (ack_event_initialized) {
            event_notifier_set_handler(ack_notifier, NULL);
            event_notifier_cleanup(ack_notifier);
        }

        g_free(sint_route->staged_msg);
    }

    g_free(sint_route);
    return NULL;
}

void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    SynICState *synic;

    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    synic = sint_route->synic;
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_REMOVE(sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);

    if (sint_route->gsi) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                              &sint_route->sint_set_notifier,
                                              sint_route->gsi);
        kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
        event_notifier_cleanup(&sint_route->sint_set_notifier);
    }

    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
}

int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    if (!sint_route->gsi) {
        return 0;
    }

    return event_notifier_set(&sint_route->sint_set_notifier);
}

typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

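/*
 * Register (handler != NULL) or unregister (handler == NULL) the message
 * handler for a connection ID.  Returns -EEXIST if the ID is already taken,
 * and -ENOENT when unregistering an unknown ID.
 */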
int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}
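
/*
 * A minimal usage sketch (illustrative comment only; the connection ID is
 * hypothetical): a device listening for guest HvSignalEvent hypercalls.
 *
 *     EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, false);
 *     if (hyperv_set_event_flag_handler(1, &notifier) == 0) {
 *         // watch event_notifier_get_fd(&notifier) in the main loop
 *     }
 *     ...
 *     hyperv_set_event_flag_handler(1, NULL);    // unregister
 *     event_notifier_cleanup(&notifier);
 */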

uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

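    /*
     * Input layout, as checked below (an assumption based on the masks
     * used here):
     *   bits  0-23  connection ID (HV_CONNECTION_ID_MASK)
     *   bits 32-47  flag number (unused here, must be zero)
     *   all other bits reserved, must be zero
     */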
    /*
     * Per spec, bits 32-47 contain the extra "flag number".  However, we
     * have no use for it, and in all known use cases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}

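/*
 * Hyper-V synthetic debugger (SynDbg) support: all hypercalls and helpers
 * below forward to a single handler registered with
 * hyperv_set_syndbg_handler().
 */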
static HvSynDbgHandler hv_syndbg_handler;
static void *hv_syndbg_context;

void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context)
{
    assert(!hv_syndbg_handler);
    hv_syndbg_handler = handler;
    hv_syndbg_context = context;
}

uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa)
{
    uint16_t ret;
    HvSynDbgMsg msg;
    struct hyperv_reset_debug_session_output *reset_dbg_session = NULL;
    hwaddr len;

    if (!hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    len = sizeof(*reset_dbg_session);
    reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1);
    if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_CONNECTION_INFO;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret) {
        goto cleanup;
    }

    reset_dbg_session->host_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->host_port = msg.u.connection_info.host_port;
    /* The following fields are only used as validation for KDVM */
    memset(&reset_dbg_session->host_mac, 0,
           sizeof(reset_dbg_session->host_mac));
    reset_dbg_session->target_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->target_port = msg.u.connection_info.host_port;
    memset(&reset_dbg_session->target_mac, 0,
           sizeof(reset_dbg_session->target_mac));
cleanup:
    if (reset_dbg_session) {
        cpu_physical_memory_unmap(reset_dbg_session,
                                  sizeof(*reset_dbg_session), 1, len);
    }

    return ret;
}

uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
                                        bool fast)
{
    uint16_t ret;
    struct hyperv_retrieve_debug_data_input *debug_data_in = NULL;
    struct hyperv_retrieve_debug_data_output *debug_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*debug_data_in);
    debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!debug_data_in || in_len < sizeof(*debug_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    out_len = sizeof(*debug_data_out);
    debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!debug_data_out || out_len < sizeof(*debug_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out);
    msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out);
    msg.u.recv.options = debug_data_in->options;
    msg.u.recv.timeout = debug_data_in->timeout;
    msg.u.recv.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret == HV_STATUS_NO_DATA) {
        debug_data_out->retrieved_count = 0;
        debug_data_out->remaining_count = debug_data_in->count;
        goto cleanup;
    } else if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    debug_data_out->retrieved_count = msg.u.recv.retrieved_count;
    debug_data_out->remaining_count =
        debug_data_in->count - msg.u.recv.retrieved_count;
cleanup:
    if (debug_data_out) {
        cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1,
                                  out_len);
    }

    if (debug_data_in) {
        cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0,
                                  in_len);
    }

    return ret;
}

uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast)
{
    uint16_t ret;
    struct hyperv_post_debug_data_input *post_data_in = NULL;
    struct hyperv_post_debug_data_output *post_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*post_data_in);
    post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!post_data_in || in_len < sizeof(*post_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) {
        ret = HV_STATUS_INVALID_PARAMETER;
        goto cleanup;
    }

    out_len = sizeof(*post_data_out);
    post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!post_data_out || out_len < sizeof(*post_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in);
    msg.u.send.count = post_data_in->count;
    msg.u.send.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    post_data_out->pending_count = msg.u.send.pending_count;
    ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS :
                                         HV_STATUS_SUCCESS;
cleanup:
    if (post_data_out) {
        cpu_physical_memory_unmap(post_data_out,
                                  sizeof(*post_data_out), 1, out_len);
    }

    if (post_data_in) {
        cpu_physical_memory_unmap(post_data_in,
                                  sizeof(*post_data_in), 0, in_len);
    }

    return ret;
}

uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa;
    msg.u.send.count = count;
    msg.u.send.is_raw = false;
    if (hv_syndbg_handler(hv_syndbg_context, &msg)) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    return HV_SYNDBG_STATUS_SEND_SUCCESS;
}

uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count)
{
    uint16_t ret;
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = ingpa;
    msg.u.recv.count = count;
    msg.u.recv.options = 0;
    msg.u.recv.timeout = 0;
    msg.u.recv.is_raw = false;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        return 0;
    }

    return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS,
                                     msg.u.recv.retrieved_count);
}

void hyperv_syndbg_set_pending_page(uint64_t ingpa)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return;
    }

    msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE;
    msg.u.pending_page.buf_gpa = ingpa;
    hv_syndbg_handler(hv_syndbg_context, &msg);
}

uint64_t hyperv_syndbg_query_options(void)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return 0;
    }

    msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS;
    if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) {
        return 0;
    }

    return msg.u.query_options.options;
}

static bool vmbus_recommended_features_enabled;

bool hyperv_are_vmbus_recommended_features_enabled(void)
{
    return vmbus_recommended_features_enabled;
}

void hyperv_set_vmbus_recommended_features_enabled(void)
{
    vmbus_recommended_features_enabled = true;
}