/* /qemu/hw/i386/kvm/xen_xenstore.c (revision c08f5d0e53b00f101c6aab7b5c7eabe22bab1962) */
/*
 * QEMU Xen emulation: Shared/overlay pages support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "migration/vmstate.h"

#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "xen_overlay.h"
#include "xen_evtchn.h"
#include "xen_xenstore.h"

#include "sysemu/kvm.h"
#include "sysemu/kvm_xen.h"

#include "hw/xen/interface/io/xs_wire.h"
#include "hw/xen/interface/event_channel.h"

#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)

#define XEN_PAGE_SHIFT 12
#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)

#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))

#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))

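/*
 * Each message on the XenStore ring is a struct xsd_sockmsg header followed
 * by at most XENSTORE_PAYLOAD_MAX bytes of payload; the request and response
 * buffers below are each sized to hold one maximal message.
 */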
struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    MemoryRegion xenstore_page;
    struct xenstore_domain_interface *xs;
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;
    uint32_t rsp_offset;
    bool rsp_pending;
    bool fatal_error;

    evtchn_port_t guest_port;
    evtchn_port_t be_port;
    struct xenevtchn_handle *eh;
};

struct XenXenstoreState *xen_xenstore_singleton;

static void xen_xenstore_event(void *opaque);

static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }
    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
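    /*
     * Have the main loop poll the backend event channel fd and dispatch
     * pending events to xen_xenstore_event().
     */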
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), true,
                       xen_xenstore_event, NULL, NULL, NULL, s);
}

static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}

static int xen_xenstore_pre_save(void *opaque)
{
    XenXenstoreState *s = opaque;

    if (s->eh) {
        s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
    }
    return 0;
}

static int xen_xenstore_post_load(void *opaque, int ver)
{
    XenXenstoreState *s = opaque;

    /*
     * As qemu/dom0, rebind to the guest's port. The Windows drivers may
     * unbind the XenStore evtchn and rebind to it, having obtained the
     * "remote" port through EVTCHNOP_status. In the case that migration
     * occurs while it's unbound, the "remote" port needs to be the same
     * as before so that the guest can find it, but should remain unbound.
     */
    if (s->guest_port) {
        int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
                                                     s->guest_port);
        if (be_port < 0) {
            return be_port;
        }
        s->be_port = be_port;
    }
    return 0;
}

static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_END_OF_LIST()
    }
};

static void xen_xenstore_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}

static const TypeInfo xen_xenstore_info = {
    .name          = TYPE_XEN_XENSTORE,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init    = xen_xenstore_class_init,
};

void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}

static void xen_xenstore_register_types(void)
{
    type_register_static(&xen_xenstore_info);
}

type_init(xen_xenstore_register_types)

uint16_t xen_xenstore_get_port(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    if (!s) {
        return 0;
    }
    return s->guest_port;
}

static void xen_xenstore_event(void *opaque)
{
    XenXenstoreState *s = opaque;
    evtchn_port_t port = xen_be_evtchn_pending(s->eh);
    if (port != s->be_port) {
        return;
    }
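    /*
     * No ring processing is implemented here yet: log the event, dump the
     * shared page for debugging, and notify the guest back.
     */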
    printf("xenstore event\n");
    /* We know this is a no-op. */
    xen_be_evtchn_unmask(s->eh, port);
    qemu_hexdump(stdout, "", s->xs, sizeof(*s->xs));
    xen_be_evtchn_notify(s->eh, s->be_port);
}

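/*
 * Allocate an unbound event channel in the guest's domain, with QEMU
 * (DOMID_QEMU) as the remote end, for the guest to use as its XenStore
 * notification port.
 */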
static void alloc_guest_port(XenXenstoreState *s)
{
    struct evtchn_alloc_unbound alloc = {
        .dom = DOMID_SELF,
        .remote_dom = DOMID_QEMU,
    };

    if (!xen_evtchn_alloc_unbound_op(&alloc)) {
        s->guest_port = alloc.port;
    }
}

int xen_xenstore_reset(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    int err;

    if (!s) {
        return -ENOTSUP;
    }

    s->req_offset = s->rsp_offset = 0;
    s->rsp_pending = false;

    if (!memory_region_is_mapped(&s->xenstore_page)) {
        uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS;
        xen_overlay_do_map_page(&s->xenstore_page, gpa);
    }

    alloc_guest_port(s);

    /*
     * As qemu/dom0, bind to the guest's port. For incoming migration, this
     * will be unbound as the guest's evtchn table is overwritten. We then
     * rebind to the correct guest port in xen_xenstore_post_load().
     */
    err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port);
    if (err < 0) {
        return err;
    }
    s->be_port = err;

    return 0;
}