/*
 * QEMU Xen emulation: Shared/overlay pages support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "migration/vmstate.h"

#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen_backend_ops.h"
#include "xen_overlay.h"
#include "xen_evtchn.h"
#include "xen_primary_console.h"
#include "xen_xenstore.h"

#include "system/kvm.h"
#include "system/kvm_xen.h"

#include "trace.h"

#include "xenstore_impl.h"

#include "hw/xen/interface/io/xs_wire.h"
#include "hw/xen/interface/event_channel.h"
#include "hw/xen/interface/grant_table.h"

#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)

#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))

#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))

struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    XenstoreImplState *impl;
    GList *watch_events; /* for the guest */

    MemoryRegion xenstore_page;
    struct xenstore_domain_interface *xs;
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;
    uint32_t rsp_offset;
    bool rsp_pending;
    bool fatal_error;

    evtchn_port_t guest_port;
    evtchn_port_t be_port;
    struct xenevtchn_handle *eh;

    uint8_t *impl_state;
    uint32_t impl_state_size;

    struct xengntdev_handle *gt;
    void *granted_xs;
};

struct XenXenstoreState *xen_xenstore_singleton;

static void xen_xenstore_event(void *opaque);
static void fire_watch_cb(void *opaque, const char *path, const char *token);

static struct xenstore_backend_ops emu_xenstore_backend_ops;

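/*
 * Write a printf-formatted value to /local/domain/<domid>/<relpath> in the
 * emulated xenstore, then apply the given permission list to that node.
 * Only used to populate the default nodes at realize time, so any failure
 * is fatal.
 */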
static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
                                                GList *perms,
                                                const char *relpath,
                                                const char *fmt, ...)
{
    gchar *abspath;
    gchar *value;
    va_list args;
    GByteArray *data;
    int err;

    abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath);
    va_start(args, fmt);
    value = g_strdup_vprintf(fmt, args);
    va_end(args);

    data = g_byte_array_new_take((void *)value, strlen(value));

    err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data);
    assert(!err);

    g_byte_array_unref(data);

    err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms);
    assert(!err);

    g_free(abspath);
}

static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);
    GList *perms;

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }
    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh),
                       xen_xenstore_event, NULL, NULL, NULL, s);

    s->impl = xs_impl_create(xen_domid);

    /* Populate the default nodes */

    /* Nodes owned by 'dom0' but readable by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
    perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));

    relpath_printf(s, perms, "", "%s", "");

    relpath_printf(s, perms, "domid", "%u", xen_domid);

    relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1);
    relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1);

    relpath_printf(s, perms, "platform/acpi", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s3", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s4", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0);

    g_list_free_full(perms, g_free);

    /* Nodes owned by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid));

    relpath_printf(s, perms, "attr", "%s", "");

    relpath_printf(s, perms, "control/shutdown", "%s", "");
    relpath_printf(s, perms, "control/feature-poweroff", "%u", 1);
    relpath_printf(s, perms, "control/feature-reboot", "%u", 1);
    relpath_printf(s, perms, "control/feature-suspend", "%u", 1);
    relpath_printf(s, perms, "control/feature-s3", "%u", 1);
    relpath_printf(s, perms, "control/feature-s4", "%u", 1);

    relpath_printf(s, perms, "data", "%s", "");
    relpath_printf(s, perms, "device", "%s", "");
    relpath_printf(s, perms, "drivers", "%s", "");
    relpath_printf(s, perms, "error", "%s", "");
    relpath_printf(s, perms, "feature", "%s", "");

    g_list_free_full(perms, g_free);

    xen_xenstore_ops = &emu_xenstore_backend_ops;
}

static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}

static int xen_xenstore_pre_save(void *opaque)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;

    if (s->eh) {
        s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
    }

    g_free(s->impl_state);
    save = xs_impl_serialize(s->impl);
    s->impl_state = save->data;
    s->impl_state_size = save->len;
    g_byte_array_free(save, false);

    return 0;
}

static int xen_xenstore_post_load(void *opaque, int ver)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;

    /*
     * As qemu/dom0, rebind to the guest's port. The Windows drivers may
     * unbind the XenStore evtchn and rebind to it, having obtained the
     * "remote" port through EVTCHNOP_status. In the case that migration
     * occurs while it's unbound, the "remote" port needs to be the same
     * as before so that the guest can find it, but should remain unbound.
     */
    if (s->guest_port) {
        int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
                                                     s->guest_port);
        if (be_port < 0) {
            return be_port;
        }
        s->be_port = be_port;
    }

    save = g_byte_array_new_take(s->impl_state, s->impl_state_size);
    s->impl_state = NULL;
    s->impl_state_size = 0;

    return xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
}

static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .unmigratable = 1, /* The PV back ends don't migrate yet */
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_UINT32(impl_state_size, XenXenstoreState),
        VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState,
                                    impl_state_size, 0,
                                    vmstate_info_uint8, uint8_t),
        VMSTATE_END_OF_LIST()
    }
};

static void xen_xenstore_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}

static const TypeInfo xen_xenstore_info = {
    .name = TYPE_XEN_XENSTORE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init = xen_xenstore_class_init,
};

void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}

static void xen_xenstore_register_types(void)
{
    type_register_static(&xen_xenstore_info);
}

type_init(xen_xenstore_register_types)

uint16_t xen_xenstore_get_port(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    if (!s) {
        return 0;
    }
    return s->guest_port;
}

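/* True once a complete request (header plus full payload) has been received. */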
static bool req_pending(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
}

static void reset_req(XenXenstoreState *s)
{
    memset(s->req_data, 0, sizeof(s->req_data));
    s->req_offset = 0;
}

static void reset_rsp(XenXenstoreState *s)
{
    s->rsp_pending = false;

    memset(s->rsp_data, 0, sizeof(s->rsp_data));
    s->rsp_offset = 0;
}

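/*
 * Response builders: xs_error() fills rsp_data with an XS_ERROR reply whose
 * payload is the symbolic name of the errno value (e.g. "EINVAL"), while
 * xs_ok() fills it with the literal "OK" acknowledgement used by requests
 * that return no other data.
 */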
static void xs_error(XenXenstoreState *s, unsigned int id,
                     xs_transaction_t tx_id, int errnum)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *errstr = NULL;

    for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
        const struct xsd_errors *xsd_error = &xsd_errors[i];

        if (xsd_error->errnum == errnum) {
            errstr = xsd_error->errstring;
            break;
        }
    }
    assert(errstr);

    trace_xenstore_error(id, tx_id, errstr);

    rsp->type = XS_ERROR;
    rsp->req_id = id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(errstr) + 1;

    memcpy(&rsp[1], errstr, rsp->len);
}

static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
                  xs_transaction_t tx_id)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *okstr = "OK";

    rsp->type = type;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(okstr) + 1;

    memcpy(&rsp[1], okstr, rsp->len);
}

/*
 * The correct request and response formats are documented in xen.git:
 * docs/misc/xenstore.txt. A summary is given below for convenience.
 * The '|' symbol represents a NUL character.
 *
 * ---------- Database read, write and permissions operations ----------
 *
 * READ <path>|                 <value|>
 * WRITE <path>|<value|>
 *     Store and read the octet string <value> at <path>.
 *     WRITE creates any missing parent paths, with empty values.
 *
 * MKDIR <path>|
 *     Ensures that the <path> exists, if necessary by creating
 *     it and any missing parents with empty values. If <path>
 *     or any parent already exists, its value is left unchanged.
 *
 * RM <path>|
 *     Ensures that the <path> does not exist, by deleting
 *     it and all of its children. It is not an error if <path> does
 *     not exist, but it _is_ an error if <path>'s immediate parent
 *     does not exist either.
 *
 * DIRECTORY <path>|            <child-leaf-name>|*
 *     Gives a list of the immediate children of <path>, as only the
 *     leafnames. The resulting children are each named
 *     <path>/<child-leaf-name>.
 *
 * DIRECTORY_PART <path>|<offset>       <gencnt>|<child-leaf-name>|*
 *     Same as DIRECTORY, but to be used for children lists longer than
 *     XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
 *     the list of children to return. Return values are the generation
 *     count <gencnt> of the node (to be used to ensure the node hasn't
 *     changed between two reads: <gencnt> being the same for multiple
 *     reads guarantees the node hasn't changed) and the list of children
 *     starting at the specified <offset> of the complete list.
 *
 * GET_PERMS <path>|            <perm-as-string>|+
 * SET_PERMS <path>|<perm-as-string>|+?
 *     <perm-as-string> is one of the following
 *         w<domid>    write only
 *         r<domid>    read only
 *         b<domid>    both read and write
 *         n<domid>    no access
 *     See https://wiki.xen.org/wiki/XenBus section
 *     `Permissions' for details of the permissions system.
 *     It is possible to set permissions for the special watch paths
 *     "@introduceDomain" and "@releaseDomain" to enable receiving those
 *     watches in unprivileged domains.
 *
 * ---------- Watches ----------
 *
 * WATCH <wpath>|<token>|?
 *     Adds a watch.
 *
 *     When a <path> is modified (including path creation, removal,
 *     contents change or permissions change) this generates an event
 *     on the changed <path>. Changes made in transactions cause an
 *     event only if and when committed. Each occurring event is
 *     matched against all the watches currently set up, and each
 *     matching watch results in a WATCH_EVENT message (see below).
 *
 *     The event's path matches the watch's <wpath> if it is a child
 *     of <wpath>.
 *
 *     <wpath> can be a <path> to watch or @<wspecial>. In the
 *     latter case <wspecial> may have any syntax but it matches
 *     (according to the rules above) only the following special
 *     events which are invented by xenstored:
 *         @introduceDomain    occurs on INTRODUCE
 *         @releaseDomain      occurs on any domain crash or
 *                             shutdown, and also on RELEASE
 *                             and domain destruction
 *     <wspecial> events are sent to privileged callers or explicitly
 *     via SET_PERMS enabled domains only.
 *
 *     When a watch is first set up it is triggered once straight
 *     away, with <path> equal to <wpath>. Watches may be triggered
 *     spuriously. The tx_id in a WATCH request is ignored.
 *
 *     Watches are supposed to be restricted by the permissions
 *     system but in practice the implementation is imperfect.
 *     Applications should not rely on being sent a notification for
 *     paths that they cannot read; however, an application may rely
 *     on being sent a watch when a path which it _is_ able to read
 *     is deleted even if that leaves only a nonexistent unreadable
 *     parent. A notification may be omitted if a node's permissions
 *     are changed so as to make it unreadable, in which case future
 *     notifications may be suppressed (and if the node is later made
 *     readable, some notifications may have been lost).
 *
 * WATCH_EVENT <epath>|<token>|
 *     Unsolicited `reply' generated for matching modification events
 *     as described above. req_id and tx_id are both 0.
 *
 *     <epath> is the event's path, ie the actual path that was
 *     modified; however if the event was the recursive removal of a
 *     parent of <wpath>, <epath> is just
 *     <wpath> (rather than the actual path which was removed). So
 *     <epath> is a child of <wpath>, regardless.
 *
 *     Iff <wpath> for the watch was specified as a relative pathname,
 *     the <epath> path will also be relative (with the same base,
 *     obviously).
 *
 * UNWATCH <wpath>|<token>|?
 *
 * RESET_WATCHES |
 *     Reset all watches and transactions of the caller.
 *
 * ---------- Transactions ----------
 *
 * TRANSACTION_START |          <transid>|
 *     <transid> is an opaque uint32_t allocated by xenstored
 *     represented as unsigned decimal. After this, transaction may
 *     be referenced by using <transid> (as 32-bit binary) in the
 *     tx_id request header field. When transaction is started whole
 *     db is copied; reads and writes happen on the copy.
 *     It is not legal to send non-0 tx_id in TRANSACTION_START.
 *
 * TRANSACTION_END T|
 * TRANSACTION_END F|
 *     tx_id must refer to existing transaction. After this
 *     request the tx_id is no longer valid and may be reused by
 *     xenstore. If F, the transaction is discarded. If T,
 *     it is committed: if there were any other intervening writes
 *     then our END gets EAGAIN.
 *
 *     The plan is that in the future only intervening `conflicting'
 *     writes cause EAGAIN, meaning only writes or other commits
 *     which changed paths which were read or written in the
 *     transaction at hand.
 *
 */

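/*
 * Illustrative sketch only (not used by the emulator; the helper name is
 * hypothetical): how a client might lay out a single XS_READ request in a
 * buffer shaped like req_data above. A struct xsd_sockmsg header is
 * followed directly by the payload, and the header's 'len' field counts
 * only the payload bytes, which is what process_req() and xs_read() below
 * expect.
 */
static G_GNUC_UNUSED unsigned int
example_build_read_req(uint8_t *buf, unsigned int req_id, const char *path)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)buf;
    unsigned int payload_len = strlen(path) + 1; /* include the NUL */

    req->type = XS_READ;
    req->req_id = req_id;
    req->tx_id = 0;                  /* 0 means "no transaction" */
    req->len = payload_len;

    memcpy(&req[1], path, payload_len);

    /* Total number of bytes that would be published on the ring */
    return XENSTORE_HEADER_SIZE + payload_len;
}
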
static void xs_read(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    g_autoptr(GByteArray) data = g_byte_array_new();
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_read(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_READ;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    len = data->len;
    if (len > XENSTORE_PAYLOAD_MAX) {
        xs_error(s, req_id, tx_id, E2BIG);
        return;
    }

    if (!len) {
        return;
    }

    memcpy(&rsp_data[rsp->len], data->data, len);
    rsp->len += len;
}

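/*
 * WRITE (and several requests below) carries "<path>|<value|>": the request
 * payload starts with the NUL-terminated path, and whatever follows it is
 * the value, so the loop below simply skips past the first NUL.
 */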
static void xs_write(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    g_byte_array_append(data, req_data, len);

    trace_xenstore_write(tx_id, path);
    err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WRITE, req_id, tx_id);
}

static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_mkdir(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err == ENOENT) {
        err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    }

    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_MKDIR, req_id, tx_id);
}

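/*
 * Append a list of NUL-terminated strings to the response payload, starting
 * at byte offset 'start' into the flattened list. With truncate=false an
 * overflowing payload is reported as E2BIG; with truncate=true (used for
 * XS_DIRECTORY_PART) the output is silently cut at XENSTORE_PAYLOAD_MAX
 * and, if space remains, terminated with an extra NUL.
 */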
static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
                              GList *strings, unsigned int start, bool truncate)
{
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    GList *l;

    for (l = strings; l; l = l->next) {
        size_t len = strlen(l->data) + 1; /* Including the NUL termination */
        char *str = l->data;

        if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
            if (truncate) {
                len = XENSTORE_PAYLOAD_MAX - rsp->len;
                if (!len) {
                    return;
                }
            } else {
                xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
                return;
            }
        }

        if (start) {
            if (start >= len) {
                start -= len;
                continue;
            }

            str += start;
            len -= start;
            start = 0;
        }

        memcpy(&rsp_data[rsp->len], str, len);
        rsp->len += len;
    }
    /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
    if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
        rsp_data[rsp->len++] = '\0';
    }
}

static void xs_directory(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *items = NULL;
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_directory(tx_id, path);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items);
    if (err != 0) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, items, 0, false);

    g_list_free_full(items, g_free);
}

static void xs_directory_part(XenXenstoreState *s, unsigned int req_id,
                              xs_transaction_t tx_id, uint8_t *req_data,
                              unsigned int len)
{
    const char *offset_str, *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    uint64_t gencnt = 0;
    unsigned int offset;
    GList *items = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    offset_str = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    if (len) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_directory_part(tx_id, path, offset);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items);
    if (err != 0) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY_PART;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1;

    xs_append_strings(s, rsp, items, offset, true);

    g_list_free_full(items, g_free);
}

static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id,
                                 xs_transaction_t tx_id, uint8_t *req_data,
                                 unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    int err;

    if (len != 1 || req_data[0] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    rsp->type = XS_TRANSACTION_START;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    trace_xenstore_transaction_start(tx_id);

    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id);
    assert(rsp->len < XENSTORE_PAYLOAD_MAX);
    rsp->len++;
}

static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id,
                               xs_transaction_t tx_id, uint8_t *req_data,
                               unsigned int len)
{
    bool commit;
    int err;

    if (len != 2 || req_data[1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    switch (req_data[0]) {
    case 'T':
        commit = true;
        break;
    case 'F':
        commit = false;
        break;
    default:
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_transaction_end(tx_id, commit);
    err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_TRANSACTION_END, req_id, tx_id);
}

static void xs_rm(XenXenstoreState *s, unsigned int req_id,
                  xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_rm(tx_id, path);
    err = xs_impl_rm(s->impl, xen_domid, tx_id, path);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_RM, req_id, tx_id);
}

static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *perms = NULL;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_get_perms(tx_id, path);
    err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_GET_PERMS;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, perms, 0, false);

    g_list_free_full(perms, g_free);
}

static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    uint8_t *perm;
    GList *perms = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    perm = req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            perms = g_list_append(perms, perm);
            perm = req_data;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     * SET_PERMS <path>|<perm-as-string>|+?
     */

    trace_xenstore_set_perms(tx_id, path);
    err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
    g_list_free(perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_SET_PERMS, req_id, tx_id);
}

static void xs_watch(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     * WATCH <wpath>|<token>|?
     */

    trace_xenstore_watch(path, token);
    err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WATCH, req_id, tx_id);
}

static void xs_unwatch(XenXenstoreState *s, unsigned int req_id,
                       xs_transaction_t tx_id, uint8_t *req_data,
                       unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    trace_xenstore_unwatch(path, token);
    err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_UNWATCH, req_id, tx_id);
}

static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id,
                             xs_transaction_t tx_id, uint8_t *req_data,
                             unsigned int len)
{
    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_reset_watches();
    xs_impl_reset_watches(s->impl, xen_domid);

    xs_ok(s, XS_RESET_WATCHES, req_id, tx_id);
}

static void xs_priv(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *data,
                    unsigned int len)
{
    xs_error(s, req_id, tx_id, EACCES);
}

static void xs_unimpl(XenXenstoreState *s, unsigned int req_id,
                      xs_transaction_t tx_id, uint8_t *data,
                      unsigned int len)
{
    xs_error(s, req_id, tx_id, ENOSYS);
}

10360254c4d1SDavid Woodhouse typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id,
10370254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *data,
10380254c4d1SDavid Woodhouse unsigned int len);
10390254c4d1SDavid Woodhouse
10400254c4d1SDavid Woodhouse struct xsd_req {
10410254c4d1SDavid Woodhouse const char *name;
10420254c4d1SDavid Woodhouse xs_impl fn;
10430254c4d1SDavid Woodhouse };
10440254c4d1SDavid Woodhouse #define XSD_REQ(_type, _fn) \
10450254c4d1SDavid Woodhouse [_type] = { .name = #_type, .fn = _fn }
10460254c4d1SDavid Woodhouse
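/*
 * Dispatch table for incoming requests, indexed by the XS_* message type
 * from xs_wire.h. Privileged operations reserved for the toolstack map to
 * xs_priv() (EACCES); any type missing from the table falls through to
 * xs_unimpl() (ENOSYS) in process_req().
 */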
10470254c4d1SDavid Woodhouse struct xsd_req xsd_reqs[] = {
10480254c4d1SDavid Woodhouse XSD_REQ(XS_READ, xs_read),
10490254c4d1SDavid Woodhouse XSD_REQ(XS_WRITE, xs_write),
10500254c4d1SDavid Woodhouse XSD_REQ(XS_MKDIR, xs_mkdir),
10510254c4d1SDavid Woodhouse XSD_REQ(XS_DIRECTORY, xs_directory),
10520254c4d1SDavid Woodhouse XSD_REQ(XS_DIRECTORY_PART, xs_directory_part),
10530254c4d1SDavid Woodhouse XSD_REQ(XS_TRANSACTION_START, xs_transaction_start),
10540254c4d1SDavid Woodhouse XSD_REQ(XS_TRANSACTION_END, xs_transaction_end),
10550254c4d1SDavid Woodhouse XSD_REQ(XS_RM, xs_rm),
10560254c4d1SDavid Woodhouse XSD_REQ(XS_GET_PERMS, xs_get_perms),
10570254c4d1SDavid Woodhouse XSD_REQ(XS_SET_PERMS, xs_set_perms),
10580254c4d1SDavid Woodhouse XSD_REQ(XS_WATCH, xs_watch),
10590254c4d1SDavid Woodhouse XSD_REQ(XS_UNWATCH, xs_unwatch),
10600254c4d1SDavid Woodhouse XSD_REQ(XS_CONTROL, xs_priv),
10610254c4d1SDavid Woodhouse XSD_REQ(XS_INTRODUCE, xs_priv),
10620254c4d1SDavid Woodhouse XSD_REQ(XS_RELEASE, xs_priv),
10630254c4d1SDavid Woodhouse XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv),
10640254c4d1SDavid Woodhouse XSD_REQ(XS_RESUME, xs_priv),
10650254c4d1SDavid Woodhouse XSD_REQ(XS_SET_TARGET, xs_priv),
10660254c4d1SDavid Woodhouse XSD_REQ(XS_RESET_WATCHES, xs_reset_watches),
10670254c4d1SDavid Woodhouse };
10680254c4d1SDavid Woodhouse
1069f3341e7bSDavid Woodhouse static void process_req(XenXenstoreState *s)
1070f3341e7bSDavid Woodhouse {
1071f3341e7bSDavid Woodhouse struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
10720254c4d1SDavid Woodhouse xs_impl handler = NULL;
1073f3341e7bSDavid Woodhouse
1074f3341e7bSDavid Woodhouse assert(req_pending(s));
1075f3341e7bSDavid Woodhouse assert(!s->rsp_pending);
1076f3341e7bSDavid Woodhouse
10770254c4d1SDavid Woodhouse if (req->type < ARRAY_SIZE(xsd_reqs)) {
10780254c4d1SDavid Woodhouse handler = xsd_reqs[req->type].fn;
10790254c4d1SDavid Woodhouse }
10800254c4d1SDavid Woodhouse if (!handler) {
10810254c4d1SDavid Woodhouse handler = &xs_unimpl;
10820254c4d1SDavid Woodhouse }
10830254c4d1SDavid Woodhouse
10840254c4d1SDavid Woodhouse handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len);
1085f3341e7bSDavid Woodhouse
1086f3341e7bSDavid Woodhouse s->rsp_pending = true;
1087f3341e7bSDavid Woodhouse reset_req(s);
1088f3341e7bSDavid Woodhouse }
1089f3341e7bSDavid Woodhouse
1090f3341e7bSDavid Woodhouse static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
1091f3341e7bSDavid Woodhouse unsigned int len)
1092f3341e7bSDavid Woodhouse {
1093f3341e7bSDavid Woodhouse if (!len) {
1094f3341e7bSDavid Woodhouse return 0;
1095f3341e7bSDavid Woodhouse }
1096f3341e7bSDavid Woodhouse
1097f3341e7bSDavid Woodhouse XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
1098f3341e7bSDavid Woodhouse XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
1099f3341e7bSDavid Woodhouse unsigned int copied = 0;
1100f3341e7bSDavid Woodhouse
1101f3341e7bSDavid Woodhouse /* Ensure the ring contents don't cross the req_prod access. */
1102f3341e7bSDavid Woodhouse smp_rmb();
1103f3341e7bSDavid Woodhouse
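/*
 * prod and cons are free-running indices: unsigned wraparound makes
 * (prod - cons) the number of bytes available to read, and
 * MASK_XENSTORE_IDX() converts a free-running index into an offset
 * within the ring. A copy that wraps past the end of the ring is
 * split across two iterations of this loop.
 */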
1104f3341e7bSDavid Woodhouse while (len) {
1105f3341e7bSDavid Woodhouse unsigned int avail = prod - cons;
1106f3341e7bSDavid Woodhouse unsigned int offset = MASK_XENSTORE_IDX(cons);
1107f3341e7bSDavid Woodhouse unsigned int copylen = avail;
1108f3341e7bSDavid Woodhouse
1109f3341e7bSDavid Woodhouse if (avail > XENSTORE_RING_SIZE) {
1110f3341e7bSDavid Woodhouse error_report("XenStore ring handling error");
1111f3341e7bSDavid Woodhouse s->fatal_error = true;
1112f3341e7bSDavid Woodhouse break;
1113f3341e7bSDavid Woodhouse } else if (avail == 0) {
1114f3341e7bSDavid Woodhouse break;
1115f3341e7bSDavid Woodhouse }
1116f3341e7bSDavid Woodhouse
1117f3341e7bSDavid Woodhouse if (copylen > len) {
1118f3341e7bSDavid Woodhouse copylen = len;
1119f3341e7bSDavid Woodhouse }
1120f3341e7bSDavid Woodhouse if (copylen > XENSTORE_RING_SIZE - offset) {
1121f3341e7bSDavid Woodhouse copylen = XENSTORE_RING_SIZE - offset;
1122f3341e7bSDavid Woodhouse }
1123f3341e7bSDavid Woodhouse
1124f3341e7bSDavid Woodhouse memcpy(ptr, &s->xs->req[offset], copylen);
1125f3341e7bSDavid Woodhouse copied += copylen;
1126f3341e7bSDavid Woodhouse
1127f3341e7bSDavid Woodhouse ptr += copylen;
1128f3341e7bSDavid Woodhouse len -= copylen;
1129f3341e7bSDavid Woodhouse
1130f3341e7bSDavid Woodhouse cons += copylen;
1131f3341e7bSDavid Woodhouse }
1132f3341e7bSDavid Woodhouse
1133f3341e7bSDavid Woodhouse /*
1134f3341e7bSDavid Woodhouse * Not sure this ever mattered except on Alpha, but this barrier
1135f3341e7bSDavid Woodhouse * is to ensure that the update to req_cons is globally visible
1136f3341e7bSDavid Woodhouse * only after we have consumed all the data from the ring, and we
1137f3341e7bSDavid Woodhouse * don't end up seeing data written to the ring *after* the other
1138f3341e7bSDavid Woodhouse * end sees the update and writes more to the ring. Xen's own
1139f3341e7bSDavid Woodhouse * xenstored has the same barrier here (although with no comment
1140f3341e7bSDavid Woodhouse * at all, obviously, because it's Xen code).
1141f3341e7bSDavid Woodhouse */
1142f3341e7bSDavid Woodhouse smp_mb();
1143f3341e7bSDavid Woodhouse
1144f3341e7bSDavid Woodhouse qatomic_set(&s->xs->req_cons, cons);
1145f3341e7bSDavid Woodhouse
1146f3341e7bSDavid Woodhouse return copied;
1147f3341e7bSDavid Woodhouse }
1148f3341e7bSDavid Woodhouse
1149f3341e7bSDavid Woodhouse static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
1150f3341e7bSDavid Woodhouse unsigned int len)
1151f3341e7bSDavid Woodhouse {
1152f3341e7bSDavid Woodhouse if (!len) {
1153f3341e7bSDavid Woodhouse return 0;
1154f3341e7bSDavid Woodhouse }
1155f3341e7bSDavid Woodhouse
1156f3341e7bSDavid Woodhouse XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
1157f3341e7bSDavid Woodhouse XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
1158f3341e7bSDavid Woodhouse unsigned int copied = 0;
1159f3341e7bSDavid Woodhouse
1160f3341e7bSDavid Woodhouse /*
1161f3341e7bSDavid Woodhouse * This matches the barrier in copy_from_ring() (or the guest's
1162bad5cfcdSMichael Tokarev * equivalent) between reading the data from the ring and updating
1163f3341e7bSDavid Woodhouse * req_cons. It protects against the pathological case (which
1164f3341e7bSDavid Woodhouse * again I think never happened except on Alpha) where our
1165f3341e7bSDavid Woodhouse * subsequent writes to the ring could *cross* the read of
1166f3341e7bSDavid Woodhouse * rsp_cons and the guest could see the new data when it was
1167f3341e7bSDavid Woodhouse * intending to read the old.
1168f3341e7bSDavid Woodhouse */
1169f3341e7bSDavid Woodhouse smp_mb();
1170f3341e7bSDavid Woodhouse
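/*
 * Here (cons + XENSTORE_RING_SIZE - prod) is the free space in the
 * response ring, i.e. slots the guest has already consumed plus slots
 * never yet written, again relying on unsigned index wraparound.
 */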
1171f3341e7bSDavid Woodhouse while (len) {
1172f3341e7bSDavid Woodhouse unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
1173f3341e7bSDavid Woodhouse unsigned int offset = MASK_XENSTORE_IDX(prod);
1174f3341e7bSDavid Woodhouse unsigned int copylen = len;
1175f3341e7bSDavid Woodhouse
1176f3341e7bSDavid Woodhouse if (avail > XENSTORE_RING_SIZE) {
1177f3341e7bSDavid Woodhouse error_report("XenStore ring handling error");
1178f3341e7bSDavid Woodhouse s->fatal_error = true;
1179f3341e7bSDavid Woodhouse break;
1180f3341e7bSDavid Woodhouse } else if (avail == 0) {
1181f3341e7bSDavid Woodhouse break;
1182f3341e7bSDavid Woodhouse }
1183f3341e7bSDavid Woodhouse
1184f3341e7bSDavid Woodhouse if (copylen > avail) {
1185f3341e7bSDavid Woodhouse copylen = avail;
1186f3341e7bSDavid Woodhouse }
1187f3341e7bSDavid Woodhouse if (copylen > XENSTORE_RING_SIZE - offset) {
1188f3341e7bSDavid Woodhouse copylen = XENSTORE_RING_SIZE - offset;
1189f3341e7bSDavid Woodhouse }
1190f3341e7bSDavid Woodhouse
1192f3341e7bSDavid Woodhouse memcpy(&s->xs->rsp[offset], ptr, copylen);
1193f3341e7bSDavid Woodhouse copied += copylen;
1194f3341e7bSDavid Woodhouse
1195f3341e7bSDavid Woodhouse ptr += copylen;
1196f3341e7bSDavid Woodhouse len -= copylen;
1197f3341e7bSDavid Woodhouse
1198f3341e7bSDavid Woodhouse prod += copylen;
1199f3341e7bSDavid Woodhouse }
1200f3341e7bSDavid Woodhouse
1201f3341e7bSDavid Woodhouse /* Ensure the ring contents are seen before rsp_prod update. */
1202f3341e7bSDavid Woodhouse smp_wmb();
1203f3341e7bSDavid Woodhouse
1204f3341e7bSDavid Woodhouse qatomic_set(&s->xs->rsp_prod, prod);
1205f3341e7bSDavid Woodhouse
1206f3341e7bSDavid Woodhouse return copied;
1207f3341e7bSDavid Woodhouse }
1208f3341e7bSDavid Woodhouse
1209f3341e7bSDavid Woodhouse static unsigned int get_req(XenXenstoreState *s)
1210f3341e7bSDavid Woodhouse {
1211f3341e7bSDavid Woodhouse unsigned int copied = 0;
1212f3341e7bSDavid Woodhouse
1213f3341e7bSDavid Woodhouse if (s->fatal_error) {
1214f3341e7bSDavid Woodhouse return 0;
1215f3341e7bSDavid Woodhouse }
1216f3341e7bSDavid Woodhouse
1217f3341e7bSDavid Woodhouse assert(!req_pending(s));
1218f3341e7bSDavid Woodhouse
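/*
 * A request arrives as a struct xsd_sockmsg header (type, req_id, tx_id,
 * len) followed by len bytes of payload. Read the header first; only once
 * it is complete do we know how much payload to expect.
 */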
1219f3341e7bSDavid Woodhouse if (s->req_offset < XENSTORE_HEADER_SIZE) {
1220f3341e7bSDavid Woodhouse void *ptr = s->req_data + s->req_offset;
1221f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE;
1222f3341e7bSDavid Woodhouse unsigned int copylen = copy_from_ring(s, ptr, len);
1223f3341e7bSDavid Woodhouse
1224f3341e7bSDavid Woodhouse copied += copylen;
1225f3341e7bSDavid Woodhouse s->req_offset += copylen;
1226f3341e7bSDavid Woodhouse }
1227f3341e7bSDavid Woodhouse
1228f3341e7bSDavid Woodhouse if (s->req_offset >= XENSTORE_HEADER_SIZE) {
1229f3341e7bSDavid Woodhouse struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
1230f3341e7bSDavid Woodhouse
1231f3341e7bSDavid Woodhouse if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
1232f3341e7bSDavid Woodhouse error_report("Illegal XenStore request");
1233f3341e7bSDavid Woodhouse s->fatal_error = true;
1234f3341e7bSDavid Woodhouse return 0;
1235f3341e7bSDavid Woodhouse }
1236f3341e7bSDavid Woodhouse
1237f3341e7bSDavid Woodhouse void *ptr = s->req_data + s->req_offset;
1238f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
1239f3341e7bSDavid Woodhouse unsigned int copylen = copy_from_ring(s, ptr, len);
1240f3341e7bSDavid Woodhouse
1241f3341e7bSDavid Woodhouse copied += copylen;
1242f3341e7bSDavid Woodhouse s->req_offset += copylen;
1243f3341e7bSDavid Woodhouse }
1244f3341e7bSDavid Woodhouse
1245f3341e7bSDavid Woodhouse return copied;
1246f3341e7bSDavid Woodhouse }
1247f3341e7bSDavid Woodhouse
1248f3341e7bSDavid Woodhouse static unsigned int put_rsp(XenXenstoreState *s)
1249f3341e7bSDavid Woodhouse {
1250f3341e7bSDavid Woodhouse if (s->fatal_error) {
1251f3341e7bSDavid Woodhouse return 0;
1252f3341e7bSDavid Woodhouse }
1253f3341e7bSDavid Woodhouse
1254f3341e7bSDavid Woodhouse assert(s->rsp_pending);
1255f3341e7bSDavid Woodhouse
1256f3341e7bSDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
1257f3341e7bSDavid Woodhouse assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
1258f3341e7bSDavid Woodhouse
1259f3341e7bSDavid Woodhouse void *ptr = s->rsp_data + s->rsp_offset;
1260f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
1261f3341e7bSDavid Woodhouse unsigned int copylen = copy_to_ring(s, ptr, len);
1262f3341e7bSDavid Woodhouse
1263f3341e7bSDavid Woodhouse s->rsp_offset += copylen;
1264f3341e7bSDavid Woodhouse
1265f3341e7bSDavid Woodhouse /* Have we produced a complete response? */
1266f3341e7bSDavid Woodhouse if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
1267f3341e7bSDavid Woodhouse reset_rsp(s);
1268f3341e7bSDavid Woodhouse }
1269f3341e7bSDavid Woodhouse
1270f3341e7bSDavid Woodhouse return copylen;
1271f3341e7bSDavid Woodhouse }
1272f3341e7bSDavid Woodhouse
12730254c4d1SDavid Woodhouse static void deliver_watch(XenXenstoreState *s, const char *path,
12740254c4d1SDavid Woodhouse const char *token)
12750254c4d1SDavid Woodhouse {
12760254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
12770254c4d1SDavid Woodhouse uint8_t *rsp_data = (uint8_t *)&rsp[1];
12780254c4d1SDavid Woodhouse unsigned int len;
12790254c4d1SDavid Woodhouse
12800254c4d1SDavid Woodhouse assert(!s->rsp_pending);
12810254c4d1SDavid Woodhouse
12820254c4d1SDavid Woodhouse trace_xenstore_watch_event(path, token);
12830254c4d1SDavid Woodhouse
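/*
 * A watch event is an unsolicited message from xenstored: type
 * XS_WATCH_EVENT with req_id and tx_id both zero, and a payload of
 * <path>'\0'<token>'\0'.
 */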
12840254c4d1SDavid Woodhouse rsp->type = XS_WATCH_EVENT;
12850254c4d1SDavid Woodhouse rsp->req_id = 0;
12860254c4d1SDavid Woodhouse rsp->tx_id = 0;
12870254c4d1SDavid Woodhouse rsp->len = 0;
12880254c4d1SDavid Woodhouse
12890254c4d1SDavid Woodhouse len = strlen(path);
12900254c4d1SDavid Woodhouse
12910254c4d1SDavid Woodhouse /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
12920254c4d1SDavid Woodhouse assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);
12930254c4d1SDavid Woodhouse
12940254c4d1SDavid Woodhouse memcpy(&rsp_data[rsp->len], path, len);
12950254c4d1SDavid Woodhouse rsp->len += len;
12960254c4d1SDavid Woodhouse rsp_data[rsp->len] = '\0';
12970254c4d1SDavid Woodhouse rsp->len++;
12980254c4d1SDavid Woodhouse
12990254c4d1SDavid Woodhouse len = strlen(token);
13000254c4d1SDavid Woodhouse /*
13010254c4d1SDavid Woodhouse * It is possible for the guest to have chosen a token that will
13020254c4d1SDavid Woodhouse * not fit (along with the path) into a watch event. We have no
13030254c4d1SDavid Woodhouse * choice but to drop the event if this is the case.
13040254c4d1SDavid Woodhouse */
13050254c4d1SDavid Woodhouse if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) {
13060254c4d1SDavid Woodhouse return;
13070254c4d1SDavid Woodhouse }
13080254c4d1SDavid Woodhouse
13090254c4d1SDavid Woodhouse memcpy(&rsp_data[rsp->len], token, len);
13100254c4d1SDavid Woodhouse rsp->len += len;
13110254c4d1SDavid Woodhouse rsp_data[rsp->len] = '\0';
13120254c4d1SDavid Woodhouse rsp->len++;
13130254c4d1SDavid Woodhouse
13140254c4d1SDavid Woodhouse s->rsp_pending = true;
13150254c4d1SDavid Woodhouse }
13160254c4d1SDavid Woodhouse
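/*
 * A pending watch event. Used both for events queued for the guest while
 * the response buffer is busy (XenXenstoreState::watch_events) and for
 * events queued for in-QEMU backend watches (struct qemu_xs_watch below).
 */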
13170254c4d1SDavid Woodhouse struct watch_event {
13180254c4d1SDavid Woodhouse char *path;
13190254c4d1SDavid Woodhouse char *token;
13200254c4d1SDavid Woodhouse };
13210254c4d1SDavid Woodhouse
132203247512SDavid Woodhouse static void free_watch_event(struct watch_event *ev)
132303247512SDavid Woodhouse {
132403247512SDavid Woodhouse if (ev) {
132503247512SDavid Woodhouse g_free(ev->path);
132603247512SDavid Woodhouse g_free(ev->token);
132703247512SDavid Woodhouse g_free(ev);
132803247512SDavid Woodhouse }
132903247512SDavid Woodhouse }
133003247512SDavid Woodhouse
13310254c4d1SDavid Woodhouse static void queue_watch(XenXenstoreState *s, const char *path,
13320254c4d1SDavid Woodhouse const char *token)
13330254c4d1SDavid Woodhouse {
13340254c4d1SDavid Woodhouse struct watch_event *ev = g_new0(struct watch_event, 1);
13350254c4d1SDavid Woodhouse
13360254c4d1SDavid Woodhouse ev->path = g_strdup(path);
13370254c4d1SDavid Woodhouse ev->token = g_strdup(token);
13380254c4d1SDavid Woodhouse
13390254c4d1SDavid Woodhouse s->watch_events = g_list_append(s->watch_events, ev);
13400254c4d1SDavid Woodhouse }
13410254c4d1SDavid Woodhouse
13420254c4d1SDavid Woodhouse static void fire_watch_cb(void *opaque, const char *path, const char *token)
13430254c4d1SDavid Woodhouse {
13440254c4d1SDavid Woodhouse XenXenstoreState *s = opaque;
13450254c4d1SDavid Woodhouse
1346195801d7SStefan Hajnoczi assert(bql_locked());
13470254c4d1SDavid Woodhouse
13480254c4d1SDavid Woodhouse /*
13490254c4d1SDavid Woodhouse * If there's a response pending, we obviously can't scribble over
13500254c4d1SDavid Woodhouse * it. But if there's a request pending, it has dibs on the buffer
13510254c4d1SDavid Woodhouse * too.
13520254c4d1SDavid Woodhouse *
13530254c4d1SDavid Woodhouse * In the common case of a watch firing due to backend activity
13540254c4d1SDavid Woodhouse * when the ring was otherwise idle, we should be able to copy the
13550254c4d1SDavid Woodhouse * strings directly into the rsp_data and thence the actual ring,
13560254c4d1SDavid Woodhouse * without needing to perform any allocations and queue them.
13570254c4d1SDavid Woodhouse */
13580254c4d1SDavid Woodhouse if (s->rsp_pending || req_pending(s)) {
13590254c4d1SDavid Woodhouse queue_watch(s, path, token);
13600254c4d1SDavid Woodhouse } else {
13610254c4d1SDavid Woodhouse deliver_watch(s, path, token);
13620254c4d1SDavid Woodhouse /*
13634a5780f5SDavid Woodhouse * Attempt to queue the message into the actual ring, and send
13644a5780f5SDavid Woodhouse * the event channel notification if any bytes are copied.
13650254c4d1SDavid Woodhouse */
13664a5780f5SDavid Woodhouse if (s->rsp_pending && put_rsp(s) > 0) {
13670254c4d1SDavid Woodhouse xen_be_evtchn_notify(s->eh, s->be_port);
13680254c4d1SDavid Woodhouse }
13690254c4d1SDavid Woodhouse }
13704a5780f5SDavid Woodhouse }
13710254c4d1SDavid Woodhouse
13720254c4d1SDavid Woodhouse static void process_watch_events(XenXenstoreState *s)
13730254c4d1SDavid Woodhouse {
13740254c4d1SDavid Woodhouse struct watch_event *ev = s->watch_events->data;
13750254c4d1SDavid Woodhouse
13760254c4d1SDavid Woodhouse deliver_watch(s, ev->path, ev->token);
13770254c4d1SDavid Woodhouse
13780254c4d1SDavid Woodhouse s->watch_events = g_list_remove(s->watch_events, ev);
137903247512SDavid Woodhouse free_watch_event(ev);
13800254c4d1SDavid Woodhouse }
13810254c4d1SDavid Woodhouse
1382c08f5d0eSDavid Woodhouse static void xen_xenstore_event(void *opaque)
1383c08f5d0eSDavid Woodhouse {
1384c08f5d0eSDavid Woodhouse XenXenstoreState *s = opaque;
1385c08f5d0eSDavid Woodhouse evtchn_port_t port = xen_be_evtchn_pending(s->eh);
1386f3341e7bSDavid Woodhouse unsigned int copied_to, copied_from;
1387f3341e7bSDavid Woodhouse bool processed, notify = false;
1388f3341e7bSDavid Woodhouse
1389c08f5d0eSDavid Woodhouse if (port != s->be_port) {
1390c08f5d0eSDavid Woodhouse return;
1391c08f5d0eSDavid Woodhouse }
1392f3341e7bSDavid Woodhouse
1393c08f5d0eSDavid Woodhouse /* We know this is a no-op. */
1394c08f5d0eSDavid Woodhouse xen_be_evtchn_unmask(s->eh, port);
1395f3341e7bSDavid Woodhouse
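/*
 * Keep cycling between delivering queued watch events, pushing any
 * pending response, pulling in request bytes and processing a complete
 * request, until an iteration makes no progress at all. Notify the
 * guest's event channel if anything was copied in either direction.
 */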
1396f3341e7bSDavid Woodhouse do {
1397f3341e7bSDavid Woodhouse copied_to = copied_from = 0;
1398f3341e7bSDavid Woodhouse processed = false;
1399f3341e7bSDavid Woodhouse
14000254c4d1SDavid Woodhouse if (!s->rsp_pending && s->watch_events) {
14010254c4d1SDavid Woodhouse process_watch_events(s);
14020254c4d1SDavid Woodhouse }
14030254c4d1SDavid Woodhouse
1404f3341e7bSDavid Woodhouse if (s->rsp_pending) {
1405f3341e7bSDavid Woodhouse copied_to = put_rsp(s);
1406f3341e7bSDavid Woodhouse }
1407f3341e7bSDavid Woodhouse
1408f3341e7bSDavid Woodhouse if (!req_pending(s)) {
1409f3341e7bSDavid Woodhouse copied_from = get_req(s);
1410f3341e7bSDavid Woodhouse }
1411f3341e7bSDavid Woodhouse
14120254c4d1SDavid Woodhouse if (req_pending(s) && !s->rsp_pending && !s->watch_events) {
1413f3341e7bSDavid Woodhouse process_req(s);
1414f3341e7bSDavid Woodhouse processed = true;
1415f3341e7bSDavid Woodhouse }
1416f3341e7bSDavid Woodhouse
1417f3341e7bSDavid Woodhouse notify |= copied_to || copied_from;
1418f3341e7bSDavid Woodhouse } while (copied_to || copied_from || processed);
1419f3341e7bSDavid Woodhouse
1420f3341e7bSDavid Woodhouse if (notify) {
1421c08f5d0eSDavid Woodhouse xen_be_evtchn_notify(s->eh, s->be_port);
1422c08f5d0eSDavid Woodhouse }
1423f3341e7bSDavid Woodhouse }
1424c08f5d0eSDavid Woodhouse
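/*
 * Allocate an unbound event channel in the guest's own table (DOMID_SELF)
 * with QEMU as the remote end; the resulting port is advertised to the
 * guest in the "store/port" node in xen_xenstore_reset().
 */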
1425c08f5d0eSDavid Woodhouse static void alloc_guest_port(XenXenstoreState *s)
1426c08f5d0eSDavid Woodhouse {
1427c08f5d0eSDavid Woodhouse struct evtchn_alloc_unbound alloc = {
1428c08f5d0eSDavid Woodhouse .dom = DOMID_SELF,
1429c08f5d0eSDavid Woodhouse .remote_dom = DOMID_QEMU,
1430c08f5d0eSDavid Woodhouse };
1431c08f5d0eSDavid Woodhouse
1432c08f5d0eSDavid Woodhouse if (!xen_evtchn_alloc_unbound_op(&alloc)) {
1433c08f5d0eSDavid Woodhouse s->guest_port = alloc.port;
1434c08f5d0eSDavid Woodhouse }
1435c08f5d0eSDavid Woodhouse }
1436c08f5d0eSDavid Woodhouse
1437c08f5d0eSDavid Woodhouse int xen_xenstore_reset(void)
1438c08f5d0eSDavid Woodhouse {
1439c08f5d0eSDavid Woodhouse XenXenstoreState *s = xen_xenstore_singleton;
1440a72ccc7fSDavid Woodhouse int console_port;
1441d388c9f5SDavid Woodhouse GList *perms;
1442c08f5d0eSDavid Woodhouse int err;
1443c08f5d0eSDavid Woodhouse
1444c08f5d0eSDavid Woodhouse if (!s) {
1445c08f5d0eSDavid Woodhouse return -ENOTSUP;
1446c08f5d0eSDavid Woodhouse }
1447c08f5d0eSDavid Woodhouse
1448c08f5d0eSDavid Woodhouse s->req_offset = s->rsp_offset = 0;
1449c08f5d0eSDavid Woodhouse s->rsp_pending = false;
1450c08f5d0eSDavid Woodhouse
1451c08f5d0eSDavid Woodhouse if (!memory_region_is_mapped(&s->xenstore_page)) {
1452c08f5d0eSDavid Woodhouse uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS;
1453c08f5d0eSDavid Woodhouse xen_overlay_do_map_page(&s->xenstore_page, gpa);
1454c08f5d0eSDavid Woodhouse }
1455c08f5d0eSDavid Woodhouse
1456c08f5d0eSDavid Woodhouse alloc_guest_port(s);
1457c08f5d0eSDavid Woodhouse
1458c08f5d0eSDavid Woodhouse /*
1459c08f5d0eSDavid Woodhouse * As qemu/dom0, bind to the guest's port. For incoming migration, this
1460c08f5d0eSDavid Woodhouse * will be unbound as the guest's evtchn table is overwritten. We then
1461c08f5d0eSDavid Woodhouse * rebind to the correct guest port in xen_xenstore_post_load().
1462c08f5d0eSDavid Woodhouse */
1463c08f5d0eSDavid Woodhouse err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port);
1464c08f5d0eSDavid Woodhouse if (err < 0) {
1465c08f5d0eSDavid Woodhouse return err;
1466c08f5d0eSDavid Woodhouse }
1467c08f5d0eSDavid Woodhouse s->be_port = err;
1468c08f5d0eSDavid Woodhouse
1469d388c9f5SDavid Woodhouse /* Create frontend store nodes */
1470d388c9f5SDavid Woodhouse perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
1471d388c9f5SDavid Woodhouse perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));
1472d388c9f5SDavid Woodhouse
1473d388c9f5SDavid Woodhouse relpath_printf(s, perms, "store/port", "%u", s->guest_port);
1474d388c9f5SDavid Woodhouse relpath_printf(s, perms, "store/ring-ref", "%lu",
1475d388c9f5SDavid Woodhouse XEN_SPECIAL_PFN(XENSTORE));
1476d388c9f5SDavid Woodhouse
1477a72ccc7fSDavid Woodhouse console_port = xen_primary_console_get_port();
1478a72ccc7fSDavid Woodhouse if (console_port) {
1479a72ccc7fSDavid Woodhouse relpath_printf(s, perms, "console/ring-ref", "%lu",
1480a72ccc7fSDavid Woodhouse XEN_SPECIAL_PFN(CONSOLE));
1481a72ccc7fSDavid Woodhouse relpath_printf(s, perms, "console/port", "%u", console_port);
1482a72ccc7fSDavid Woodhouse relpath_printf(s, perms, "console/state", "%u", XenbusStateInitialised);
1483a72ccc7fSDavid Woodhouse }
1484a72ccc7fSDavid Woodhouse
1485d388c9f5SDavid Woodhouse g_list_free_full(perms, g_free);
1486d388c9f5SDavid Woodhouse
1487d05864d2SDavid Woodhouse /*
1488d05864d2SDavid Woodhouse * We don't actually access the guest's page through the grant, because
1489d05864d2SDavid Woodhouse * this isn't real Xen, and we can just use the page we gave it in the
1490d05864d2SDavid Woodhouse * first place. Map the grant anyway, mostly for cosmetic purposes so
1491d05864d2SDavid Woodhouse * it *looks* like it's in use in the guest-visible grant table.
1492d05864d2SDavid Woodhouse */
1493d05864d2SDavid Woodhouse s->gt = qemu_xen_gnttab_open();
1494d05864d2SDavid Woodhouse uint32_t xs_gntref = GNTTAB_RESERVED_XENSTORE;
1495d05864d2SDavid Woodhouse s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref,
1496d05864d2SDavid Woodhouse PROT_READ | PROT_WRITE);
1497d05864d2SDavid Woodhouse
1498c08f5d0eSDavid Woodhouse return 0;
1499c08f5d0eSDavid Woodhouse }
150003247512SDavid Woodhouse
150103247512SDavid Woodhouse struct qemu_xs_handle {
150203247512SDavid Woodhouse XenstoreImplState *impl;
150303247512SDavid Woodhouse GList *watches;
150403247512SDavid Woodhouse QEMUBH *watch_bh;
150503247512SDavid Woodhouse };
150603247512SDavid Woodhouse
150703247512SDavid Woodhouse struct qemu_xs_watch {
150803247512SDavid Woodhouse struct qemu_xs_handle *h;
150903247512SDavid Woodhouse char *path;
151003247512SDavid Woodhouse xs_watch_fn fn;
151103247512SDavid Woodhouse void *opaque;
151203247512SDavid Woodhouse GList *events;
151303247512SDavid Woodhouse };
151403247512SDavid Woodhouse
151503247512SDavid Woodhouse static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid)
151603247512SDavid Woodhouse {
151703247512SDavid Woodhouse return g_strdup_printf("/local/domain/%u", domid);
151803247512SDavid Woodhouse }
151903247512SDavid Woodhouse
152003247512SDavid Woodhouse static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t,
152103247512SDavid Woodhouse const char *path, unsigned int *num)
152203247512SDavid Woodhouse {
152303247512SDavid Woodhouse GList *items = NULL, *l;
152403247512SDavid Woodhouse unsigned int i = 0;
152503247512SDavid Woodhouse char **items_ret;
152603247512SDavid Woodhouse int err;
152703247512SDavid Woodhouse
152803247512SDavid Woodhouse err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items);
152903247512SDavid Woodhouse if (err) {
153003247512SDavid Woodhouse errno = err;
153103247512SDavid Woodhouse return NULL;
153203247512SDavid Woodhouse }
153303247512SDavid Woodhouse
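/*
 * Hand back a NULL-terminated char * vector (the extra zeroed slot comes
 * from the "+ 1"). Ownership of the strings moves from the GList to the
 * array, so only the list cells are freed (g_list_free(), not
 * g_list_free_full()).
 */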
153403247512SDavid Woodhouse items_ret = g_new0(char *, g_list_length(items) + 1);
153503247512SDavid Woodhouse *num = 0;
153603247512SDavid Woodhouse for (l = items; l; l = l->next) {
153703247512SDavid Woodhouse items_ret[i++] = l->data;
153803247512SDavid Woodhouse (*num)++;
153903247512SDavid Woodhouse }
154003247512SDavid Woodhouse g_list_free(items);
154103247512SDavid Woodhouse return items_ret;
154203247512SDavid Woodhouse }
154303247512SDavid Woodhouse
154403247512SDavid Woodhouse static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t,
154503247512SDavid Woodhouse const char *path, unsigned int *len)
154603247512SDavid Woodhouse {
154703247512SDavid Woodhouse GByteArray *data = g_byte_array_new();
154803247512SDavid Woodhouse bool free_segment = false;
154903247512SDavid Woodhouse int err;
155003247512SDavid Woodhouse
155103247512SDavid Woodhouse err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
155203247512SDavid Woodhouse if (err) {
155303247512SDavid Woodhouse free_segment = true;
155403247512SDavid Woodhouse errno = err;
155503247512SDavid Woodhouse } else {
155603247512SDavid Woodhouse if (len) {
155703247512SDavid Woodhouse *len = data->len;
155803247512SDavid Woodhouse }
155903247512SDavid Woodhouse /* The xen-bus-helper code expects to get a NUL-terminated string! */
156003247512SDavid Woodhouse g_byte_array_append(data, (void *)"", 1);
156103247512SDavid Woodhouse }
156203247512SDavid Woodhouse
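/*
 * On success the caller gets the data (with the terminating NUL appended
 * above); on failure, g_byte_array_free() with free_segment set releases
 * the buffer and we return NULL with errno already set.
 */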
156303247512SDavid Woodhouse return g_byte_array_free(data, free_segment);
156403247512SDavid Woodhouse }
156503247512SDavid Woodhouse
156603247512SDavid Woodhouse static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t,
156703247512SDavid Woodhouse const char *path, const void *data, unsigned int len)
156803247512SDavid Woodhouse {
156903247512SDavid Woodhouse GByteArray *gdata = g_byte_array_new();
157003247512SDavid Woodhouse int err;
157103247512SDavid Woodhouse
157203247512SDavid Woodhouse g_byte_array_append(gdata, data, len);
157303247512SDavid Woodhouse err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata);
157403247512SDavid Woodhouse g_byte_array_unref(gdata);
157503247512SDavid Woodhouse if (err) {
157603247512SDavid Woodhouse errno = err;
157703247512SDavid Woodhouse return false;
157803247512SDavid Woodhouse }
157903247512SDavid Woodhouse return true;
158003247512SDavid Woodhouse }
158103247512SDavid Woodhouse
158203247512SDavid Woodhouse static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t,
158303247512SDavid Woodhouse unsigned int owner, unsigned int domid,
158403247512SDavid Woodhouse unsigned int perms, const char *path)
158503247512SDavid Woodhouse {
158603247512SDavid Woodhouse g_autoptr(GByteArray) data = g_byte_array_new();
158703247512SDavid Woodhouse GList *perms_list = NULL;
158803247512SDavid Woodhouse int err;
158903247512SDavid Woodhouse
159003247512SDavid Woodhouse /* mkdir does this */
159103247512SDavid Woodhouse err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
159203247512SDavid Woodhouse if (err == ENOENT) {
159303247512SDavid Woodhouse err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data);
159403247512SDavid Woodhouse }
159503247512SDavid Woodhouse if (err) {
159603247512SDavid Woodhouse errno = err;
159703247512SDavid Woodhouse return false;
159803247512SDavid Woodhouse }
159903247512SDavid Woodhouse
160003247512SDavid Woodhouse perms_list = g_list_append(perms_list,
160103247512SDavid Woodhouse xs_perm_as_string(XS_PERM_NONE, owner));
160203247512SDavid Woodhouse perms_list = g_list_append(perms_list,
160303247512SDavid Woodhouse xs_perm_as_string(perms, domid));
160403247512SDavid Woodhouse
160503247512SDavid Woodhouse err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list);
160603247512SDavid Woodhouse g_list_free_full(perms_list, g_free);
160703247512SDavid Woodhouse if (err) {
160803247512SDavid Woodhouse errno = err;
160903247512SDavid Woodhouse return false;
161003247512SDavid Woodhouse }
161103247512SDavid Woodhouse return true;
161203247512SDavid Woodhouse }
161303247512SDavid Woodhouse
161403247512SDavid Woodhouse static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
161503247512SDavid Woodhouse const char *path)
161603247512SDavid Woodhouse {
161703247512SDavid Woodhouse int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path);
161803247512SDavid Woodhouse if (err) {
161903247512SDavid Woodhouse errno = err;
162003247512SDavid Woodhouse return false;
162103247512SDavid Woodhouse }
162203247512SDavid Woodhouse return true;
162303247512SDavid Woodhouse }
162403247512SDavid Woodhouse
162503247512SDavid Woodhouse static void be_watch_bh(void *_h)
162603247512SDavid Woodhouse {
162703247512SDavid Woodhouse struct qemu_xs_handle *h = _h;
162803247512SDavid Woodhouse GList *l;
162903247512SDavid Woodhouse
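/*
 * Runs as a bottom half in the main loop context: drain every event
 * queued by xs_be_watch_cb() and invoke the watch callbacks outside the
 * context that fired them.
 */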
163003247512SDavid Woodhouse for (l = h->watches; l; l = l->next) {
163103247512SDavid Woodhouse struct qemu_xs_watch *w = l->data;
163203247512SDavid Woodhouse
163303247512SDavid Woodhouse while (w->events) {
163403247512SDavid Woodhouse struct watch_event *ev = w->events->data;
163503247512SDavid Woodhouse
163603247512SDavid Woodhouse w->fn(w->opaque, ev->path);
163703247512SDavid Woodhouse
163803247512SDavid Woodhouse w->events = g_list_remove(w->events, ev);
163903247512SDavid Woodhouse free_watch_event(ev);
164003247512SDavid Woodhouse }
164103247512SDavid Woodhouse }
164203247512SDavid Woodhouse }
164303247512SDavid Woodhouse
164403247512SDavid Woodhouse static void xs_be_watch_cb(void *opaque, const char *path, const char *token)
164503247512SDavid Woodhouse {
164603247512SDavid Woodhouse struct watch_event *ev = g_new0(struct watch_event, 1);
164703247512SDavid Woodhouse struct qemu_xs_watch *w = opaque;
164803247512SDavid Woodhouse
164903247512SDavid Woodhouse /* We don't care about the token */
165003247512SDavid Woodhouse ev->path = g_strdup(path);
165103247512SDavid Woodhouse w->events = g_list_append(w->events, ev);
165203247512SDavid Woodhouse
165303247512SDavid Woodhouse qemu_bh_schedule(w->h->watch_bh);
165403247512SDavid Woodhouse }
165503247512SDavid Woodhouse
165603247512SDavid Woodhouse static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h,
165703247512SDavid Woodhouse const char *path, xs_watch_fn fn,
165803247512SDavid Woodhouse void *opaque)
165903247512SDavid Woodhouse {
166003247512SDavid Woodhouse struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
166103247512SDavid Woodhouse int err;
166203247512SDavid Woodhouse
166303247512SDavid Woodhouse w->h = h;
166403247512SDavid Woodhouse w->fn = fn;
166503247512SDavid Woodhouse w->opaque = opaque;
166603247512SDavid Woodhouse
166703247512SDavid Woodhouse err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w);
166803247512SDavid Woodhouse if (err) {
166903247512SDavid Woodhouse errno = err;
167003247512SDavid Woodhouse g_free(w);
167103247512SDavid Woodhouse return NULL;
167203247512SDavid Woodhouse }
167303247512SDavid Woodhouse
167403247512SDavid Woodhouse w->path = g_strdup(path);
167503247512SDavid Woodhouse h->watches = g_list_append(h->watches, w);
167603247512SDavid Woodhouse return w;
167703247512SDavid Woodhouse }
167803247512SDavid Woodhouse
167903247512SDavid Woodhouse static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
168003247512SDavid Woodhouse {
168103247512SDavid Woodhouse xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w);
168203247512SDavid Woodhouse
168303247512SDavid Woodhouse h->watches = g_list_remove(h->watches, w);
168403247512SDavid Woodhouse g_list_free_full(w->events, (GDestroyNotify)free_watch_event);
168503247512SDavid Woodhouse g_free(w->path);
168603247512SDavid Woodhouse g_free(w);
168703247512SDavid Woodhouse }
168803247512SDavid Woodhouse
168903247512SDavid Woodhouse static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h)
169003247512SDavid Woodhouse {
169103247512SDavid Woodhouse unsigned int new_tx = XBT_NULL;
169203247512SDavid Woodhouse int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx);
169303247512SDavid Woodhouse if (err) {
169403247512SDavid Woodhouse errno = err;
169503247512SDavid Woodhouse return XBT_NULL;
169603247512SDavid Woodhouse }
169703247512SDavid Woodhouse return new_tx;
169803247512SDavid Woodhouse }
169903247512SDavid Woodhouse
170003247512SDavid Woodhouse static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t,
170103247512SDavid Woodhouse bool abort)
170203247512SDavid Woodhouse {
170303247512SDavid Woodhouse int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
170403247512SDavid Woodhouse if (err) {
170503247512SDavid Woodhouse errno = err;
170603247512SDavid Woodhouse return false;
170703247512SDavid Woodhouse }
170803247512SDavid Woodhouse return true;
170903247512SDavid Woodhouse }
171003247512SDavid Woodhouse
171103247512SDavid Woodhouse static struct qemu_xs_handle *xs_be_open(void)
171203247512SDavid Woodhouse {
171303247512SDavid Woodhouse XenXenstoreState *s = xen_xenstore_singleton;
171403247512SDavid Woodhouse struct qemu_xs_handle *h;
171503247512SDavid Woodhouse
1716c9bdfe8dSDavid Woodhouse if (!s || !s->impl) {
171703247512SDavid Woodhouse errno = ENOSYS;
171803247512SDavid Woodhouse return NULL;
171903247512SDavid Woodhouse }
172003247512SDavid Woodhouse
172103247512SDavid Woodhouse h = g_new0(struct qemu_xs_handle, 1);
172203247512SDavid Woodhouse h->impl = s->impl;
172303247512SDavid Woodhouse
172403247512SDavid Woodhouse h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);
172503247512SDavid Woodhouse
172603247512SDavid Woodhouse return h;
172703247512SDavid Woodhouse }
172803247512SDavid Woodhouse
172903247512SDavid Woodhouse static void xs_be_close(struct qemu_xs_handle *h)
173003247512SDavid Woodhouse {
173103247512SDavid Woodhouse while (h->watches) {
173203247512SDavid Woodhouse struct qemu_xs_watch *w = h->watches->data;
173303247512SDavid Woodhouse xs_be_unwatch(h, w);
173403247512SDavid Woodhouse }
173503247512SDavid Woodhouse
173603247512SDavid Woodhouse qemu_bh_delete(h->watch_bh);
173703247512SDavid Woodhouse g_free(h);
173803247512SDavid Woodhouse }
173903247512SDavid Woodhouse
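/*
 * Operations implementing the xenstore_backend_ops interface on top of
 * the emulated store, so in-QEMU Xen backends access it as DOMID_QEMU
 * rather than through a real xenstore connection.
 */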
174003247512SDavid Woodhouse static struct xenstore_backend_ops emu_xenstore_backend_ops = {
174103247512SDavid Woodhouse .open = xs_be_open,
174203247512SDavid Woodhouse .close = xs_be_close,
174303247512SDavid Woodhouse .get_domain_path = xs_be_get_domain_path,
174403247512SDavid Woodhouse .directory = xs_be_directory,
174503247512SDavid Woodhouse .read = xs_be_read,
174603247512SDavid Woodhouse .write = xs_be_write,
174703247512SDavid Woodhouse .create = xs_be_create,
174803247512SDavid Woodhouse .destroy = xs_be_destroy,
174903247512SDavid Woodhouse .watch = xs_be_watch,
175003247512SDavid Woodhouse .unwatch = xs_be_unwatch,
175103247512SDavid Woodhouse .transaction_start = xs_be_transaction_start,
175203247512SDavid Woodhouse .transaction_end = xs_be_transaction_end,
175303247512SDavid Woodhouse };
1754