xref: /qemu/hw/i386/kvm/xen_xenstore.c (revision be1934dfefe74aa1b978c0cda64c2b6282301196)
1c08f5d0eSDavid Woodhouse /*
2c08f5d0eSDavid Woodhouse  * QEMU Xen emulation: Shared/overlay pages support
3c08f5d0eSDavid Woodhouse  *
4c08f5d0eSDavid Woodhouse  * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5c08f5d0eSDavid Woodhouse  *
6c08f5d0eSDavid Woodhouse  * Authors: David Woodhouse <dwmw2@infradead.org>
7c08f5d0eSDavid Woodhouse  *
8c08f5d0eSDavid Woodhouse  * This work is licensed under the terms of the GNU GPL, version 2 or later.
9c08f5d0eSDavid Woodhouse  * See the COPYING file in the top-level directory.
10c08f5d0eSDavid Woodhouse  */
11c08f5d0eSDavid Woodhouse 
12c08f5d0eSDavid Woodhouse #include "qemu/osdep.h"
13c08f5d0eSDavid Woodhouse 
14c08f5d0eSDavid Woodhouse #include "qemu/host-utils.h"
15c08f5d0eSDavid Woodhouse #include "qemu/module.h"
16c08f5d0eSDavid Woodhouse #include "qemu/main-loop.h"
17c08f5d0eSDavid Woodhouse #include "qemu/cutils.h"
18c08f5d0eSDavid Woodhouse #include "qapi/error.h"
19c08f5d0eSDavid Woodhouse #include "qom/object.h"
20c08f5d0eSDavid Woodhouse #include "migration/vmstate.h"
21c08f5d0eSDavid Woodhouse 
22c08f5d0eSDavid Woodhouse #include "hw/sysbus.h"
23c08f5d0eSDavid Woodhouse #include "hw/xen/xen.h"
24c08f5d0eSDavid Woodhouse #include "xen_overlay.h"
25c08f5d0eSDavid Woodhouse #include "xen_evtchn.h"
26c08f5d0eSDavid Woodhouse #include "xen_xenstore.h"
27c08f5d0eSDavid Woodhouse 
28c08f5d0eSDavid Woodhouse #include "sysemu/kvm.h"
29c08f5d0eSDavid Woodhouse #include "sysemu/kvm_xen.h"
30c08f5d0eSDavid Woodhouse 
310254c4d1SDavid Woodhouse #include "trace.h"
320254c4d1SDavid Woodhouse 
330254c4d1SDavid Woodhouse #include "xenstore_impl.h"
340254c4d1SDavid Woodhouse 
35c08f5d0eSDavid Woodhouse #include "hw/xen/interface/io/xs_wire.h"
36c08f5d0eSDavid Woodhouse #include "hw/xen/interface/event_channel.h"
37c08f5d0eSDavid Woodhouse 
38c08f5d0eSDavid Woodhouse #define TYPE_XEN_XENSTORE "xen-xenstore"
39c08f5d0eSDavid Woodhouse OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)
40c08f5d0eSDavid Woodhouse 
41c08f5d0eSDavid Woodhouse #define XEN_PAGE_SHIFT 12
42c08f5d0eSDavid Woodhouse #define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
43c08f5d0eSDavid Woodhouse 
44c08f5d0eSDavid Woodhouse #define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
45c08f5d0eSDavid Woodhouse #define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))
46c08f5d0eSDavid Woodhouse 
47c08f5d0eSDavid Woodhouse #define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))
48c08f5d0eSDavid Woodhouse 
struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    XenstoreImplState *impl;    /* In-QEMU xenstore database state */
    GList *watch_events;        /* Watch notifications queued for delivery */

    MemoryRegion xenstore_page; /* RAM backing the shared interface page */
    struct xenstore_domain_interface *xs; /* Host pointer to that page */
    /* Staging buffers: one full message (header + max payload) each way */
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;        /* Bytes of the current request received */
    uint32_t rsp_offset;        /* Bytes of the current response sent */
    bool rsp_pending;           /* A response is staged in rsp_data */
    bool fatal_error;           /* Protocol violation; ring abandoned */

    evtchn_port_t guest_port;   /* Guest-side event channel port */
    evtchn_port_t be_port;      /* Backend-side (QEMU) bound port */
    struct xenevtchn_handle *eh; /* Backend event channel handle */
};
70c08f5d0eSDavid Woodhouse 
71c08f5d0eSDavid Woodhouse struct XenXenstoreState *xen_xenstore_singleton;
72c08f5d0eSDavid Woodhouse 
73c08f5d0eSDavid Woodhouse static void xen_xenstore_event(void *opaque);
740254c4d1SDavid Woodhouse static void fire_watch_cb(void *opaque, const char *path, const char *token);
75c08f5d0eSDavid Woodhouse 
/*
 * Realize the emulated xenstore device: allocate the shared ring page,
 * open the backend event channel, and create the xenstore database.
 * Mapping the overlay page is deferred until KVM is ready.
 */
static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }
    /* Backing RAM for the single shared xenstore interface page. */
    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
    /* Dispatch backend event-channel FD activity on the main loop. */
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), true,
                       xen_xenstore_event, NULL, NULL, NULL, s);

    /* Create the in-QEMU xenstore implementation for this domain. */
    s->impl = xs_impl_create(xen_domid);
}
103c08f5d0eSDavid Woodhouse 
/* Migrate this device's state only when Xen HVM emulation is active. */
static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}
108c08f5d0eSDavid Woodhouse 
109c08f5d0eSDavid Woodhouse static int xen_xenstore_pre_save(void *opaque)
110c08f5d0eSDavid Woodhouse {
111c08f5d0eSDavid Woodhouse     XenXenstoreState *s = opaque;
112c08f5d0eSDavid Woodhouse 
113c08f5d0eSDavid Woodhouse     if (s->eh) {
114c08f5d0eSDavid Woodhouse         s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
115c08f5d0eSDavid Woodhouse     }
116c08f5d0eSDavid Woodhouse     return 0;
117c08f5d0eSDavid Woodhouse }
118c08f5d0eSDavid Woodhouse 
119c08f5d0eSDavid Woodhouse static int xen_xenstore_post_load(void *opaque, int ver)
120c08f5d0eSDavid Woodhouse {
121c08f5d0eSDavid Woodhouse     XenXenstoreState *s = opaque;
122c08f5d0eSDavid Woodhouse 
123c08f5d0eSDavid Woodhouse     /*
124c08f5d0eSDavid Woodhouse      * As qemu/dom0, rebind to the guest's port. The Windows drivers may
125c08f5d0eSDavid Woodhouse      * unbind the XenStore evtchn and rebind to it, having obtained the
126c08f5d0eSDavid Woodhouse      * "remote" port through EVTCHNOP_status. In the case that migration
127c08f5d0eSDavid Woodhouse      * occurs while it's unbound, the "remote" port needs to be the same
128c08f5d0eSDavid Woodhouse      * as before so that the guest can find it, but should remain unbound.
129c08f5d0eSDavid Woodhouse      */
130c08f5d0eSDavid Woodhouse     if (s->guest_port) {
131c08f5d0eSDavid Woodhouse         int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
132c08f5d0eSDavid Woodhouse                                                      s->guest_port);
133c08f5d0eSDavid Woodhouse         if (be_port < 0) {
134c08f5d0eSDavid Woodhouse             return be_port;
135c08f5d0eSDavid Woodhouse         }
136c08f5d0eSDavid Woodhouse         s->be_port = be_port;
137c08f5d0eSDavid Woodhouse     }
138c08f5d0eSDavid Woodhouse     return 0;
139c08f5d0eSDavid Woodhouse }
140c08f5d0eSDavid Woodhouse 
/*
 * Migration description: the in-flight request/response buffers and
 * offsets, plus the guest event channel port (rebound in post_load).
 */
static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_END_OF_LIST()
    }
};
161c08f5d0eSDavid Woodhouse 
/* QOM class init: hook up realize and the migration state description. */
static void xen_xenstore_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}
169c08f5d0eSDavid Woodhouse 
/* QOM type registration info for the xen-xenstore sysbus device. */
static const TypeInfo xen_xenstore_info = {
    .name          = TYPE_XEN_XENSTORE,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init    = xen_xenstore_class_init,
};
176c08f5d0eSDavid Woodhouse 
/*
 * Instantiate the emulated xenstore device; called during machine setup
 * when Xen emulation is enabled.
 */
void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}
188c08f5d0eSDavid Woodhouse 
/* Register the xen-xenstore QOM type with the type system. */
static void xen_xenstore_register_types(void)
{
    type_register_static(&xen_xenstore_info);
}
193c08f5d0eSDavid Woodhouse 
194c08f5d0eSDavid Woodhouse type_init(xen_xenstore_register_types)
195c08f5d0eSDavid Woodhouse 
196c08f5d0eSDavid Woodhouse uint16_t xen_xenstore_get_port(void)
197c08f5d0eSDavid Woodhouse {
198c08f5d0eSDavid Woodhouse     XenXenstoreState *s = xen_xenstore_singleton;
199c08f5d0eSDavid Woodhouse     if (!s) {
200c08f5d0eSDavid Woodhouse         return 0;
201c08f5d0eSDavid Woodhouse     }
202c08f5d0eSDavid Woodhouse     return s->guest_port;
203c08f5d0eSDavid Woodhouse }
204c08f5d0eSDavid Woodhouse 
205f3341e7bSDavid Woodhouse static bool req_pending(XenXenstoreState *s)
206f3341e7bSDavid Woodhouse {
207f3341e7bSDavid Woodhouse     struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
208f3341e7bSDavid Woodhouse 
209f3341e7bSDavid Woodhouse     return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
210f3341e7bSDavid Woodhouse }
211f3341e7bSDavid Woodhouse 
212f3341e7bSDavid Woodhouse static void reset_req(XenXenstoreState *s)
213f3341e7bSDavid Woodhouse {
214f3341e7bSDavid Woodhouse     memset(s->req_data, 0, sizeof(s->req_data));
215f3341e7bSDavid Woodhouse     s->req_offset = 0;
216f3341e7bSDavid Woodhouse }
217f3341e7bSDavid Woodhouse 
218f3341e7bSDavid Woodhouse static void reset_rsp(XenXenstoreState *s)
219f3341e7bSDavid Woodhouse {
220f3341e7bSDavid Woodhouse     s->rsp_pending = false;
221f3341e7bSDavid Woodhouse 
222f3341e7bSDavid Woodhouse     memset(s->rsp_data, 0, sizeof(s->rsp_data));
223f3341e7bSDavid Woodhouse     s->rsp_offset = 0;
224f3341e7bSDavid Woodhouse }
225f3341e7bSDavid Woodhouse 
2260254c4d1SDavid Woodhouse static void xs_error(XenXenstoreState *s, unsigned int id,
2270254c4d1SDavid Woodhouse                      xs_transaction_t tx_id, int errnum)
2280254c4d1SDavid Woodhouse {
2290254c4d1SDavid Woodhouse     struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
2300254c4d1SDavid Woodhouse     const char *errstr = NULL;
2310254c4d1SDavid Woodhouse 
2320254c4d1SDavid Woodhouse     for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
2330254c4d1SDavid Woodhouse         struct xsd_errors *xsd_error = &xsd_errors[i];
2340254c4d1SDavid Woodhouse 
2350254c4d1SDavid Woodhouse         if (xsd_error->errnum == errnum) {
2360254c4d1SDavid Woodhouse             errstr = xsd_error->errstring;
2370254c4d1SDavid Woodhouse             break;
2380254c4d1SDavid Woodhouse         }
2390254c4d1SDavid Woodhouse     }
2400254c4d1SDavid Woodhouse     assert(errstr);
2410254c4d1SDavid Woodhouse 
2420254c4d1SDavid Woodhouse     trace_xenstore_error(id, tx_id, errstr);
2430254c4d1SDavid Woodhouse 
2440254c4d1SDavid Woodhouse     rsp->type = XS_ERROR;
2450254c4d1SDavid Woodhouse     rsp->req_id = id;
2460254c4d1SDavid Woodhouse     rsp->tx_id = tx_id;
2470254c4d1SDavid Woodhouse     rsp->len = (uint32_t)strlen(errstr) + 1;
2480254c4d1SDavid Woodhouse 
2490254c4d1SDavid Woodhouse     memcpy(&rsp[1], errstr, rsp->len);
2500254c4d1SDavid Woodhouse }
2510254c4d1SDavid Woodhouse 
2520254c4d1SDavid Woodhouse static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
2530254c4d1SDavid Woodhouse                   xs_transaction_t tx_id)
2540254c4d1SDavid Woodhouse {
2550254c4d1SDavid Woodhouse     struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
2560254c4d1SDavid Woodhouse     const char *okstr = "OK";
2570254c4d1SDavid Woodhouse 
2580254c4d1SDavid Woodhouse     rsp->type = type;
2590254c4d1SDavid Woodhouse     rsp->req_id = req_id;
2600254c4d1SDavid Woodhouse     rsp->tx_id = tx_id;
2610254c4d1SDavid Woodhouse     rsp->len = (uint32_t)strlen(okstr) + 1;
2620254c4d1SDavid Woodhouse 
2630254c4d1SDavid Woodhouse     memcpy(&rsp[1], okstr, rsp->len);
2640254c4d1SDavid Woodhouse }
2650254c4d1SDavid Woodhouse 
2660254c4d1SDavid Woodhouse /*
2670254c4d1SDavid Woodhouse  * The correct request and response formats are documented in xen.git:
2680254c4d1SDavid Woodhouse  * docs/misc/xenstore.txt. A summary is given below for convenience.
2690254c4d1SDavid Woodhouse  * The '|' symbol represents a NUL character.
2700254c4d1SDavid Woodhouse  *
2710254c4d1SDavid Woodhouse  * ---------- Database read, write and permissions operations ----------
2720254c4d1SDavid Woodhouse  *
2730254c4d1SDavid Woodhouse  * READ                    <path>|                 <value|>
2740254c4d1SDavid Woodhouse  * WRITE                   <path>|<value|>
2750254c4d1SDavid Woodhouse  *         Store and read the octet string <value> at <path>.
2760254c4d1SDavid Woodhouse  *         WRITE creates any missing parent paths, with empty values.
2770254c4d1SDavid Woodhouse  *
2780254c4d1SDavid Woodhouse  * MKDIR                   <path>|
 *         Ensures that the <path> exists, if necessary by creating
2800254c4d1SDavid Woodhouse  *         it and any missing parents with empty values.  If <path>
2810254c4d1SDavid Woodhouse  *         or any parent already exists, its value is left unchanged.
2820254c4d1SDavid Woodhouse  *
2830254c4d1SDavid Woodhouse  * RM                      <path>|
2840254c4d1SDavid Woodhouse  *         Ensures that the <path> does not exist, by deleting
2850254c4d1SDavid Woodhouse  *         it and all of its children.  It is not an error if <path> does
2860254c4d1SDavid Woodhouse  *         not exist, but it _is_ an error if <path>'s immediate parent
2870254c4d1SDavid Woodhouse  *         does not exist either.
2880254c4d1SDavid Woodhouse  *
2890254c4d1SDavid Woodhouse  * DIRECTORY               <path>|                 <child-leaf-name>|*
2900254c4d1SDavid Woodhouse  *         Gives a list of the immediate children of <path>, as only the
2910254c4d1SDavid Woodhouse  *         leafnames.  The resulting children are each named
2920254c4d1SDavid Woodhouse  *         <path>/<child-leaf-name>.
2930254c4d1SDavid Woodhouse  *
2940254c4d1SDavid Woodhouse  * DIRECTORY_PART          <path>|<offset>         <gencnt>|<child-leaf-name>|*
2950254c4d1SDavid Woodhouse  *         Same as DIRECTORY, but to be used for children lists longer than
2960254c4d1SDavid Woodhouse  *         XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
2970254c4d1SDavid Woodhouse  *         the list of children to return. Return values are the generation
2980254c4d1SDavid Woodhouse  *         count <gencnt> of the node (to be used to ensure the node hasn't
2990254c4d1SDavid Woodhouse  *         changed between two reads: <gencnt> being the same for multiple
3000254c4d1SDavid Woodhouse  *         reads guarantees the node hasn't changed) and the list of children
3010254c4d1SDavid Woodhouse  *         starting at the specified <offset> of the complete list.
3020254c4d1SDavid Woodhouse  *
3030254c4d1SDavid Woodhouse  * GET_PERMS               <path>|                 <perm-as-string>|+
3040254c4d1SDavid Woodhouse  * SET_PERMS               <path>|<perm-as-string>|+?
3050254c4d1SDavid Woodhouse  *         <perm-as-string> is one of the following
3060254c4d1SDavid Woodhouse  *                 w<domid>        write only
3070254c4d1SDavid Woodhouse  *                 r<domid>        read only
3080254c4d1SDavid Woodhouse  *                 b<domid>        both read and write
3090254c4d1SDavid Woodhouse  *                 n<domid>        no access
3100254c4d1SDavid Woodhouse  *         See https://wiki.xen.org/wiki/XenBus section
3110254c4d1SDavid Woodhouse  *         `Permissions' for details of the permissions system.
3120254c4d1SDavid Woodhouse  *         It is possible to set permissions for the special watch paths
3130254c4d1SDavid Woodhouse  *         "@introduceDomain" and "@releaseDomain" to enable receiving those
3140254c4d1SDavid Woodhouse  *         watches in unprivileged domains.
3150254c4d1SDavid Woodhouse  *
3160254c4d1SDavid Woodhouse  * ---------- Watches ----------
3170254c4d1SDavid Woodhouse  *
3180254c4d1SDavid Woodhouse  * WATCH                   <wpath>|<token>|?
3190254c4d1SDavid Woodhouse  *         Adds a watch.
3200254c4d1SDavid Woodhouse  *
3210254c4d1SDavid Woodhouse  *         When a <path> is modified (including path creation, removal,
3220254c4d1SDavid Woodhouse  *         contents change or permissions change) this generates an event
3230254c4d1SDavid Woodhouse  *         on the changed <path>.  Changes made in transactions cause an
3240254c4d1SDavid Woodhouse  *         event only if and when committed.  Each occurring event is
3250254c4d1SDavid Woodhouse  *         matched against all the watches currently set up, and each
3260254c4d1SDavid Woodhouse  *         matching watch results in a WATCH_EVENT message (see below).
3270254c4d1SDavid Woodhouse  *
 *         The event's path matches the watch's <wpath> if it is a child
3290254c4d1SDavid Woodhouse  *         of <wpath>.
3300254c4d1SDavid Woodhouse  *
3310254c4d1SDavid Woodhouse  *         <wpath> can be a <path> to watch or @<wspecial>.  In the
3320254c4d1SDavid Woodhouse  *         latter case <wspecial> may have any syntax but it matches
3330254c4d1SDavid Woodhouse  *         (according to the rules above) only the following special
3340254c4d1SDavid Woodhouse  *         events which are invented by xenstored:
3350254c4d1SDavid Woodhouse  *             @introduceDomain    occurs on INTRODUCE
3360254c4d1SDavid Woodhouse  *             @releaseDomain      occurs on any domain crash or
3370254c4d1SDavid Woodhouse  *                                 shutdown, and also on RELEASE
3380254c4d1SDavid Woodhouse  *                                 and domain destruction
3390254c4d1SDavid Woodhouse  *         <wspecial> events are sent to privileged callers or explicitly
3400254c4d1SDavid Woodhouse  *         via SET_PERMS enabled domains only.
3410254c4d1SDavid Woodhouse  *
3420254c4d1SDavid Woodhouse  *         When a watch is first set up it is triggered once straight
3430254c4d1SDavid Woodhouse  *         away, with <path> equal to <wpath>.  Watches may be triggered
3440254c4d1SDavid Woodhouse  *         spuriously.  The tx_id in a WATCH request is ignored.
3450254c4d1SDavid Woodhouse  *
3460254c4d1SDavid Woodhouse  *         Watches are supposed to be restricted by the permissions
3470254c4d1SDavid Woodhouse  *         system but in practice the implementation is imperfect.
3480254c4d1SDavid Woodhouse  *         Applications should not rely on being sent a notification for
3490254c4d1SDavid Woodhouse  *         paths that they cannot read; however, an application may rely
3500254c4d1SDavid Woodhouse  *         on being sent a watch when a path which it _is_ able to read
3510254c4d1SDavid Woodhouse  *         is deleted even if that leaves only a nonexistent unreadable
 *         parent.  A notification may be omitted if a node's permissions
3530254c4d1SDavid Woodhouse  *         are changed so as to make it unreadable, in which case future
3540254c4d1SDavid Woodhouse  *         notifications may be suppressed (and if the node is later made
3550254c4d1SDavid Woodhouse  *         readable, some notifications may have been lost).
3560254c4d1SDavid Woodhouse  *
3570254c4d1SDavid Woodhouse  * WATCH_EVENT                                     <epath>|<token>|
3580254c4d1SDavid Woodhouse  *         Unsolicited `reply' generated for matching modification events
3590254c4d1SDavid Woodhouse  *         as described above.  req_id and tx_id are both 0.
3600254c4d1SDavid Woodhouse  *
3610254c4d1SDavid Woodhouse  *         <epath> is the event's path, ie the actual path that was
 *         modified; however if the event was the recursive removal of a
 *         parent of <wpath>, <epath> is just
3640254c4d1SDavid Woodhouse  *         <wpath> (rather than the actual path which was removed).  So
3650254c4d1SDavid Woodhouse  *         <epath> is a child of <wpath>, regardless.
3660254c4d1SDavid Woodhouse  *
3670254c4d1SDavid Woodhouse  *         Iff <wpath> for the watch was specified as a relative pathname,
3680254c4d1SDavid Woodhouse  *         the <epath> path will also be relative (with the same base,
3690254c4d1SDavid Woodhouse  *         obviously).
3700254c4d1SDavid Woodhouse  *
3710254c4d1SDavid Woodhouse  * UNWATCH                 <wpath>|<token>|?
3720254c4d1SDavid Woodhouse  *
3730254c4d1SDavid Woodhouse  * RESET_WATCHES           |
3740254c4d1SDavid Woodhouse  *         Reset all watches and transactions of the caller.
3750254c4d1SDavid Woodhouse  *
3760254c4d1SDavid Woodhouse  * ---------- Transactions ----------
3770254c4d1SDavid Woodhouse  *
3780254c4d1SDavid Woodhouse  * TRANSACTION_START       |                       <transid>|
3790254c4d1SDavid Woodhouse  *         <transid> is an opaque uint32_t allocated by xenstored
3800254c4d1SDavid Woodhouse  *         represented as unsigned decimal.  After this, transaction may
3810254c4d1SDavid Woodhouse  *         be referenced by using <transid> (as 32-bit binary) in the
3820254c4d1SDavid Woodhouse  *         tx_id request header field.  When transaction is started whole
3830254c4d1SDavid Woodhouse  *         db is copied; reads and writes happen on the copy.
3840254c4d1SDavid Woodhouse  *         It is not legal to send non-0 tx_id in TRANSACTION_START.
3850254c4d1SDavid Woodhouse  *
3860254c4d1SDavid Woodhouse  * TRANSACTION_END         T|
3870254c4d1SDavid Woodhouse  * TRANSACTION_END         F|
3880254c4d1SDavid Woodhouse  *         tx_id must refer to existing transaction.  After this
3890254c4d1SDavid Woodhouse  *         request the tx_id is no longer valid and may be reused by
3900254c4d1SDavid Woodhouse  *         xenstore.  If F, the transaction is discarded.  If T,
3910254c4d1SDavid Woodhouse  *         it is committed: if there were any other intervening writes
 *         then our END gets EAGAIN.
3930254c4d1SDavid Woodhouse  *
3940254c4d1SDavid Woodhouse  *         The plan is that in the future only intervening `conflicting'
3950254c4d1SDavid Woodhouse  *         writes cause EAGAIN, meaning only writes or other commits
3960254c4d1SDavid Woodhouse  *         which changed paths which were read or written in the
3970254c4d1SDavid Woodhouse  *         transaction at hand.
3980254c4d1SDavid Woodhouse  *
3990254c4d1SDavid Woodhouse  */
4000254c4d1SDavid Woodhouse 
/*
 * XS_READ: the request payload is the NUL-terminated <path>; the reply
 * payload is the node's raw value (copied verbatim, no extra NUL).
 */
static void xs_read(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    g_autoptr(GByteArray) data = g_byte_array_new();
    int err;

    /* The path must be non-empty and NUL-terminated. */
    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_read(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_READ;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    /* Reuse 'len' for the value length; refuse oversized values. */
    len = data->len;
    if (len > XENSTORE_PAYLOAD_MAX) {
        xs_error(s, req_id, tx_id, E2BIG);
        return;
    }

    memcpy(&rsp_data[rsp->len], data->data, len);
    rsp->len += len;
}
4360254c4d1SDavid Woodhouse 
/*
 * XS_WRITE: the request payload is <path>|<value|>. The path is the
 * leading NUL-terminated string; everything after that NUL (which may
 * itself contain NULs) is the value to store.
 */
static void xs_write(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    /*
     * Advance req_data past the NUL terminating the path, decrementing
     * 'len' to the number of value bytes remaining. If the buffer runs
     * out before a NUL is found, the request is malformed.
     */
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /* 'len' may be zero here, which stores an empty value. */
    g_byte_array_append(data, req_data, len);

    trace_xenstore_write(tx_id, path);
    err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WRITE, req_id, tx_id);
}
4730254c4d1SDavid Woodhouse 
4740254c4d1SDavid Woodhouse static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
4750254c4d1SDavid Woodhouse                      xs_transaction_t tx_id, uint8_t *req_data,
4760254c4d1SDavid Woodhouse                      unsigned int len)
4770254c4d1SDavid Woodhouse {
4780254c4d1SDavid Woodhouse     g_autoptr(GByteArray) data = g_byte_array_new();
4790254c4d1SDavid Woodhouse     const char *path;
4800254c4d1SDavid Woodhouse     int err;
4810254c4d1SDavid Woodhouse 
4820254c4d1SDavid Woodhouse     if (len == 0 || req_data[len - 1] != '\0') {
4830254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
4840254c4d1SDavid Woodhouse         return;
4850254c4d1SDavid Woodhouse     }
4860254c4d1SDavid Woodhouse 
4870254c4d1SDavid Woodhouse     path = (const char *)req_data;
4880254c4d1SDavid Woodhouse 
4890254c4d1SDavid Woodhouse     trace_xenstore_mkdir(tx_id, path);
4900254c4d1SDavid Woodhouse     err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
4910254c4d1SDavid Woodhouse     if (err == ENOENT) {
4920254c4d1SDavid Woodhouse         err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
4930254c4d1SDavid Woodhouse     }
4940254c4d1SDavid Woodhouse 
4950254c4d1SDavid Woodhouse     if (!err) {
4960254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, err);
4970254c4d1SDavid Woodhouse         return;
4980254c4d1SDavid Woodhouse     }
4990254c4d1SDavid Woodhouse 
5000254c4d1SDavid Woodhouse     xs_ok(s, XS_MKDIR, req_id, tx_id);
5010254c4d1SDavid Woodhouse }
5020254c4d1SDavid Woodhouse 
/*
 * Append a list of NUL-terminated strings to the staged response payload.
 *
 * @start: byte offset into the concatenated string list at which output
 *         begins (nonzero only for XS_DIRECTORY_PART); earlier bytes are
 *         skipped.
 * @truncate: if true (XS_DIRECTORY_PART semantics), silently stop at the
 *            payload limit; if false, fail the request with E2BIG.
 */
static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
                              GList *strings, unsigned int start, bool truncate)
{
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    GList *l;

    for (l = strings; l; l = l->next) {
        size_t len = strlen(l->data) + 1; /* Including the NUL termination */
        char *str = l->data;

        if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
            if (truncate) {
                /* Emit only as much of this string as still fits. */
                len = XENSTORE_PAYLOAD_MAX - rsp->len;
                if (!len) {
                    return;
                }
            } else {
                xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
                return;
            }
        }

        if (start) {
            /* Still consuming the skip offset for DIRECTORY_PART. */
            if (start >= len) {
                start -= len;
                continue;
            }

            /* Offset lands mid-string: emit its tail only. */
            str += start;
            len -= start;
            start = 0;
        }

        memcpy(&rsp_data[rsp->len], str, len);
        rsp->len += len;
    }
    /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
    if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
        rsp_data[rsp->len++] = '\0';
    }
}
5440254c4d1SDavid Woodhouse 
5450254c4d1SDavid Woodhouse static void xs_directory(XenXenstoreState *s, unsigned int req_id,
5460254c4d1SDavid Woodhouse                          xs_transaction_t tx_id, uint8_t *req_data,
5470254c4d1SDavid Woodhouse                          unsigned int len)
5480254c4d1SDavid Woodhouse {
5490254c4d1SDavid Woodhouse     struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
5500254c4d1SDavid Woodhouse     GList *items = NULL;
5510254c4d1SDavid Woodhouse     const char *path;
5520254c4d1SDavid Woodhouse     int err;
5530254c4d1SDavid Woodhouse 
5540254c4d1SDavid Woodhouse     if (len == 0 || req_data[len - 1] != '\0') {
5550254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
5560254c4d1SDavid Woodhouse         return;
5570254c4d1SDavid Woodhouse     }
5580254c4d1SDavid Woodhouse 
5590254c4d1SDavid Woodhouse     path = (const char *)req_data;
5600254c4d1SDavid Woodhouse 
5610254c4d1SDavid Woodhouse     trace_xenstore_directory(tx_id, path);
5620254c4d1SDavid Woodhouse     err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items);
5630254c4d1SDavid Woodhouse     if (err != 0) {
5640254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, err);
5650254c4d1SDavid Woodhouse         return;
5660254c4d1SDavid Woodhouse     }
5670254c4d1SDavid Woodhouse 
5680254c4d1SDavid Woodhouse     rsp->type = XS_DIRECTORY;
5690254c4d1SDavid Woodhouse     rsp->req_id = req_id;
5700254c4d1SDavid Woodhouse     rsp->tx_id = tx_id;
5710254c4d1SDavid Woodhouse     rsp->len = 0;
5720254c4d1SDavid Woodhouse 
5730254c4d1SDavid Woodhouse     xs_append_strings(s, rsp, items, 0, false);
5740254c4d1SDavid Woodhouse 
5750254c4d1SDavid Woodhouse     g_list_free_full(items, g_free);
5760254c4d1SDavid Woodhouse }
5770254c4d1SDavid Woodhouse 
/*
 * XS_DIRECTORY_PART: return the generation count of @path followed by as
 * many child names (resuming at the requested byte offset) as fit in one
 * payload. Request format: <path>|<offset>|
 */
static void xs_directory_part(XenXenstoreState *s, unsigned int req_id,
                              xs_transaction_t tx_id, uint8_t *req_data,
                              unsigned int len)
{
    const char *offset_str, *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    uint64_t gencnt = 0;
    unsigned int offset;
    GList *items = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    /* Step over the NUL-terminated path; EINVAL if no NUL is found. */
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /* The decimal offset string follows; it too must be NUL-terminated. */
    offset_str = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Anything left over after the offset string is malformed. (If the
     * offset string was absent entirely, the post-decrement above has
     * wrapped len to UINT_MAX, so that case is also rejected here.)
     */
    if (len) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_directory_part(tx_id, path, offset);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items);
    if (err != 0) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY_PART;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    /* The gencnt string leads the payload; +1 keeps its NUL terminator. */
    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1;

    /* truncate=true: fill what fits from @offset, then an extra NUL. */
    xs_append_strings(s, rsp, items, offset, true);

    g_list_free_full(items, g_free);
}
6420254c4d1SDavid Woodhouse 
6430254c4d1SDavid Woodhouse static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id,
6440254c4d1SDavid Woodhouse                                  xs_transaction_t tx_id, uint8_t *req_data,
6450254c4d1SDavid Woodhouse                                  unsigned int len)
6460254c4d1SDavid Woodhouse {
6470254c4d1SDavid Woodhouse     struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
6480254c4d1SDavid Woodhouse     char *rsp_data = (char *)&rsp[1];
6490254c4d1SDavid Woodhouse     int err;
6500254c4d1SDavid Woodhouse 
6510254c4d1SDavid Woodhouse     if (len != 1 || req_data[0] != '\0') {
6520254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
6530254c4d1SDavid Woodhouse         return;
6540254c4d1SDavid Woodhouse     }
6550254c4d1SDavid Woodhouse 
6560254c4d1SDavid Woodhouse     rsp->type = XS_TRANSACTION_START;
6570254c4d1SDavid Woodhouse     rsp->req_id = req_id;
6580254c4d1SDavid Woodhouse     rsp->tx_id = tx_id;
6590254c4d1SDavid Woodhouse     rsp->len = 0;
6600254c4d1SDavid Woodhouse 
6610254c4d1SDavid Woodhouse     err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id);
6620254c4d1SDavid Woodhouse     if (err) {
6630254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, err);
6640254c4d1SDavid Woodhouse         return;
6650254c4d1SDavid Woodhouse     }
6660254c4d1SDavid Woodhouse 
6670254c4d1SDavid Woodhouse     trace_xenstore_transaction_start(tx_id);
6680254c4d1SDavid Woodhouse 
6690254c4d1SDavid Woodhouse     rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id);
6700254c4d1SDavid Woodhouse     assert(rsp->len < XENSTORE_PAYLOAD_MAX);
6710254c4d1SDavid Woodhouse     rsp->len++;
6720254c4d1SDavid Woodhouse }
6730254c4d1SDavid Woodhouse 
6740254c4d1SDavid Woodhouse static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id,
6750254c4d1SDavid Woodhouse                                xs_transaction_t tx_id, uint8_t *req_data,
6760254c4d1SDavid Woodhouse                                unsigned int len)
6770254c4d1SDavid Woodhouse {
6780254c4d1SDavid Woodhouse     bool commit;
6790254c4d1SDavid Woodhouse     int err;
6800254c4d1SDavid Woodhouse 
6810254c4d1SDavid Woodhouse     if (len != 2 || req_data[1] != '\0') {
6820254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
6830254c4d1SDavid Woodhouse         return;
6840254c4d1SDavid Woodhouse     }
6850254c4d1SDavid Woodhouse 
6860254c4d1SDavid Woodhouse     switch (req_data[0]) {
6870254c4d1SDavid Woodhouse     case 'T':
6880254c4d1SDavid Woodhouse         commit = true;
6890254c4d1SDavid Woodhouse         break;
6900254c4d1SDavid Woodhouse     case 'F':
6910254c4d1SDavid Woodhouse         commit = false;
6920254c4d1SDavid Woodhouse         break;
6930254c4d1SDavid Woodhouse     default:
6940254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
6950254c4d1SDavid Woodhouse         return;
6960254c4d1SDavid Woodhouse     }
6970254c4d1SDavid Woodhouse 
6980254c4d1SDavid Woodhouse     trace_xenstore_transaction_end(tx_id, commit);
6990254c4d1SDavid Woodhouse     err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit);
7000254c4d1SDavid Woodhouse     if (err) {
7010254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, err);
7020254c4d1SDavid Woodhouse         return;
7030254c4d1SDavid Woodhouse     }
7040254c4d1SDavid Woodhouse 
7050254c4d1SDavid Woodhouse     xs_ok(s, XS_TRANSACTION_END, req_id, tx_id);
7060254c4d1SDavid Woodhouse }
7070254c4d1SDavid Woodhouse 
7080254c4d1SDavid Woodhouse static void xs_rm(XenXenstoreState *s, unsigned int req_id,
7090254c4d1SDavid Woodhouse                   xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
7100254c4d1SDavid Woodhouse {
7110254c4d1SDavid Woodhouse     const char *path = (const char *)req_data;
7120254c4d1SDavid Woodhouse     int err;
7130254c4d1SDavid Woodhouse 
7140254c4d1SDavid Woodhouse     if (len == 0 || req_data[len - 1] != '\0') {
7150254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
7160254c4d1SDavid Woodhouse         return;
7170254c4d1SDavid Woodhouse     }
7180254c4d1SDavid Woodhouse 
7190254c4d1SDavid Woodhouse     trace_xenstore_rm(tx_id, path);
7200254c4d1SDavid Woodhouse     err = xs_impl_rm(s->impl, xen_domid, tx_id, path);
7210254c4d1SDavid Woodhouse     if (err) {
7220254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, err);
7230254c4d1SDavid Woodhouse         return;
7240254c4d1SDavid Woodhouse     }
7250254c4d1SDavid Woodhouse 
7260254c4d1SDavid Woodhouse     xs_ok(s, XS_RM, req_id, tx_id);
7270254c4d1SDavid Woodhouse }
7280254c4d1SDavid Woodhouse 
7290254c4d1SDavid Woodhouse static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
7300254c4d1SDavid Woodhouse                          xs_transaction_t tx_id, uint8_t *req_data,
7310254c4d1SDavid Woodhouse                          unsigned int len)
7320254c4d1SDavid Woodhouse {
7330254c4d1SDavid Woodhouse     const char *path = (const char *)req_data;
7340254c4d1SDavid Woodhouse     struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
7350254c4d1SDavid Woodhouse     GList *perms = NULL;
7360254c4d1SDavid Woodhouse     int err;
7370254c4d1SDavid Woodhouse 
7380254c4d1SDavid Woodhouse     if (len == 0 || req_data[len - 1] != '\0') {
7390254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
7400254c4d1SDavid Woodhouse         return;
7410254c4d1SDavid Woodhouse     }
7420254c4d1SDavid Woodhouse 
7430254c4d1SDavid Woodhouse     trace_xenstore_get_perms(tx_id, path);
7440254c4d1SDavid Woodhouse     err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
7450254c4d1SDavid Woodhouse     if (err) {
7460254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, err);
7470254c4d1SDavid Woodhouse         return;
7480254c4d1SDavid Woodhouse     }
7490254c4d1SDavid Woodhouse 
7500254c4d1SDavid Woodhouse     rsp->type = XS_GET_PERMS;
7510254c4d1SDavid Woodhouse     rsp->req_id = req_id;
7520254c4d1SDavid Woodhouse     rsp->tx_id = tx_id;
7530254c4d1SDavid Woodhouse     rsp->len = 0;
7540254c4d1SDavid Woodhouse 
7550254c4d1SDavid Woodhouse     xs_append_strings(s, rsp, perms, 0, false);
7560254c4d1SDavid Woodhouse 
7570254c4d1SDavid Woodhouse     g_list_free_full(perms, g_free);
7580254c4d1SDavid Woodhouse }
7590254c4d1SDavid Woodhouse 
/*
 * XS_SET_PERMS: replace the permissions of a node.
 * Request format: <path>|<perm-as-string>|+?
 */
static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    uint8_t *perm;
    GList *perms = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    /* Step over the NUL-terminated path; EINVAL if no NUL is found. */
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Collect the NUL-terminated permission strings. The list elements
     * point straight into the request buffer, which is why only the list
     * cells — not the strings — are freed with g_list_free() below.
     */
    perm = req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            perms = g_list_append(perms, perm);
            perm = req_data;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    SET_PERMS         <path>|<perm-as-string>|+?
     */

    trace_xenstore_set_perms(tx_id, path);
    err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
    g_list_free(perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_SET_PERMS, req_id, tx_id);
}
8090254c4d1SDavid Woodhouse 
/*
 * XS_WATCH: register a watch on <wpath> with the caller-supplied <token>;
 * events will be delivered through fire_watch_cb.
 */
static void xs_watch(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    /* Step over the NUL-terminated path; EINVAL if no NUL is found. */
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /* The watch token follows, also NUL-terminated. */
    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    WATCH             <wpath>|<token>|?
     */

    trace_xenstore_watch(path, token);
    err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WATCH, req_id, tx_id);
}
8590254c4d1SDavid Woodhouse 
/*
 * XS_UNWATCH: remove a watch previously registered for this path/token
 * pair. Request format: <path>|<token>|
 */
static void xs_unwatch(XenXenstoreState *s, unsigned int req_id,
                       xs_transaction_t tx_id, uint8_t *req_data,
                       unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    /* Step over the NUL-terminated path; EINVAL if no NUL is found. */
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /* The token follows; any bytes after its NUL are ignored. */
    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    trace_xenstore_unwatch(path, token);
    err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_UNWATCH, req_id, tx_id);
}
9020254c4d1SDavid Woodhouse 
9030254c4d1SDavid Woodhouse static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id,
9040254c4d1SDavid Woodhouse                              xs_transaction_t tx_id, uint8_t *req_data,
9050254c4d1SDavid Woodhouse                              unsigned int len)
9060254c4d1SDavid Woodhouse {
9070254c4d1SDavid Woodhouse     if (len == 0 || req_data[len - 1] != '\0') {
9080254c4d1SDavid Woodhouse         xs_error(s, req_id, tx_id, EINVAL);
9090254c4d1SDavid Woodhouse         return;
9100254c4d1SDavid Woodhouse     }
9110254c4d1SDavid Woodhouse 
9120254c4d1SDavid Woodhouse     trace_xenstore_reset_watches();
9130254c4d1SDavid Woodhouse     xs_impl_reset_watches(s->impl, xen_domid);
9140254c4d1SDavid Woodhouse 
9150254c4d1SDavid Woodhouse     xs_ok(s, XS_RESET_WATCHES, req_id, tx_id);
9160254c4d1SDavid Woodhouse }
9170254c4d1SDavid Woodhouse 
/*
 * Handler for operations the guest is not permitted to perform on itself
 * (XS_INTRODUCE, XS_RELEASE, etc. — see the dispatch table); they are
 * always denied with EACCES.
 */
static void xs_priv(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *data,
                    unsigned int len)
{
    xs_error(s, req_id, tx_id, EACCES);
}
9240254c4d1SDavid Woodhouse 
/*
 * Catch-all for message types with no registered handler; always replies
 * with ENOSYS.
 */
static void xs_unimpl(XenXenstoreState *s, unsigned int req_id,
                      xs_transaction_t tx_id, uint8_t *data,
                      unsigned int len)
{
    xs_error(s, req_id, tx_id, ENOSYS);
}
9310254c4d1SDavid Woodhouse 
/* Signature shared by all xenstore operation handlers. */
typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id,
                        xs_transaction_t tx_id, uint8_t *data,
                        unsigned int len);

/* Dispatch table entry: operation name (for debugging) and its handler. */
struct xsd_req {
    const char *name;
    xs_impl fn;
};
#define XSD_REQ(_type, _fn)                           \
    [_type] = { .name = #_type, .fn = _fn }

/*
 * Dispatch table, indexed by message type. Types not listed here are
 * zero-initialized (fn == NULL) and fall through to xs_unimpl in
 * process_req().
 */
struct xsd_req xsd_reqs[] = {
    XSD_REQ(XS_READ, xs_read),
    XSD_REQ(XS_WRITE, xs_write),
    XSD_REQ(XS_MKDIR, xs_mkdir),
    XSD_REQ(XS_DIRECTORY, xs_directory),
    XSD_REQ(XS_DIRECTORY_PART, xs_directory_part),
    XSD_REQ(XS_TRANSACTION_START, xs_transaction_start),
    XSD_REQ(XS_TRANSACTION_END, xs_transaction_end),
    XSD_REQ(XS_RM, xs_rm),
    XSD_REQ(XS_GET_PERMS, xs_get_perms),
    XSD_REQ(XS_SET_PERMS, xs_set_perms),
    XSD_REQ(XS_WATCH, xs_watch),
    XSD_REQ(XS_UNWATCH, xs_unwatch),
    XSD_REQ(XS_CONTROL, xs_priv),
    XSD_REQ(XS_INTRODUCE, xs_priv),
    XSD_REQ(XS_RELEASE, xs_priv),
    XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv),
    XSD_REQ(XS_RESUME, xs_priv),
    XSD_REQ(XS_SET_TARGET, xs_priv),
    XSD_REQ(XS_RESET_WATCHES, xs_reset_watches),
};
9640254c4d1SDavid Woodhouse 
965f3341e7bSDavid Woodhouse static void process_req(XenXenstoreState *s)
966f3341e7bSDavid Woodhouse {
967f3341e7bSDavid Woodhouse     struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
9680254c4d1SDavid Woodhouse     xs_impl handler = NULL;
969f3341e7bSDavid Woodhouse 
970f3341e7bSDavid Woodhouse     assert(req_pending(s));
971f3341e7bSDavid Woodhouse     assert(!s->rsp_pending);
972f3341e7bSDavid Woodhouse 
9730254c4d1SDavid Woodhouse     if (req->type < ARRAY_SIZE(xsd_reqs)) {
9740254c4d1SDavid Woodhouse         handler = xsd_reqs[req->type].fn;
9750254c4d1SDavid Woodhouse     }
9760254c4d1SDavid Woodhouse     if (!handler) {
9770254c4d1SDavid Woodhouse         handler = &xs_unimpl;
9780254c4d1SDavid Woodhouse     }
9790254c4d1SDavid Woodhouse 
9800254c4d1SDavid Woodhouse     handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len);
981f3341e7bSDavid Woodhouse 
982f3341e7bSDavid Woodhouse     s->rsp_pending = true;
983f3341e7bSDavid Woodhouse     reset_req(s);
984f3341e7bSDavid Woodhouse }
985f3341e7bSDavid Woodhouse 
/*
 * Copy up to @len bytes of request data out of the shared xenstore ring
 * into @ptr, advancing req_cons. Returns the number of bytes actually
 * copied, which may be less than @len if the guest has not yet written
 * that much. Sets s->fatal_error if the ring indexes are inconsistent.
 */
static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
                                   unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
    unsigned int copied = 0;

    /* Ensure the ring contents don't cross the req_prod access. */
    smp_rmb();

    while (len) {
        unsigned int avail = prod - cons;
        unsigned int offset = MASK_XENSTORE_IDX(cons);
        unsigned int copylen = avail;

        /* More than a ring's worth "available" means corrupt indexes. */
        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > len) {
            copylen = len;
        }
        /* Don't wrap within one memcpy; a second loop pass handles it. */
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(ptr, &s->xs->req[offset], copylen);
        copied += copylen;

        ptr += copylen;
        len -= copylen;

        cons += copylen;
    }

    /*
     * Not sure this ever mattered except on Alpha, but this barrier
     * is to ensure that the update to req_cons is globally visible
     * only after we have consumed all the data from the ring, and we
     * don't end up seeing data written to the ring *after* the other
     * end sees the update and writes more to the ring. Xen's own
     * xenstored has the same barrier here (although with no comment
     * at all, obviously, because it's Xen code).
     */
    smp_mb();

    qatomic_set(&s->xs->req_cons, cons);

    return copied;
}
1044f3341e7bSDavid Woodhouse 
/*
 * Copy up to @len bytes of response data from @ptr into the shared
 * xenstore ring, advancing rsp_prod. Returns the number of bytes
 * actually copied, which may be less than @len if the ring is (nearly)
 * full. Sets s->fatal_error if the ring indexes are inconsistent.
 */
static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
                                 unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
    unsigned int copied = 0;

    /*
     * This matches the barrier in copy_from_ring() (or the guest's
     * equivalent) between consuming the data from the ring and updating
     * the cons pointer. It protects against the pathological case (which
     * again I think never happened except on Alpha) where our
     * subsequent writes to the ring could *cross* the read of
     * rsp_cons and the guest could see the new data when it was
     * intending to read the old.
     */
    smp_mb();

    while (len) {
        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
        unsigned int offset = MASK_XENSTORE_IDX(prod);
        unsigned int copylen = len;

        /* More than a ring's worth of free space means corrupt indexes. */
        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > avail) {
            copylen = avail;
        }
        /* Don't wrap within one memcpy; a second loop pass handles it. */
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }


        memcpy(&s->xs->rsp[offset], ptr, copylen);
        copied += copylen;

        ptr += copylen;
        len -= copylen;

        prod += copylen;
    }

    /* Ensure the ring contents are seen before rsp_prod update. */
    smp_wmb();

    qatomic_set(&s->xs->rsp_prod, prod);

    return copied;
}
1104f3341e7bSDavid Woodhouse 
1105f3341e7bSDavid Woodhouse static unsigned int get_req(XenXenstoreState *s)
1106f3341e7bSDavid Woodhouse {
1107f3341e7bSDavid Woodhouse     unsigned int copied = 0;
1108f3341e7bSDavid Woodhouse 
1109f3341e7bSDavid Woodhouse     if (s->fatal_error) {
1110f3341e7bSDavid Woodhouse         return 0;
1111f3341e7bSDavid Woodhouse     }
1112f3341e7bSDavid Woodhouse 
1113f3341e7bSDavid Woodhouse     assert(!req_pending(s));
1114f3341e7bSDavid Woodhouse 
1115f3341e7bSDavid Woodhouse     if (s->req_offset < XENSTORE_HEADER_SIZE) {
1116f3341e7bSDavid Woodhouse         void *ptr = s->req_data + s->req_offset;
1117f3341e7bSDavid Woodhouse         unsigned int len = XENSTORE_HEADER_SIZE;
1118f3341e7bSDavid Woodhouse         unsigned int copylen = copy_from_ring(s, ptr, len);
1119f3341e7bSDavid Woodhouse 
1120f3341e7bSDavid Woodhouse         copied += copylen;
1121f3341e7bSDavid Woodhouse         s->req_offset += copylen;
1122f3341e7bSDavid Woodhouse     }
1123f3341e7bSDavid Woodhouse 
1124f3341e7bSDavid Woodhouse     if (s->req_offset >= XENSTORE_HEADER_SIZE) {
1125f3341e7bSDavid Woodhouse         struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
1126f3341e7bSDavid Woodhouse 
1127f3341e7bSDavid Woodhouse         if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
1128f3341e7bSDavid Woodhouse             error_report("Illegal XenStore request");
1129f3341e7bSDavid Woodhouse             s->fatal_error = true;
1130f3341e7bSDavid Woodhouse             return 0;
1131f3341e7bSDavid Woodhouse         }
1132f3341e7bSDavid Woodhouse 
1133f3341e7bSDavid Woodhouse         void *ptr = s->req_data + s->req_offset;
1134f3341e7bSDavid Woodhouse         unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
1135f3341e7bSDavid Woodhouse         unsigned int copylen = copy_from_ring(s, ptr, len);
1136f3341e7bSDavid Woodhouse 
1137f3341e7bSDavid Woodhouse         copied += copylen;
1138f3341e7bSDavid Woodhouse         s->req_offset += copylen;
1139f3341e7bSDavid Woodhouse     }
1140f3341e7bSDavid Woodhouse 
1141f3341e7bSDavid Woodhouse     return copied;
1142f3341e7bSDavid Woodhouse }
1143f3341e7bSDavid Woodhouse 
1144f3341e7bSDavid Woodhouse static unsigned int put_rsp(XenXenstoreState *s)
1145f3341e7bSDavid Woodhouse {
1146f3341e7bSDavid Woodhouse     if (s->fatal_error) {
1147f3341e7bSDavid Woodhouse         return 0;
1148f3341e7bSDavid Woodhouse     }
1149f3341e7bSDavid Woodhouse 
1150f3341e7bSDavid Woodhouse     assert(s->rsp_pending);
1151f3341e7bSDavid Woodhouse 
1152f3341e7bSDavid Woodhouse     struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
1153f3341e7bSDavid Woodhouse     assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
1154f3341e7bSDavid Woodhouse 
1155f3341e7bSDavid Woodhouse     void *ptr = s->rsp_data + s->rsp_offset;
1156f3341e7bSDavid Woodhouse     unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
1157f3341e7bSDavid Woodhouse     unsigned int copylen = copy_to_ring(s, ptr, len);
1158f3341e7bSDavid Woodhouse 
1159f3341e7bSDavid Woodhouse     s->rsp_offset += copylen;
1160f3341e7bSDavid Woodhouse 
1161f3341e7bSDavid Woodhouse     /* Have we produced a complete response? */
1162f3341e7bSDavid Woodhouse     if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
1163f3341e7bSDavid Woodhouse         reset_rsp(s);
1164f3341e7bSDavid Woodhouse     }
1165f3341e7bSDavid Woodhouse 
1166f3341e7bSDavid Woodhouse     return copylen;
1167f3341e7bSDavid Woodhouse }
1168f3341e7bSDavid Woodhouse 
11690254c4d1SDavid Woodhouse static void deliver_watch(XenXenstoreState *s, const char *path,
11700254c4d1SDavid Woodhouse                           const char *token)
11710254c4d1SDavid Woodhouse {
11720254c4d1SDavid Woodhouse     struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
11730254c4d1SDavid Woodhouse     uint8_t *rsp_data = (uint8_t *)&rsp[1];
11740254c4d1SDavid Woodhouse     unsigned int len;
11750254c4d1SDavid Woodhouse 
11760254c4d1SDavid Woodhouse     assert(!s->rsp_pending);
11770254c4d1SDavid Woodhouse 
11780254c4d1SDavid Woodhouse     trace_xenstore_watch_event(path, token);
11790254c4d1SDavid Woodhouse 
11800254c4d1SDavid Woodhouse     rsp->type = XS_WATCH_EVENT;
11810254c4d1SDavid Woodhouse     rsp->req_id = 0;
11820254c4d1SDavid Woodhouse     rsp->tx_id = 0;
11830254c4d1SDavid Woodhouse     rsp->len = 0;
11840254c4d1SDavid Woodhouse 
11850254c4d1SDavid Woodhouse     len = strlen(path);
11860254c4d1SDavid Woodhouse 
11870254c4d1SDavid Woodhouse     /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
11880254c4d1SDavid Woodhouse     assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);
11890254c4d1SDavid Woodhouse 
11900254c4d1SDavid Woodhouse     memcpy(&rsp_data[rsp->len], path, len);
11910254c4d1SDavid Woodhouse     rsp->len += len;
11920254c4d1SDavid Woodhouse     rsp_data[rsp->len] = '\0';
11930254c4d1SDavid Woodhouse     rsp->len++;
11940254c4d1SDavid Woodhouse 
11950254c4d1SDavid Woodhouse     len = strlen(token);
11960254c4d1SDavid Woodhouse     /*
11970254c4d1SDavid Woodhouse      * It is possible for the guest to have chosen a token that will
11980254c4d1SDavid Woodhouse      * not fit (along with the patch) into a watch event. We have no
11990254c4d1SDavid Woodhouse      * choice but to drop the event if this is the case.
12000254c4d1SDavid Woodhouse      */
12010254c4d1SDavid Woodhouse     if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) {
12020254c4d1SDavid Woodhouse         return;
12030254c4d1SDavid Woodhouse     }
12040254c4d1SDavid Woodhouse 
12050254c4d1SDavid Woodhouse     memcpy(&rsp_data[rsp->len], token, len);
12060254c4d1SDavid Woodhouse     rsp->len += len;
12070254c4d1SDavid Woodhouse     rsp_data[rsp->len] = '\0';
12080254c4d1SDavid Woodhouse     rsp->len++;
12090254c4d1SDavid Woodhouse 
12100254c4d1SDavid Woodhouse     s->rsp_pending = true;
12110254c4d1SDavid Woodhouse }
12120254c4d1SDavid Woodhouse 
12130254c4d1SDavid Woodhouse struct watch_event {
12140254c4d1SDavid Woodhouse     char *path;
12150254c4d1SDavid Woodhouse     char *token;
12160254c4d1SDavid Woodhouse };
12170254c4d1SDavid Woodhouse 
12180254c4d1SDavid Woodhouse static void queue_watch(XenXenstoreState *s, const char *path,
12190254c4d1SDavid Woodhouse                         const char *token)
12200254c4d1SDavid Woodhouse {
12210254c4d1SDavid Woodhouse     struct watch_event *ev = g_new0(struct watch_event, 1);
12220254c4d1SDavid Woodhouse 
12230254c4d1SDavid Woodhouse     ev->path = g_strdup(path);
12240254c4d1SDavid Woodhouse     ev->token = g_strdup(token);
12250254c4d1SDavid Woodhouse 
12260254c4d1SDavid Woodhouse     s->watch_events = g_list_append(s->watch_events, ev);
12270254c4d1SDavid Woodhouse }
12280254c4d1SDavid Woodhouse 
12290254c4d1SDavid Woodhouse static void fire_watch_cb(void *opaque, const char *path, const char *token)
12300254c4d1SDavid Woodhouse {
12310254c4d1SDavid Woodhouse     XenXenstoreState *s = opaque;
12320254c4d1SDavid Woodhouse 
12330254c4d1SDavid Woodhouse     assert(qemu_mutex_iothread_locked());
12340254c4d1SDavid Woodhouse 
12350254c4d1SDavid Woodhouse     /*
12360254c4d1SDavid Woodhouse      * If there's a response pending, we obviously can't scribble over
12370254c4d1SDavid Woodhouse      * it. But if there's a request pending, it has dibs on the buffer
12380254c4d1SDavid Woodhouse      * too.
12390254c4d1SDavid Woodhouse      *
12400254c4d1SDavid Woodhouse      * In the common case of a watch firing due to backend activity
12410254c4d1SDavid Woodhouse      * when the ring was otherwise idle, we should be able to copy the
12420254c4d1SDavid Woodhouse      * strings directly into the rsp_data and thence the actual ring,
12430254c4d1SDavid Woodhouse      * without needing to perform any allocations and queue them.
12440254c4d1SDavid Woodhouse      */
12450254c4d1SDavid Woodhouse     if (s->rsp_pending || req_pending(s)) {
12460254c4d1SDavid Woodhouse         queue_watch(s, path, token);
12470254c4d1SDavid Woodhouse     } else {
12480254c4d1SDavid Woodhouse         deliver_watch(s, path, token);
12490254c4d1SDavid Woodhouse         /*
12500254c4d1SDavid Woodhouse          * If the message was queued because there was already ring activity,
12510254c4d1SDavid Woodhouse          * no need to wake the guest. But if not, we need to send the evtchn.
12520254c4d1SDavid Woodhouse          */
12530254c4d1SDavid Woodhouse         xen_be_evtchn_notify(s->eh, s->be_port);
12540254c4d1SDavid Woodhouse     }
12550254c4d1SDavid Woodhouse }
12560254c4d1SDavid Woodhouse 
12570254c4d1SDavid Woodhouse static void process_watch_events(XenXenstoreState *s)
12580254c4d1SDavid Woodhouse {
12590254c4d1SDavid Woodhouse     struct watch_event *ev = s->watch_events->data;
12600254c4d1SDavid Woodhouse 
12610254c4d1SDavid Woodhouse     deliver_watch(s, ev->path, ev->token);
12620254c4d1SDavid Woodhouse 
12630254c4d1SDavid Woodhouse     s->watch_events = g_list_remove(s->watch_events, ev);
12640254c4d1SDavid Woodhouse     g_free(ev->path);
12650254c4d1SDavid Woodhouse     g_free(ev->token);
12660254c4d1SDavid Woodhouse     g_free(ev);
12670254c4d1SDavid Woodhouse }
12680254c4d1SDavid Woodhouse 
1269c08f5d0eSDavid Woodhouse static void xen_xenstore_event(void *opaque)
1270c08f5d0eSDavid Woodhouse {
1271c08f5d0eSDavid Woodhouse     XenXenstoreState *s = opaque;
1272c08f5d0eSDavid Woodhouse     evtchn_port_t port = xen_be_evtchn_pending(s->eh);
1273f3341e7bSDavid Woodhouse     unsigned int copied_to, copied_from;
1274f3341e7bSDavid Woodhouse     bool processed, notify = false;
1275f3341e7bSDavid Woodhouse 
1276c08f5d0eSDavid Woodhouse     if (port != s->be_port) {
1277c08f5d0eSDavid Woodhouse         return;
1278c08f5d0eSDavid Woodhouse     }
1279f3341e7bSDavid Woodhouse 
1280c08f5d0eSDavid Woodhouse     /* We know this is a no-op. */
1281c08f5d0eSDavid Woodhouse     xen_be_evtchn_unmask(s->eh, port);
1282f3341e7bSDavid Woodhouse 
1283f3341e7bSDavid Woodhouse     do {
1284f3341e7bSDavid Woodhouse         copied_to = copied_from = 0;
1285f3341e7bSDavid Woodhouse         processed = false;
1286f3341e7bSDavid Woodhouse 
12870254c4d1SDavid Woodhouse         if (!s->rsp_pending && s->watch_events) {
12880254c4d1SDavid Woodhouse             process_watch_events(s);
12890254c4d1SDavid Woodhouse         }
12900254c4d1SDavid Woodhouse 
1291f3341e7bSDavid Woodhouse         if (s->rsp_pending) {
1292f3341e7bSDavid Woodhouse             copied_to = put_rsp(s);
1293f3341e7bSDavid Woodhouse         }
1294f3341e7bSDavid Woodhouse 
1295f3341e7bSDavid Woodhouse         if (!req_pending(s)) {
1296f3341e7bSDavid Woodhouse             copied_from = get_req(s);
1297f3341e7bSDavid Woodhouse         }
1298f3341e7bSDavid Woodhouse 
12990254c4d1SDavid Woodhouse         if (req_pending(s) && !s->rsp_pending && !s->watch_events) {
1300f3341e7bSDavid Woodhouse             process_req(s);
1301f3341e7bSDavid Woodhouse             processed = true;
1302f3341e7bSDavid Woodhouse         }
1303f3341e7bSDavid Woodhouse 
1304f3341e7bSDavid Woodhouse         notify |= copied_to || copied_from;
1305f3341e7bSDavid Woodhouse     } while (copied_to || copied_from || processed);
1306f3341e7bSDavid Woodhouse 
1307f3341e7bSDavid Woodhouse     if (notify) {
1308c08f5d0eSDavid Woodhouse         xen_be_evtchn_notify(s->eh, s->be_port);
1309c08f5d0eSDavid Woodhouse     }
1310f3341e7bSDavid Woodhouse }
1311c08f5d0eSDavid Woodhouse 
1312c08f5d0eSDavid Woodhouse static void alloc_guest_port(XenXenstoreState *s)
1313c08f5d0eSDavid Woodhouse {
1314c08f5d0eSDavid Woodhouse     struct evtchn_alloc_unbound alloc = {
1315c08f5d0eSDavid Woodhouse         .dom = DOMID_SELF,
1316c08f5d0eSDavid Woodhouse         .remote_dom = DOMID_QEMU,
1317c08f5d0eSDavid Woodhouse     };
1318c08f5d0eSDavid Woodhouse 
1319c08f5d0eSDavid Woodhouse     if (!xen_evtchn_alloc_unbound_op(&alloc)) {
1320c08f5d0eSDavid Woodhouse         s->guest_port = alloc.port;
1321c08f5d0eSDavid Woodhouse     }
1322c08f5d0eSDavid Woodhouse }
1323c08f5d0eSDavid Woodhouse 
1324c08f5d0eSDavid Woodhouse int xen_xenstore_reset(void)
1325c08f5d0eSDavid Woodhouse {
1326c08f5d0eSDavid Woodhouse     XenXenstoreState *s = xen_xenstore_singleton;
1327c08f5d0eSDavid Woodhouse     int err;
1328c08f5d0eSDavid Woodhouse 
1329c08f5d0eSDavid Woodhouse     if (!s) {
1330c08f5d0eSDavid Woodhouse         return -ENOTSUP;
1331c08f5d0eSDavid Woodhouse     }
1332c08f5d0eSDavid Woodhouse 
1333c08f5d0eSDavid Woodhouse     s->req_offset = s->rsp_offset = 0;
1334c08f5d0eSDavid Woodhouse     s->rsp_pending = false;
1335c08f5d0eSDavid Woodhouse 
1336c08f5d0eSDavid Woodhouse     if (!memory_region_is_mapped(&s->xenstore_page)) {
1337c08f5d0eSDavid Woodhouse         uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS;
1338c08f5d0eSDavid Woodhouse         xen_overlay_do_map_page(&s->xenstore_page, gpa);
1339c08f5d0eSDavid Woodhouse     }
1340c08f5d0eSDavid Woodhouse 
1341c08f5d0eSDavid Woodhouse     alloc_guest_port(s);
1342c08f5d0eSDavid Woodhouse 
1343c08f5d0eSDavid Woodhouse     /*
1344c08f5d0eSDavid Woodhouse      * As qemu/dom0, bind to the guest's port. For incoming migration, this
1345c08f5d0eSDavid Woodhouse      * will be unbound as the guest's evtchn table is overwritten. We then
1346c08f5d0eSDavid Woodhouse      * rebind to the correct guest port in xen_xenstore_post_load().
1347c08f5d0eSDavid Woodhouse      */
1348c08f5d0eSDavid Woodhouse     err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port);
1349c08f5d0eSDavid Woodhouse     if (err < 0) {
1350c08f5d0eSDavid Woodhouse         return err;
1351c08f5d0eSDavid Woodhouse     }
1352c08f5d0eSDavid Woodhouse     s->be_port = err;
1353c08f5d0eSDavid Woodhouse 
1354c08f5d0eSDavid Woodhouse     return 0;
1355c08f5d0eSDavid Woodhouse }
1356