/*
 * QEMU Xen emulation: Shared/overlay pages support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "migration/vmstate.h"

#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "xen_overlay.h"
#include "xen_evtchn.h"
#include "xen_xenstore.h"

#include "sysemu/kvm.h"
#include "sysemu/kvm_xen.h"

#include "trace.h"

#include "xenstore_impl.h"

#include "hw/xen/interface/io/xs_wire.h"
#include "hw/xen/interface/event_channel.h"

#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)

#define XEN_PAGE_SHIFT 12
#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)

#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))

#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))

struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    XenstoreImplState *impl;
    GList *watch_events;

    MemoryRegion xenstore_page;
    struct xenstore_domain_interface *xs;
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;
    uint32_t rsp_offset;
    bool rsp_pending;
    bool fatal_error;

    evtchn_port_t guest_port;
    evtchn_port_t be_port;
    struct xenevtchn_handle *eh;

    uint8_t *impl_state;
    uint32_t impl_state_size;
};

struct XenXenstoreState *xen_xenstore_singleton;

static void xen_xenstore_event(void *opaque);
static void fire_watch_cb(void *opaque, const char *path, const char *token);

static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
                                                GList *perms,
                                                const char *relpath,
                                                const char *fmt, ...)
{
    gchar *abspath;
    gchar *value;
    va_list args;
    GByteArray *data;
    int err;

    abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath);
    va_start(args, fmt);
    value = g_strdup_vprintf(fmt, args);
    va_end(args);

    data = g_byte_array_new_take((void *)value, strlen(value));

    err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data);
    assert(!err);

    g_byte_array_unref(data);

    err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms);
    assert(!err);

    g_free(abspath);
}

static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);
    GList *perms;

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }
    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), true,
                       xen_xenstore_event, NULL, NULL, NULL, s);

    s->impl = xs_impl_create(xen_domid);

    /* Populate the default nodes */

    /* Nodes owned by 'dom0' but readable by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
    perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));

    relpath_printf(s, perms, "", "%s", "");

    relpath_printf(s, perms, "domid", "%u", xen_domid);

    relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1);
    relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1);

    relpath_printf(s, perms, "platform/acpi", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s3", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s4", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0);

    g_list_free_full(perms, g_free);

    /* Nodes owned by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid));

    relpath_printf(s, perms, "attr", "%s", "");

    relpath_printf(s, perms, "control/shutdown", "%s", "");
    relpath_printf(s, perms, "control/feature-poweroff", "%u", 1);
    relpath_printf(s, perms, "control/feature-reboot", "%u", 1);
    relpath_printf(s, perms, "control/feature-suspend", "%u", 1);
    relpath_printf(s, perms, "control/feature-s3", "%u", 1);
    relpath_printf(s, perms, "control/feature-s4", "%u", 1);

    relpath_printf(s, perms, "data", "%s", "");
    relpath_printf(s, perms, "device", "%s", "");
    relpath_printf(s, perms, "drivers", "%s", "");
    relpath_printf(s, perms, "error", "%s", "");
    relpath_printf(s, perms, "feature", "%s", "");

    g_list_free_full(perms, g_free);
}

static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}

static int xen_xenstore_pre_save(void *opaque)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;

    if (s->eh) {
        s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
    }

    g_free(s->impl_state);
    save = xs_impl_serialize(s->impl);
    s->impl_state = save->data;
    s->impl_state_size = save->len;
    g_byte_array_free(save, false);

    return 0;
}
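
/*
 * Note the buffer ownership handoff around this point: in pre_save
 * above, g_byte_array_free(save, false) frees only the GByteArray
 * wrapper and leaves the underlying buffer allocated, so s->impl_state
 * holds the serialized snapshot outright. In post_load below,
 * g_byte_array_new_take() wraps that same buffer and assumes ownership
 * of it again. The impl_state/impl_state_size pair is what actually
 * travels in the migration stream.
 */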

static int xen_xenstore_post_load(void *opaque, int ver)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;
    int ret;

    /*
     * As qemu/dom0, rebind to the guest's port. The Windows drivers may
     * unbind the XenStore evtchn and rebind to it, having obtained the
     * "remote" port through EVTCHNOP_status. In the case that migration
     * occurs while it's unbound, the "remote" port needs to be the same
     * as before so that the guest can find it, but should remain unbound.
     */
    if (s->guest_port) {
        int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
                                                     s->guest_port);
        if (be_port < 0) {
            return be_port;
        }
        s->be_port = be_port;
    }

    save = g_byte_array_new_take(s->impl_state, s->impl_state_size);
    s->impl_state = NULL;
    s->impl_state_size = 0;

    ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
    return ret;
}

static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .unmigratable = 1, /* The PV back ends don't migrate yet */
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_UINT32(impl_state_size, XenXenstoreState),
        VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState,
                                    impl_state_size, 0,
                                    vmstate_info_uint8, uint8_t),
        VMSTATE_END_OF_LIST()
    }
};

static void xen_xenstore_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}

static const TypeInfo xen_xenstore_info = {
    .name = TYPE_XEN_XENSTORE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init = xen_xenstore_class_init,
};

void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}

static void xen_xenstore_register_types(void)
{
    type_register_static(&xen_xenstore_info);
}

type_init(xen_xenstore_register_types)

uint16_t xen_xenstore_get_port(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    if (!s) {
        return 0;
    }
    return s->guest_port;
}

static bool req_pending(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
}

static void reset_req(XenXenstoreState *s)
{
    memset(s->req_data, 0, sizeof(s->req_data));
    s->req_offset = 0;
}

static void reset_rsp(XenXenstoreState *s)
{
    s->rsp_pending = false;

    memset(s->rsp_data, 0, sizeof(s->rsp_data));
    s->rsp_offset = 0;
}

static void xs_error(XenXenstoreState *s, unsigned int id,
                     xs_transaction_t tx_id, int errnum)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *errstr = NULL;

    for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
        struct xsd_errors *xsd_error = &xsd_errors[i];

        if (xsd_error->errnum == errnum) {
            errstr = xsd_error->errstring;
            break;
        }
    }
    assert(errstr);

    trace_xenstore_error(id, tx_id, errstr);

    rsp->type = XS_ERROR;
    rsp->req_id = id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(errstr) + 1;

    memcpy(&rsp[1], errstr, rsp->len);
}

static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
                  xs_transaction_t tx_id)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *okstr = "OK";

    rsp->type = type;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(okstr) + 1;

    memcpy(&rsp[1], okstr, rsp->len);
}

/*
 * The correct request and response formats are documented in xen.git:
 * docs/misc/xenstore.txt. A summary is given below for convenience.
 * The '|' symbol represents a NUL character.
 *
 * ---------- Database read, write and permissions operations ----------
 *
 * READ                    <path>|                 <value|>
 * WRITE                   <path>|<value|>
 *         Store and read the octet string <value> at <path>.
 *         WRITE creates any missing parent paths, with empty values.
 *
 * MKDIR                   <path>|
 *         Ensures that the <path> exists, if necessary by creating
 *         it and any missing parents with empty values. If <path>
 *         or any parent already exists, its value is left unchanged.
 *
 * RM                      <path>|
 *         Ensures that the <path> does not exist, by deleting
 *         it and all of its children. It is not an error if <path> does
 *         not exist, but it _is_ an error if <path>'s immediate parent
 *         does not exist either.
 *
 * DIRECTORY               <path>|                 <child-leaf-name>|*
 *         Gives a list of the immediate children of <path>, as only the
 *         leafnames. The resulting children are each named
 *         <path>/<child-leaf-name>.
 *
 * DIRECTORY_PART          <path>|<offset>         <gencnt>|<child-leaf-name>|*
 *         Same as DIRECTORY, but to be used for children lists longer than
 *         XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
 *         the list of children to return. Return values are the generation
 *         count <gencnt> of the node (to be used to ensure the node hasn't
 *         changed between two reads: <gencnt> being the same for multiple
 *         reads guarantees the node hasn't changed) and the list of children
 *         starting at the specified <offset> of the complete list.
 *
 * GET_PERMS               <path>|                 <perm-as-string>|+
 * SET_PERMS               <path>|<perm-as-string>|+?
 *         <perm-as-string> is one of the following
 *                 w<domid>        write only
 *                 r<domid>        read only
 *                 b<domid>        both read and write
 *                 n<domid>        no access
 *         See https://wiki.xen.org/wiki/XenBus section
 *         `Permissions' for details of the permissions system.
 *         It is possible to set permissions for the special watch paths
 *         "@introduceDomain" and "@releaseDomain" to enable receiving those
 *         watches in unprivileged domains.
 *
 * ---------- Watches ----------
 *
 * WATCH                   <wpath>|<token>|?
 *         Adds a watch.
 *
 *         When a <path> is modified (including path creation, removal,
 *         contents change or permissions change) this generates an event
 *         on the changed <path>. Changes made in transactions cause an
 *         event only if and when committed. Each occurring event is
 *         matched against all the watches currently set up, and each
 *         matching watch results in a WATCH_EVENT message (see below).
 *
 *         The event's path matches the watch's <wpath> if it is a child
 *         of <wpath>.
 *
 *         <wpath> can be a <path> to watch or @<wspecial>. In the
 *         latter case <wspecial> may have any syntax but it matches
 *         (according to the rules above) only the following special
 *         events which are invented by xenstored:
 *             @introduceDomain    occurs on INTRODUCE
 *             @releaseDomain      occurs on any domain crash or
 *                                 shutdown, and also on RELEASE
 *                                 and domain destruction
 *         <wspecial> events are sent to privileged callers or explicitly
 *         via SET_PERMS enabled domains only.
 *
 *         When a watch is first set up it is triggered once straight
 *         away, with <path> equal to <wpath>. Watches may be triggered
 *         spuriously. The tx_id in a WATCH request is ignored.
 *
 *         Watches are supposed to be restricted by the permissions
 *         system but in practice the implementation is imperfect.
 *         Applications should not rely on being sent a notification for
 *         paths that they cannot read; however, an application may rely
 *         on being sent a watch when a path which it _is_ able to read
 *         is deleted even if that leaves only a nonexistent unreadable
 *         parent. A notification may be omitted if a node's permissions
 *         are changed so as to make it unreadable, in which case future
 *         notifications may be suppressed (and if the node is later made
 *         readable, some notifications may have been lost).
 *
 * WATCH_EVENT                                     <epath>|<token>|
 *         Unsolicited `reply' generated for matching modification events
 *         as described above. req_id and tx_id are both 0.
 *
 *         <epath> is the event's path, ie the actual path that was
 *         modified; however if the event was the recursive removal of a
 *         parent of <wpath>, <epath> is just
 *         <wpath> (rather than the actual path which was removed). So
 *         <epath> is a child of <wpath>, regardless.
 *
 *         Iff <wpath> for the watch was specified as a relative pathname,
 *         the <epath> path will also be relative (with the same base,
 *         obviously).
 *
 * UNWATCH                 <wpath>|<token>|?
 *
 * RESET_WATCHES           |
 *         Reset all watches and transactions of the caller.
 *
 * ---------- Transactions ----------
 *
 * TRANSACTION_START       |                       <transid>|
 *         <transid> is an opaque uint32_t allocated by xenstored
 *         represented as unsigned decimal. After this, transaction may
 *         be referenced by using <transid> (as 32-bit binary) in the
 *         tx_id request header field. When a transaction is started the
 *         whole db is copied; reads and writes happen on the copy.
 *         It is not legal to send non-0 tx_id in TRANSACTION_START.
 *
 * TRANSACTION_END         T|
 * TRANSACTION_END         F|
 *         tx_id must refer to an existing transaction. After this
 *         request the tx_id is no longer valid and may be reused by
 *         xenstore. If F, the transaction is discarded. If T,
 *         it is committed: if there were any other intervening writes
 *         then our END gets EAGAIN.
 *
 *         The plan is that in the future only intervening `conflicting'
 *         writes cause EAGAIN, meaning only writes or other commits
 *         which changed paths which were read or written in the
 *         transaction at hand.
 *
 */
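
/*
 * Illustrative sketch (not part of the protocol text above, values
 * hypothetical): a guest issuing READ for "/local/domain/1/domid"
 * places a struct xsd_sockmsg header on the request ring, followed
 * immediately by the NUL-terminated path. The req_id is chosen by the
 * client and simply echoed back so responses can be matched up:
 *
 *     struct xsd_sockmsg hdr = {
 *         .type   = XS_READ,
 *         .req_id = 1,                               // client's choice
 *         .tx_id  = 0,                               // 0 = no transaction
 *         .len    = sizeof("/local/domain/1/domid"), // payload incl. NUL
 *     };
 *     // ... followed by the bytes "/local/domain/1/domid\0"
 *
 * The response is a similar header with type XS_READ (or XS_ERROR) and
 * the value bytes as payload.
 */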

static void xs_read(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    g_autoptr(GByteArray) data = g_byte_array_new();
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_read(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_READ;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    len = data->len;
    if (len > XENSTORE_PAYLOAD_MAX) {
        xs_error(s, req_id, tx_id, E2BIG);
        return;
    }

    memcpy(&rsp_data[rsp->len], data->data, len);
    rsp->len += len;
}

static void xs_write(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    g_byte_array_append(data, req_data, len);

    trace_xenstore_write(tx_id, path);
    err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WRITE, req_id, tx_id);
}
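
/*
 * A sketch of the WRITE payload as parsed above, with hypothetical
 * contents: the path is NUL-terminated, and everything after that NUL
 * up to hdr.len is the value, which is *not* NUL-terminated and may
 * itself contain NUL bytes:
 *
 *     payload: 'f' 'o' 'o' '\0' 'b' 'a' 'r'
 *              \__ path __/      \_ value _/
 *
 * i.e. the value is (len - strlen(path) - 1) bytes; here "bar" is
 * written to node "foo".
 */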

static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_mkdir(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err == ENOENT) {
        err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    }

    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_MKDIR, req_id, tx_id);
}

static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
                              GList *strings, unsigned int start, bool truncate)
{
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    GList *l;

    for (l = strings; l; l = l->next) {
        size_t len = strlen(l->data) + 1; /* Including the NUL termination */
        char *str = l->data;

        if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
            if (truncate) {
                len = XENSTORE_PAYLOAD_MAX - rsp->len;
                if (!len) {
                    return;
                }
            } else {
                xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
                return;
            }
        }

        if (start) {
            if (start >= len) {
                start -= len;
                continue;
            }

            str += start;
            len -= start;
            start = 0;
        }

        memcpy(&rsp_data[rsp->len], str, len);
        rsp->len += len;
    }
    /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
    if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
        rsp_data[rsp->len++] = '\0';
    }
}
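
/*
 * Example of the string-list payload that xs_append_strings() builds
 * for XS_DIRECTORY, XS_DIRECTORY_PART and XS_GET_PERMS (child names
 * hypothetical): a node with children "console", "name" and "memory"
 * produces the response payload:
 *
 *     "console\0name\0memory\0"
 *
 * In the XS_DIRECTORY_PART case the list may be cut off at
 * XENSTORE_PAYLOAD_MAX and fetched piecewise by repeating the request
 * with the byte <offset> at which the previous reply stopped.
 */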

static void xs_directory(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *items = NULL;
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_directory(tx_id, path);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items);
    if (err != 0) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, items, 0, false);

    g_list_free_full(items, g_free);
}

static void xs_directory_part(XenXenstoreState *s, unsigned int req_id,
                              xs_transaction_t tx_id, uint8_t *req_data,
                              unsigned int len)
{
    const char *offset_str, *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    uint64_t gencnt = 0;
    unsigned int offset;
    GList *items = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    offset_str = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    if (len) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_directory_part(tx_id, path, offset);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items);
    if (err != 0) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY_PART;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1;

    xs_append_strings(s, rsp, items, offset, true);

    g_list_free_full(items, g_free);
}

static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id,
                                 xs_transaction_t tx_id, uint8_t *req_data,
                                 unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    int err;

    if (len != 1 || req_data[0] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    rsp->type = XS_TRANSACTION_START;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    trace_xenstore_transaction_start(tx_id);

    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id);
    assert(rsp->len < XENSTORE_PAYLOAD_MAX);
    rsp->len++;
}
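
/*
 * A sketch of a complete transaction exchange against the two handlers
 * here, shown as wire messages with hypothetical values ('|' is a NUL,
 * as in the summary comment above):
 *
 *     -> XS_TRANSACTION_START, tx_id = 0, payload "|"     (a lone NUL)
 *     <- XS_TRANSACTION_START, payload "3|"               (new <transid>)
 *     -> XS_WRITE, tx_id = 3, payload "foo|bar"
 *     <- XS_WRITE, tx_id = 3, payload "OK|"
 *     -> XS_TRANSACTION_END, tx_id = 3, payload "T|"      (commit)
 *     <- XS_TRANSACTION_END "OK|", or XS_ERROR "EAGAIN|" if other
 *        writes intervened and the whole transaction must be retried.
 */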

static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id,
                               xs_transaction_t tx_id, uint8_t *req_data,
                               unsigned int len)
{
    bool commit;
    int err;

    if (len != 2 || req_data[1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    switch (req_data[0]) {
    case 'T':
        commit = true;
        break;
    case 'F':
        commit = false;
        break;
    default:
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_transaction_end(tx_id, commit);
    err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_TRANSACTION_END, req_id, tx_id);
}

static void xs_rm(XenXenstoreState *s, unsigned int req_id,
                  xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_rm(tx_id, path);
    err = xs_impl_rm(s->impl, xen_domid, tx_id, path);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_RM, req_id, tx_id);
}

static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *perms = NULL;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_get_perms(tx_id, path);
    err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_GET_PERMS;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, perms, 0, false);

    g_list_free_full(perms, g_free);
}
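
/*
 * Example of the <perm-as-string> list these handlers exchange (values
 * hypothetical): a node owned by domain 0, with a default of no access
 * and read access granted to domain 1, is encoded as the string list:
 *
 *     "n0\0r1\0"
 *
 * The first entry names the owner and the access every other domain
 * gets by default; subsequent entries are per-domain exceptions.
 */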

static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    uint8_t *perm;
    GList *perms = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    perm = req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            perms = g_list_append(perms, perm);
            perm = req_data;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    SET_PERMS         <path>|<perm-as-string>|+?
     */

    trace_xenstore_set_perms(tx_id, path);
    err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
    g_list_free(perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_SET_PERMS, req_id, tx_id);
}

static void xs_watch(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    WATCH             <wpath>|<token>|?
     */

    trace_xenstore_watch(path, token);
    err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WATCH, req_id, tx_id);
}

static void xs_unwatch(XenXenstoreState *s, unsigned int req_id,
                       xs_transaction_t tx_id, uint8_t *req_data,
                       unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    trace_xenstore_unwatch(path, token);
    err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_UNWATCH, req_id, tx_id);
}

static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id,
                             xs_transaction_t tx_id, uint8_t *req_data,
                             unsigned int len)
{
    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_reset_watches();
    xs_impl_reset_watches(s->impl, xen_domid);

    xs_ok(s, XS_RESET_WATCHES, req_id, tx_id);
}

static void xs_priv(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *data,
                    unsigned int len)
{
    xs_error(s, req_id, tx_id, EACCES);
}

static void xs_unimpl(XenXenstoreState *s, unsigned int req_id,
                      xs_transaction_t tx_id, uint8_t *data,
                      unsigned int len)
{
    xs_error(s, req_id, tx_id, ENOSYS);
}

typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id,
                        xs_transaction_t tx_id, uint8_t *data,
                        unsigned int len);

struct xsd_req {
    const char *name;
    xs_impl fn;
};
#define XSD_REQ(_type, _fn)                           \
    [_type] = { .name = #_type, .fn = _fn }

struct xsd_req xsd_reqs[] = {
    XSD_REQ(XS_READ, xs_read),
    XSD_REQ(XS_WRITE, xs_write),
    XSD_REQ(XS_MKDIR, xs_mkdir),
    XSD_REQ(XS_DIRECTORY, xs_directory),
    XSD_REQ(XS_DIRECTORY_PART, xs_directory_part),
    XSD_REQ(XS_TRANSACTION_START, xs_transaction_start),
    XSD_REQ(XS_TRANSACTION_END, xs_transaction_end),
    XSD_REQ(XS_RM, xs_rm),
    XSD_REQ(XS_GET_PERMS, xs_get_perms),
    XSD_REQ(XS_SET_PERMS, xs_set_perms),
    XSD_REQ(XS_WATCH, xs_watch),
    XSD_REQ(XS_UNWATCH, xs_unwatch),
    XSD_REQ(XS_CONTROL, xs_priv),
    XSD_REQ(XS_INTRODUCE, xs_priv),
    XSD_REQ(XS_RELEASE, xs_priv),
    XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv),
    XSD_REQ(XS_RESUME, xs_priv),
    XSD_REQ(XS_SET_TARGET, xs_priv),
    XSD_REQ(XS_RESET_WATCHES, xs_reset_watches),
};

static void process_req(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
    xs_impl handler = NULL;

    assert(req_pending(s));
    assert(!s->rsp_pending);

    if (req->type < ARRAY_SIZE(xsd_reqs)) {
        handler = xsd_reqs[req->type].fn;
    }
    if (!handler) {
        handler = &xs_unimpl;
    }

    handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len);

    s->rsp_pending = true;
    reset_req(s);
}
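
/*
 * For reference, the shared page the ring helpers below operate on is
 * laid out as in hw/xen/interface/io/xs_wire.h (abridged here):
 *
 *     struct xenstore_domain_interface {
 *         char req[XENSTORE_RING_SIZE];   // requests to xenstore daemon
 *         char rsp[XENSTORE_RING_SIZE];   // replies and watch events
 *         XENSTORE_RING_IDX req_cons, req_prod;
 *         XENSTORE_RING_IDX rsp_cons, rsp_prod;
 *         ...
 *     };
 *
 * The indices are free-running counters; they are reduced modulo the
 * ring size only when used as buffer offsets, via MASK_XENSTORE_IDX().
 */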

static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
                                   unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
    unsigned int copied = 0;

    /* Ensure the ring contents don't cross the req_prod access. */
    smp_rmb();

    while (len) {
        unsigned int avail = prod - cons;
        unsigned int offset = MASK_XENSTORE_IDX(cons);
        unsigned int copylen = avail;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > len) {
            copylen = len;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(ptr, &s->xs->req[offset], copylen);
        copied += copylen;

        ptr += copylen;
        len -= copylen;

        cons += copylen;
    }

    /*
     * Not sure this ever mattered except on Alpha, but this barrier
     * is to ensure that the update to req_cons is globally visible
     * only after we have consumed all the data from the ring, and we
     * don't end up seeing data written to the ring *after* the other
     * end sees the update and writes more to the ring. Xen's own
     * xenstored has the same barrier here (although with no comment
     * at all, obviously, because it's Xen code).
     */
    smp_mb();

    qatomic_set(&s->xs->req_cons, cons);

    return copied;
}
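
/*
 * Worked example of the free-running index arithmetic used above and
 * below (hypothetical values, XENSTORE_RING_SIZE being 1024): with
 * req_prod == 0x10002 and req_cons == 0xffff, avail = prod - cons = 3,
 * and unsigned wraparound keeps that subtraction correct even after
 * the indices overflow. MASK_XENSTORE_IDX(0xffff) == 0x3ff, so those
 * three bytes straddle the end of the buffer and the copy is split
 * into two passes by the copylen clamping. Any avail greater than
 * XENSTORE_RING_SIZE can only mean corrupt guest-written indices, and
 * is treated as a fatal error.
 */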
static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
                                   unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
    unsigned int copied = 0;

    /* Ensure the ring contents don't cross the req_prod access. */
    smp_rmb();

    while (len) {
        unsigned int avail = prod - cons;
        unsigned int offset = MASK_XENSTORE_IDX(cons);
        unsigned int copylen = avail;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > len) {
            copylen = len;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(ptr, &s->xs->req[offset], copylen);
        copied += copylen;

        ptr += copylen;
        len -= copylen;

        cons += copylen;
    }

    /*
     * Not sure this ever mattered except on Alpha, but this barrier
     * is to ensure that the update to req_cons is globally visible
     * only after we have consumed all the data from the ring, and we
     * don't end up seeing data written to the ring *after* the other
     * end sees the update and writes more to the ring. Xen's own
     * xenstored has the same barrier here (although with no comment
     * at all, obviously, because it's Xen code).
     */
    smp_mb();

    qatomic_set(&s->xs->req_cons, cons);

    return copied;
}

static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
                                 unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
    unsigned int copied = 0;

    /*
     * This matches the barrier in copy_to_ring() (or the guest's
     * equivalent) between writing the data to the ring and updating
     * rsp_prod. It protects against the pathological case (which
     * again I think never happened except on Alpha) where our
     * subsequent writes to the ring could *cross* the read of
     * rsp_cons and the guest could see the new data when it was
     * intending to read the old.
     */
    smp_mb();

    while (len) {
        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
        unsigned int offset = MASK_XENSTORE_IDX(prod);
        unsigned int copylen = len;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > avail) {
            copylen = avail;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(&s->xs->rsp[offset], ptr, copylen);
        copied += copylen;

        ptr += copylen;
        len -= copylen;

        prod += copylen;
    }

    /* Ensure the ring contents are seen before rsp_prod update. */
    smp_wmb();

    qatomic_set(&s->xs->rsp_prod, prod);

    return copied;
}
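
/*
 * Message framing: every request is a struct xsd_sockmsg header
 * followed by req->len bytes of payload. Data may trickle in across
 * several events, so get_req() accumulates into req_data with
 * req_offset tracking progress: first the fixed-size header, then,
 * once the header (and hence req->len) is known, the payload. A
 * header advertising more than XENSTORE_PAYLOAD_MAX is treated as a
 * fatal protocol error.
 */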
static unsigned int get_req(XenXenstoreState *s)
{
    unsigned int copied = 0;

    if (s->fatal_error) {
        return 0;
    }

    assert(!req_pending(s));

    if (s->req_offset < XENSTORE_HEADER_SIZE) {
        void *ptr = s->req_data + s->req_offset;
        /* Ask only for the remainder of the header, not bytes beyond it. */
        unsigned int len = XENSTORE_HEADER_SIZE - s->req_offset;
        unsigned int copylen = copy_from_ring(s, ptr, len);

        copied += copylen;
        s->req_offset += copylen;
    }

    if (s->req_offset >= XENSTORE_HEADER_SIZE) {
        struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

        if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
            error_report("Illegal XenStore request");
            s->fatal_error = true;
            return 0;
        }

        void *ptr = s->req_data + s->req_offset;
        unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
        unsigned int copylen = copy_from_ring(s, ptr, len);

        copied += copylen;
        s->req_offset += copylen;
    }

    return copied;
}

static unsigned int put_rsp(XenXenstoreState *s)
{
    if (s->fatal_error) {
        return 0;
    }

    assert(s->rsp_pending);

    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);

    void *ptr = s->rsp_data + s->rsp_offset;
    unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
    unsigned int copylen = copy_to_ring(s, ptr, len);

    s->rsp_offset += copylen;

    /* Have we produced a complete response? */
    if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
        reset_rsp(s);
    }

    return copylen;
}
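
/*
 * A watch event is an unsolicited XS_WATCH_EVENT message with req_id
 * and tx_id both zero, whose payload is the watched path followed by
 * the watcher's token, each NUL-terminated. For a (hypothetical) watch
 * on "memory/target" registered with token "t1", the payload on the
 * wire would be "memory/target\0t1\0".
 */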
static void deliver_watch(XenXenstoreState *s, const char *path,
                          const char *token)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    unsigned int len;

    assert(!s->rsp_pending);

    trace_xenstore_watch_event(path, token);

    rsp->type = XS_WATCH_EVENT;
    rsp->req_id = 0;
    rsp->tx_id = 0;
    rsp->len = 0;

    len = strlen(path);

    /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
    assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);

    memcpy(&rsp_data[rsp->len], path, len);
    rsp->len += len;
    rsp_data[rsp->len] = '\0';
    rsp->len++;

    len = strlen(token);
    /*
     * It is possible for the guest to have chosen a token that will
     * not fit (along with the path) into a watch event. We have no
     * choice but to drop the event if this is the case.
     */
    if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) {
        return;
    }

    memcpy(&rsp_data[rsp->len], token, len);
    rsp->len += len;
    rsp_data[rsp->len] = '\0';
    rsp->len++;

    s->rsp_pending = true;
}
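
/*
 * Watch events which cannot be delivered immediately (because the
 * single response buffer is busy) are queued here in FIFO order and
 * drained, one per delivery, from the main event loop once the buffer
 * frees up again.
 */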
struct watch_event {
    char *path;
    char *token;
};

static void queue_watch(XenXenstoreState *s, const char *path,
                        const char *token)
{
    struct watch_event *ev = g_new0(struct watch_event, 1);

    ev->path = g_strdup(path);
    ev->token = g_strdup(token);

    s->watch_events = g_list_append(s->watch_events, ev);
}

static void fire_watch_cb(void *opaque, const char *path, const char *token)
{
    XenXenstoreState *s = opaque;

    assert(qemu_mutex_iothread_locked());

    /*
     * If there's a response pending, we obviously can't scribble over
     * it. But if there's a request pending, it has dibs on the buffer
     * too.
     *
     * In the common case of a watch firing due to backend activity
     * when the ring was otherwise idle, we should be able to copy the
     * strings directly into the rsp_data and thence the actual ring,
     * without needing to perform any allocations and queue them.
     */
    if (s->rsp_pending || req_pending(s)) {
        queue_watch(s, path, token);
    } else {
        deliver_watch(s, path, token);
        /*
         * If the message was queued because there was already ring activity,
         * no need to wake the guest. But if not, we need to send the evtchn.
         */
        xen_be_evtchn_notify(s->eh, s->be_port);
    }
}
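
/*
 * Called from the main event loop only when s->watch_events is
 * non-empty and no response is pending; each call dequeues a single
 * event and hands it to deliver_watch(), which then occupies the
 * response buffer until the guest has consumed it.
 */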
static void process_watch_events(XenXenstoreState *s)
{
    struct watch_event *ev = s->watch_events->data;

    deliver_watch(s, ev->path, ev->token);

    s->watch_events = g_list_remove(s->watch_events, ev);
    g_free(ev->path);
    g_free(ev->token);
    g_free(ev);
}

static void xen_xenstore_event(void *opaque)
{
    XenXenstoreState *s = opaque;
    evtchn_port_t port = xen_be_evtchn_pending(s->eh);
    unsigned int copied_to, copied_from;
    bool processed, notify = false;

    if (port != s->be_port) {
        return;
    }

    /* We know this is a no-op. */
    xen_be_evtchn_unmask(s->eh, port);

    do {
        copied_to = copied_from = 0;
        processed = false;

        if (!s->rsp_pending && s->watch_events) {
            process_watch_events(s);
        }

        if (s->rsp_pending) {
            copied_to = put_rsp(s);
        }

        if (!req_pending(s)) {
            copied_from = get_req(s);
        }

        if (req_pending(s) && !s->rsp_pending && !s->watch_events) {
            process_req(s);
            processed = true;
        }

        notify |= copied_to || copied_from;
    } while (copied_to || copied_from || processed);

    if (notify) {
        xen_be_evtchn_notify(s->eh, s->be_port);
    }
}

static void alloc_guest_port(XenXenstoreState *s)
{
    struct evtchn_alloc_unbound alloc = {
        .dom = DOMID_SELF,
        .remote_dom = DOMID_QEMU,
    };

    if (!xen_evtchn_alloc_unbound_op(&alloc)) {
        s->guest_port = alloc.port;
    }
}
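
/*
 * Reset re-arms the transport: any partially transferred request or
 * response is abandoned, the shared xenstore page is mapped into the
 * guest at the XENSTORE special PFN if it isn't already, and a fresh
 * unbound event channel port is allocated for the guest and bound on
 * the backend side.
 */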
int xen_xenstore_reset(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    int err;

    if (!s) {
        return -ENOTSUP;
    }

    s->req_offset = s->rsp_offset = 0;
    s->rsp_pending = false;

    if (!memory_region_is_mapped(&s->xenstore_page)) {
        uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS;
        xen_overlay_do_map_page(&s->xenstore_page, gpa);
    }

    alloc_guest_port(s);

    /*
     * As qemu/dom0, bind to the guest's port. For incoming migration, this
     * will be unbound as the guest's evtchn table is overwritten. We then
     * rebind to the correct guest port in xen_xenstore_post_load().
     */
    err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port);
    if (err < 0) {
        return err;
    }
    s->be_port = err;

    return 0;
}