/*
 * QEMU Xen emulation: Shared/overlay pages support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "migration/vmstate.h"

#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen_backend_ops.h"
#include "xen_overlay.h"
#include "xen_evtchn.h"
#include "xen_primary_console.h"
#include "xen_xenstore.h"

#include "system/kvm.h"
#include "system/kvm_xen.h"

#include "trace.h"

#include "xenstore_impl.h"

#include "hw/xen/interface/io/xs_wire.h"
#include "hw/xen/interface/event_channel.h"
#include "hw/xen/interface/grant_table.h"

#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)

#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))

#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))

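/*
 * Each message on the XenStore ring is a struct xsd_sockmsg header (four
 * uint32_t fields: type, req_id, tx_id, len; see
 * hw/xen/interface/io/xs_wire.h) followed by a payload of at most
 * XENSTORE_PAYLOAD_MAX bytes, which is why the req_data/rsp_data buffers
 * below are sized XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX.
 */
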
struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    XenstoreImplState *impl;
    GList *watch_events; /* for the guest */

    MemoryRegion xenstore_page;
    struct xenstore_domain_interface *xs;
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;
    uint32_t rsp_offset;
    bool rsp_pending;
    bool fatal_error;

    evtchn_port_t guest_port;
    evtchn_port_t be_port;
    struct xenevtchn_handle *eh;

    uint8_t *impl_state;
    uint32_t impl_state_size;

    struct xengntdev_handle *gt;
    void *granted_xs;
};

struct XenXenstoreState *xen_xenstore_singleton;

static void xen_xenstore_event(void *opaque);
static void fire_watch_cb(void *opaque, const char *path, const char *token);

static struct xenstore_backend_ops emu_xenstore_backend_ops;

static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
                                                GList *perms,
                                                const char *relpath,
                                                const char *fmt, ...)
{
    gchar *abspath;
    gchar *value;
    va_list args;
    GByteArray *data;
    int err;

    abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath);
    va_start(args, fmt);
    value = g_strdup_vprintf(fmt, args);
    va_end(args);

    data = g_byte_array_new_take((void *)value, strlen(value));

    err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data);
    assert(!err);

    g_byte_array_unref(data);

    err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms);
    assert(!err);

    g_free(abspath);
}

static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);
    GList *perms;

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }
    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh),
                       xen_xenstore_event, NULL, NULL, NULL, s);

    s->impl = xs_impl_create(xen_domid);

    /* Populate the default nodes */

    /* Nodes owned by 'dom0' but readable by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
    perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));

    relpath_printf(s, perms, "", "%s", "");

    relpath_printf(s, perms, "domid", "%u", xen_domid);

    relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1);
    relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1);

    relpath_printf(s, perms, "platform/acpi", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s3", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s4", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0);

    g_list_free_full(perms, g_free);

    /* Nodes owned by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid));

    relpath_printf(s, perms, "attr", "%s", "");

    relpath_printf(s, perms, "control/shutdown", "%s", "");
    relpath_printf(s, perms, "control/feature-poweroff", "%u", 1);
    relpath_printf(s, perms, "control/feature-reboot", "%u", 1);
    relpath_printf(s, perms, "control/feature-suspend", "%u", 1);
    relpath_printf(s, perms, "control/feature-s3", "%u", 1);
    relpath_printf(s, perms, "control/feature-s4", "%u", 1);

    relpath_printf(s, perms, "data", "%s", "");
    relpath_printf(s, perms, "device", "%s", "");
    relpath_printf(s, perms, "drivers", "%s", "");
    relpath_printf(s, perms, "error", "%s", "");
    relpath_printf(s, perms, "feature", "%s", "");

    g_list_free_full(perms, g_free);

    xen_xenstore_ops = &emu_xenstore_backend_ops;
}

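/*
 * Illustrative (not exhaustive) summary of the guest-visible tree that the
 * defaults populated in xen_xenstore_realize() create under
 * /local/domain/<domid>:
 *
 *   domid                    owned by dom0, readable by the guest
 *   control/shutdown         owned by the guest, initially empty
 *   control/feature-*        "1" for poweroff/reboot/suspend/s3/s4
 *   platform/acpi*           ACPI feature flags
 *   attr, data, device, drivers, error, feature
 *                            empty nodes owned by the guest
 */
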
static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}

static int xen_xenstore_pre_save(void *opaque)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;

    if (s->eh) {
        s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
    }

    g_free(s->impl_state);
    save = xs_impl_serialize(s->impl);
    s->impl_state = save->data;
    s->impl_state_size = save->len;
    g_byte_array_free(save, false);

    return 0;
}

static int xen_xenstore_post_load(void *opaque, int ver)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;
    int ret;

    /*
     * As qemu/dom0, rebind to the guest's port. The Windows drivers may
     * unbind the XenStore evtchn and rebind to it, having obtained the
     * "remote" port through EVTCHNOP_status. In the case that migration
     * occurs while it's unbound, the "remote" port needs to be the same
     * as before so that the guest can find it, but should remain unbound.
     */
    if (s->guest_port) {
        int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
                                                     s->guest_port);
        if (be_port < 0) {
            return be_port;
        }
        s->be_port = be_port;
    }

    save = g_byte_array_new_take(s->impl_state, s->impl_state_size);
    s->impl_state = NULL;
    s->impl_state_size = 0;

    ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
    return ret;
}

static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .unmigratable = 1, /* The PV back ends don't migrate yet */
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_UINT32(impl_state_size, XenXenstoreState),
        VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState,
                                    impl_state_size, 0,
                                    vmstate_info_uint8, uint8_t),
        VMSTATE_END_OF_LIST()
    }
};

static void xen_xenstore_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}

static const TypeInfo xen_xenstore_info = {
    .name = TYPE_XEN_XENSTORE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init = xen_xenstore_class_init,
};

void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}

static void xen_xenstore_register_types(void)
{
    type_register_static(&xen_xenstore_info);
}

type_init(xen_xenstore_register_types)

uint16_t xen_xenstore_get_port(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    if (!s) {
        return 0;
    }
    return s->guest_port;
}

static bool req_pending(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
}

static void reset_req(XenXenstoreState *s)
{
    memset(s->req_data, 0, sizeof(s->req_data));
    s->req_offset = 0;
}

static void reset_rsp(XenXenstoreState *s)
{
    s->rsp_pending = false;

    memset(s->rsp_data, 0, sizeof(s->rsp_data));
    s->rsp_offset = 0;
}

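/*
 * Request/response cycle, as implemented further down: get_req() accumulates
 * a request into req_data until req_pending() is true, process_req()
 * dispatches it and builds a response in rsp_data, and put_rsp() drains the
 * response back to the ring. Only one request is handled at a time;
 * process_req() asserts that rsp_pending is false, so a new request is not
 * processed until the previous response has been consumed.
 */
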
static void xs_error(XenXenstoreState *s, unsigned int id,
                     xs_transaction_t tx_id, int errnum)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *errstr = NULL;

    for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
        const struct xsd_errors *xsd_error = &xsd_errors[i];

        if (xsd_error->errnum == errnum) {
            errstr = xsd_error->errstring;
            break;
        }
    }
    assert(errstr);

    trace_xenstore_error(id, tx_id, errstr);

    rsp->type = XS_ERROR;
    rsp->req_id = id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(errstr) + 1;

    memcpy(&rsp[1], errstr, rsp->len);
}

static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
                  xs_transaction_t tx_id)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *okstr = "OK";

    rsp->type = type;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(okstr) + 1;

    memcpy(&rsp[1], okstr, rsp->len);
}

/*
 * The correct request and response formats are documented in xen.git:
 * docs/misc/xenstore.txt. A summary is given below for convenience.
 * The '|' symbol represents a NUL character.
 *
 * ---------- Database read, write and permissions operations ----------
 *
 * READ                    <path>|                     <value|>
 * WRITE                   <path>|<value|>
 *         Store and read the octet string <value> at <path>.
 *         WRITE creates any missing parent paths, with empty values.
 *
 * MKDIR                   <path>|
 *         Ensures that the <path> exists, if necessary by creating
 *         it and any missing parents with empty values. If <path>
 *         or any parent already exists, its value is left unchanged.
 *
 * RM                      <path>|
 *         Ensures that the <path> does not exist, by deleting
 *         it and all of its children. It is not an error if <path> does
 *         not exist, but it _is_ an error if <path>'s immediate parent
 *         does not exist either.
 *
 * DIRECTORY               <path>|                     <child-leaf-name>|*
 *         Gives a list of the immediate children of <path>, as only the
 *         leafnames. The resulting children are each named
 *         <path>/<child-leaf-name>.
 *
 * DIRECTORY_PART          <path>|<offset>             <gencnt>|<child-leaf-name>|*
 *         Same as DIRECTORY, but to be used for children lists longer than
 *         XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
 *         the list of children to return. Return values are the generation
 *         count <gencnt> of the node (to be used to ensure the node hasn't
 *         changed between two reads: <gencnt> being the same for multiple
 *         reads guarantees the node hasn't changed) and the list of children
 *         starting at the specified <offset> of the complete list.
 *
 * GET_PERMS               <path>|                     <perm-as-string>|+
 * SET_PERMS               <path>|<perm-as-string>|+?
 *         <perm-as-string> is one of the following
 *                 w<domid>        write only
 *                 r<domid>        read only
 *                 b<domid>        both read and write
 *                 n<domid>        no access
 *         See https://wiki.xen.org/wiki/XenBus section
 *         `Permissions' for details of the permissions system.
 *         It is possible to set permissions for the special watch paths
 *         "@introduceDomain" and "@releaseDomain" to enable receiving those
 *         watches in unprivileged domains.
 *
 * ---------- Watches ----------
 *
 * WATCH                   <wpath>|<token>|?
 *         Adds a watch.
 *
 *         When a <path> is modified (including path creation, removal,
 *         contents change or permissions change) this generates an event
 *         on the changed <path>. Changes made in transactions cause an
 *         event only if and when committed. Each occurring event is
 *         matched against all the watches currently set up, and each
 *         matching watch results in a WATCH_EVENT message (see below).
 *
 *         The event's path matches the watch's <wpath> if it is a child
 *         of <wpath>.
 *
 *         <wpath> can be a <path> to watch or @<wspecial>. In the
 *         latter case <wspecial> may have any syntax but it matches
 *         (according to the rules above) only the following special
 *         events which are invented by xenstored:
 *             @introduceDomain    occurs on INTRODUCE
 *             @releaseDomain      occurs on any domain crash or
 *                                 shutdown, and also on RELEASE
 *                                 and domain destruction
 *         <wspecial> events are sent to privileged callers or explicitly
 *         via SET_PERMS enabled domains only.
 *
 *         When a watch is first set up it is triggered once straight
 *         away, with <path> equal to <wpath>.
 *         Watches may be triggered
 *         spuriously. The tx_id in a WATCH request is ignored.
 *
 *         Watches are supposed to be restricted by the permissions
 *         system but in practice the implementation is imperfect.
 *         Applications should not rely on being sent a notification for
 *         paths that they cannot read; however, an application may rely
 *         on being sent a watch when a path which it _is_ able to read
 *         is deleted even if that leaves only a nonexistent unreadable
 *         parent. A notification may be omitted if a node's permissions
 *         are changed so as to make it unreadable, in which case future
 *         notifications may be suppressed (and if the node is later made
 *         readable, some notifications may have been lost).
 *
 * WATCH_EVENT                                         <epath>|<token>|
 *         Unsolicited `reply' generated for matching modification events
 *         as described above. req_id and tx_id are both 0.
 *
 *         <epath> is the event's path, ie the actual path that was
 *         modified; however if the event was the recursive removal of a
 *         parent of <wpath>, <epath> is just
 *         <wpath> (rather than the actual path which was removed). So
 *         <epath> is a child of <wpath>, regardless.
 *
 *         Iff <wpath> for the watch was specified as a relative pathname,
 *         the <epath> path will also be relative (with the same base,
 *         obviously).
 *
 * UNWATCH                 <wpath>|<token>|?
 *
 * RESET_WATCHES           |
 *         Reset all watches and transactions of the caller.
 *
 * ---------- Transactions ----------
 *
 * TRANSACTION_START       |                           <transid>|
 *         <transid> is an opaque uint32_t allocated by xenstored
 *         represented as unsigned decimal. After this, transaction may
 *         be referenced by using <transid> (as 32-bit binary) in the
 *         tx_id request header field. When transaction is started whole
 *         db is copied; reads and writes happen on the copy.
 *         It is not legal to send non-0 tx_id in TRANSACTION_START.
 *
 * TRANSACTION_END         T|
 * TRANSACTION_END         F|
 *         tx_id must refer to existing transaction. After this
 *         request the tx_id is no longer valid and may be reused by
 *         xenstore. If F, the transaction is discarded. If T,
 *         it is committed: if there were any other intervening writes
 *         then our END gets EAGAIN.
 *
 *         The plan is that in the future only intervening `conflicting'
 *         writes cause EAGAIN, meaning only writes or other commits
 *         which changed paths which were read or written in the
 *         transaction at hand.
 *
 */

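/*
 * A concrete, purely illustrative example of the encoding described above:
 * a guest writing "1" to "data/foo" sends a header with type = XS_WRITE,
 * a guest-chosen req_id, tx_id = 0 and len = 10, followed by the payload
 * bytes "data/foo\0" and "1" (the value is the remainder of the payload and
 * is not NUL-terminated). The successful reply built by xs_write()/xs_ok()
 * below has type = XS_WRITE, the same req_id/tx_id and len = 3 with payload
 * "OK\0"; on failure xs_error() replies with type = XS_ERROR and the errno
 * name (e.g. "EINVAL\0") as the payload.
 */
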
static void xs_read(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    g_autoptr(GByteArray) data = g_byte_array_new();
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_read(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_READ;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    len = data->len;
    if (len > XENSTORE_PAYLOAD_MAX) {
        xs_error(s, req_id, tx_id, E2BIG);
        return;
    }

    memcpy(&rsp_data[rsp->len], data->data, len);
    rsp->len += len;
}

static void xs_write(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    g_byte_array_append(data, req_data, len);

    trace_xenstore_write(tx_id, path);
    err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WRITE, req_id, tx_id);
}

static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_mkdir(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err == ENOENT) {
        err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    }

    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_MKDIR, req_id, tx_id);
}

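/*
 * Helper used by the DIRECTORY, DIRECTORY_PART and GET_PERMS handlers below:
 * appends each NUL-terminated string in @strings to the response payload.
 * @start skips that many bytes of the concatenated output (DIRECTORY_PART's
 * byte offset) and @truncate selects whether an oversized result is cut off
 * at XENSTORE_PAYLOAD_MAX (DIRECTORY_PART) or rejected with E2BIG
 * (DIRECTORY, GET_PERMS).
 */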
static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
                              GList *strings, unsigned int start, bool truncate)
{
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    GList *l;

    for (l = strings; l; l = l->next) {
        size_t len = strlen(l->data) + 1; /* Including the NUL termination */
        char *str = l->data;

        if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
            if (truncate) {
                len = XENSTORE_PAYLOAD_MAX - rsp->len;
                if (!len) {
                    return;
                }
            } else {
                xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
                return;
            }
        }

        if (start) {
            if (start >= len) {
                start -= len;
                continue;
            }

            str += start;
            len -= start;
            start = 0;
        }

        memcpy(&rsp_data[rsp->len], str, len);
        rsp->len += len;
    }
    /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
    if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
        rsp_data[rsp->len++] = '\0';
    }
}

static void xs_directory(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *items = NULL;
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_directory(tx_id, path);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items);
    if (err != 0) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, items, 0, false);

    g_list_free_full(items, g_free);
}

static void xs_directory_part(XenXenstoreState *s, unsigned int req_id,
                              xs_transaction_t tx_id, uint8_t *req_data,
                              unsigned int len)
{
    const char *offset_str, *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    uint64_t gencnt = 0;
    unsigned int offset;
    GList *items = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    offset_str = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    if (len) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_directory_part(tx_id, path, offset);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items);
    if (err != 0) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY_PART;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1;

    xs_append_strings(s, rsp, items, offset, true);

    g_list_free_full(items, g_free);
}

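/*
 * Illustrative DIRECTORY_PART reply layout, as produced above: the payload
 * starts with the node's generation count as a decimal string ("5\0" for
 * gencnt 5), then the child leaf names from byte <offset> of the
 * concatenated list onwards, each NUL-terminated (e.g. "vbd\0vif\0" for
 * hypothetical children), and finally the extra NUL appended by
 * xs_append_strings() to mark the end of the list.
 */
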
static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id,
                                 xs_transaction_t tx_id, uint8_t *req_data,
                                 unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    int err;

    if (len != 1 || req_data[0] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    rsp->type = XS_TRANSACTION_START;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    trace_xenstore_transaction_start(tx_id);

    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id);
    assert(rsp->len < XENSTORE_PAYLOAD_MAX);
    rsp->len++;
}

static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id,
                               xs_transaction_t tx_id, uint8_t *req_data,
                               unsigned int len)
{
    bool commit;
    int err;

    if (len != 2 || req_data[1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    switch (req_data[0]) {
    case 'T':
        commit = true;
        break;
    case 'F':
        commit = false;
        break;
    default:
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_transaction_end(tx_id, commit);
    err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_TRANSACTION_END, req_id, tx_id);
}

static void xs_rm(XenXenstoreState *s, unsigned int req_id,
                  xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_rm(tx_id, path);
    err = xs_impl_rm(s->impl, xen_domid, tx_id, path);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_RM, req_id, tx_id);
}

static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *perms = NULL;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_get_perms(tx_id, path);
    err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_GET_PERMS;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, perms, 0, false);

    g_list_free_full(perms, g_free);
}

static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    uint8_t *perm;
    GList *perms = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    perm = req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            perms = g_list_append(perms, perm);
            perm = req_data;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    SET_PERMS         <path>|<perm-as-string>|+?
     */

    trace_xenstore_set_perms(tx_id, path);
    err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
    g_list_free(perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_SET_PERMS, req_id, tx_id);
}

static void xs_watch(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    WATCH             <wpath>|<token>|?
     */

    trace_xenstore_watch(path, token);
    err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WATCH, req_id, tx_id);
}

static void xs_unwatch(XenXenstoreState *s, unsigned int req_id,
                       xs_transaction_t tx_id, uint8_t *req_data,
                       unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    trace_xenstore_unwatch(path, token);
    err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_UNWATCH, req_id, tx_id);
}

static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id,
                             xs_transaction_t tx_id, uint8_t *req_data,
                             unsigned int len)
{
    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_reset_watches();
    xs_impl_reset_watches(s->impl, xen_domid);

    xs_ok(s, XS_RESET_WATCHES, req_id, tx_id);
}

static void xs_priv(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *data,
                    unsigned int len)
{
    xs_error(s, req_id, tx_id, EACCES);
}

static void xs_unimpl(XenXenstoreState *s, unsigned int req_id,
                      xs_transaction_t tx_id, uint8_t *data,
                      unsigned int len)
{
    xs_error(s, req_id, tx_id, ENOSYS);
}

typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id,
                        xs_transaction_t tx_id, uint8_t *data,
                        unsigned int len);

struct xsd_req {
    const char *name;
    xs_impl fn;
};
#define XSD_REQ(_type, _fn)                     \
    [_type] = { .name = #_type, .fn = _fn }

struct xsd_req xsd_reqs[] = {
    XSD_REQ(XS_READ, xs_read),
    XSD_REQ(XS_WRITE, xs_write),
    XSD_REQ(XS_MKDIR, xs_mkdir),
    XSD_REQ(XS_DIRECTORY, xs_directory),
    XSD_REQ(XS_DIRECTORY_PART, xs_directory_part),
    XSD_REQ(XS_TRANSACTION_START, xs_transaction_start),
    XSD_REQ(XS_TRANSACTION_END, xs_transaction_end),
    XSD_REQ(XS_RM, xs_rm),
    XSD_REQ(XS_GET_PERMS, xs_get_perms),
    XSD_REQ(XS_SET_PERMS, xs_set_perms),
    XSD_REQ(XS_WATCH, xs_watch),
    XSD_REQ(XS_UNWATCH, xs_unwatch),
    XSD_REQ(XS_CONTROL, xs_priv),
    XSD_REQ(XS_INTRODUCE, xs_priv),
    XSD_REQ(XS_RELEASE, xs_priv),
    XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv),
    XSD_REQ(XS_RESUME, xs_priv),
    XSD_REQ(XS_SET_TARGET, xs_priv),
    XSD_REQ(XS_RESET_WATCHES, xs_reset_watches),
};

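/*
 * The table above uses designated initialisers, so any XS_* type without an
 * entry (and any type beyond the end of the array) leaves fn as NULL and
 * falls through to xs_unimpl() (ENOSYS) in process_req() below, while the
 * operations reserved for privileged callers are wired to xs_priv() and
 * fail with EACCES.
 */
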
static void process_req(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
    xs_impl handler = NULL;

    assert(req_pending(s));
    assert(!s->rsp_pending);

    if (req->type < ARRAY_SIZE(xsd_reqs)) {
        handler = xsd_reqs[req->type].fn;
    }
    if (!handler) {
        handler = &xs_unimpl;
    }

    handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len);

    s->rsp_pending = true;
    reset_req(s);
}

static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
                                   unsigned int len)
{
    if (!len) {
        return 0;
    }

    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
    unsigned int copied = 0;

    /* Ensure the ring contents don't cross the req_prod access. */
    smp_rmb();

    while (len) {
        unsigned int avail = prod - cons;
        unsigned int offset = MASK_XENSTORE_IDX(cons);
        unsigned int copylen = avail;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > len) {
            copylen = len;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(ptr, &s->xs->req[offset], copylen);
        copied += copylen;

        ptr += copylen;
        len -= copylen;

        cons += copylen;
    }

    /*
     * Not sure this ever mattered except on Alpha, but this barrier
     * is to ensure that the update to req_cons is globally visible
     * only after we have consumed all the data from the ring, and we
     * don't end up seeing data written to the ring *after* the other
     * end sees the update and writes more to the ring. Xen's own
     * xenstored has the same barrier here (although with no comment
     * at all, obviously, because it's Xen code).
     */
    smp_mb();

    qatomic_set(&s->xs->req_cons, cons);

    return copied;
}

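/*
 * Worked example of the free-running index arithmetic used above and in
 * copy_to_ring(): the XENSTORE_RING_IDX values only ever increase and are
 * masked with MASK_XENSTORE_IDX() to address the 1KiB req/rsp buffers, so
 * with prod = 0x10004 and cons = 0xfffe there are 0x10004 - 0xfffe = 6
 * bytes available, split across the wrap as 2 bytes at offset 0x3fe and 4
 * bytes at offset 0. Any computed availability greater than
 * XENSTORE_RING_SIZE means the indices have been corrupted, which is
 * treated as a fatal error.
 */
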
1139f3341e7bSDavid Woodhouse */ 1140f3341e7bSDavid Woodhouse smp_mb(); 1141f3341e7bSDavid Woodhouse 1142f3341e7bSDavid Woodhouse qatomic_set(&s->xs->req_cons, cons); 1143f3341e7bSDavid Woodhouse 1144f3341e7bSDavid Woodhouse return copied; 1145f3341e7bSDavid Woodhouse } 1146f3341e7bSDavid Woodhouse 1147f3341e7bSDavid Woodhouse static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr, 1148f3341e7bSDavid Woodhouse unsigned int len) 1149f3341e7bSDavid Woodhouse { 1150f3341e7bSDavid Woodhouse if (!len) { 1151f3341e7bSDavid Woodhouse return 0; 1152f3341e7bSDavid Woodhouse } 1153f3341e7bSDavid Woodhouse 1154f3341e7bSDavid Woodhouse XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons); 1155f3341e7bSDavid Woodhouse XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod); 1156f3341e7bSDavid Woodhouse unsigned int copied = 0; 1157f3341e7bSDavid Woodhouse 1158f3341e7bSDavid Woodhouse /* 1159f3341e7bSDavid Woodhouse * This matches the barrier in copy_to_ring() (or the guest's 1160bad5cfcdSMichael Tokarev * equivalent) between writing the data to the ring and updating 1161f3341e7bSDavid Woodhouse * rsp_prod. It protects against the pathological case (which 1162f3341e7bSDavid Woodhouse * again I think never happened except on Alpha) where our 1163f3341e7bSDavid Woodhouse * subsequent writes to the ring could *cross* the read of 1164f3341e7bSDavid Woodhouse * rsp_cons and the guest could see the new data when it was 1165f3341e7bSDavid Woodhouse * intending to read the old. 1166f3341e7bSDavid Woodhouse */ 1167f3341e7bSDavid Woodhouse smp_mb(); 1168f3341e7bSDavid Woodhouse 1169f3341e7bSDavid Woodhouse while (len) { 1170f3341e7bSDavid Woodhouse unsigned int avail = cons + XENSTORE_RING_SIZE - prod; 1171f3341e7bSDavid Woodhouse unsigned int offset = MASK_XENSTORE_IDX(prod); 1172f3341e7bSDavid Woodhouse unsigned int copylen = len; 1173f3341e7bSDavid Woodhouse 1174f3341e7bSDavid Woodhouse if (avail > XENSTORE_RING_SIZE) { 1175f3341e7bSDavid Woodhouse error_report("XenStore ring handling error"); 1176f3341e7bSDavid Woodhouse s->fatal_error = true; 1177f3341e7bSDavid Woodhouse break; 1178f3341e7bSDavid Woodhouse } else if (avail == 0) { 1179f3341e7bSDavid Woodhouse break; 1180f3341e7bSDavid Woodhouse } 1181f3341e7bSDavid Woodhouse 1182f3341e7bSDavid Woodhouse if (copylen > avail) { 1183f3341e7bSDavid Woodhouse copylen = avail; 1184f3341e7bSDavid Woodhouse } 1185f3341e7bSDavid Woodhouse if (copylen > XENSTORE_RING_SIZE - offset) { 1186f3341e7bSDavid Woodhouse copylen = XENSTORE_RING_SIZE - offset; 1187f3341e7bSDavid Woodhouse } 1188f3341e7bSDavid Woodhouse 1189f3341e7bSDavid Woodhouse 1190f3341e7bSDavid Woodhouse memcpy(&s->xs->rsp[offset], ptr, copylen); 1191f3341e7bSDavid Woodhouse copied += copylen; 1192f3341e7bSDavid Woodhouse 1193f3341e7bSDavid Woodhouse ptr += copylen; 1194f3341e7bSDavid Woodhouse len -= copylen; 1195f3341e7bSDavid Woodhouse 1196f3341e7bSDavid Woodhouse prod += copylen; 1197f3341e7bSDavid Woodhouse } 1198f3341e7bSDavid Woodhouse 1199f3341e7bSDavid Woodhouse /* Ensure the ring contents are seen before rsp_prod update. 
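 * (The guest pairs this with a read barrier between reading rsp_prod
 * and reading the response data it indicates.)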
*/ 1200f3341e7bSDavid Woodhouse smp_wmb(); 1201f3341e7bSDavid Woodhouse 1202f3341e7bSDavid Woodhouse qatomic_set(&s->xs->rsp_prod, prod); 1203f3341e7bSDavid Woodhouse 1204f3341e7bSDavid Woodhouse return copied; 1205f3341e7bSDavid Woodhouse } 1206f3341e7bSDavid Woodhouse 1207f3341e7bSDavid Woodhouse static unsigned int get_req(XenXenstoreState *s) 1208f3341e7bSDavid Woodhouse { 1209f3341e7bSDavid Woodhouse unsigned int copied = 0; 1210f3341e7bSDavid Woodhouse 1211f3341e7bSDavid Woodhouse if (s->fatal_error) { 1212f3341e7bSDavid Woodhouse return 0; 1213f3341e7bSDavid Woodhouse } 1214f3341e7bSDavid Woodhouse 1215f3341e7bSDavid Woodhouse assert(!req_pending(s)); 1216f3341e7bSDavid Woodhouse 1217f3341e7bSDavid Woodhouse if (s->req_offset < XENSTORE_HEADER_SIZE) { 1218f3341e7bSDavid Woodhouse void *ptr = s->req_data + s->req_offset; 1219f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE; 1220f3341e7bSDavid Woodhouse unsigned int copylen = copy_from_ring(s, ptr, len); 1221f3341e7bSDavid Woodhouse 1222f3341e7bSDavid Woodhouse copied += copylen; 1223f3341e7bSDavid Woodhouse s->req_offset += copylen; 1224f3341e7bSDavid Woodhouse } 1225f3341e7bSDavid Woodhouse 1226f3341e7bSDavid Woodhouse if (s->req_offset >= XENSTORE_HEADER_SIZE) { 1227f3341e7bSDavid Woodhouse struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data; 1228f3341e7bSDavid Woodhouse 1229f3341e7bSDavid Woodhouse if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) { 1230f3341e7bSDavid Woodhouse error_report("Illegal XenStore request"); 1231f3341e7bSDavid Woodhouse s->fatal_error = true; 1232f3341e7bSDavid Woodhouse return 0; 1233f3341e7bSDavid Woodhouse } 1234f3341e7bSDavid Woodhouse 1235f3341e7bSDavid Woodhouse void *ptr = s->req_data + s->req_offset; 1236f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset; 1237f3341e7bSDavid Woodhouse unsigned int copylen = copy_from_ring(s, ptr, len); 1238f3341e7bSDavid Woodhouse 1239f3341e7bSDavid Woodhouse copied += copylen; 1240f3341e7bSDavid Woodhouse s->req_offset += copylen; 1241f3341e7bSDavid Woodhouse } 1242f3341e7bSDavid Woodhouse 1243f3341e7bSDavid Woodhouse return copied; 1244f3341e7bSDavid Woodhouse } 1245f3341e7bSDavid Woodhouse 1246f3341e7bSDavid Woodhouse static unsigned int put_rsp(XenXenstoreState *s) 1247f3341e7bSDavid Woodhouse { 1248f3341e7bSDavid Woodhouse if (s->fatal_error) { 1249f3341e7bSDavid Woodhouse return 0; 1250f3341e7bSDavid Woodhouse } 1251f3341e7bSDavid Woodhouse 1252f3341e7bSDavid Woodhouse assert(s->rsp_pending); 1253f3341e7bSDavid Woodhouse 1254f3341e7bSDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 1255f3341e7bSDavid Woodhouse assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len); 1256f3341e7bSDavid Woodhouse 1257f3341e7bSDavid Woodhouse void *ptr = s->rsp_data + s->rsp_offset; 1258f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset; 1259f3341e7bSDavid Woodhouse unsigned int copylen = copy_to_ring(s, ptr, len); 1260f3341e7bSDavid Woodhouse 1261f3341e7bSDavid Woodhouse s->rsp_offset += copylen; 1262f3341e7bSDavid Woodhouse 1263f3341e7bSDavid Woodhouse /* Have we produced a complete response? 
*/ 1264f3341e7bSDavid Woodhouse if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
1265f3341e7bSDavid Woodhouse reset_rsp(s);
1266f3341e7bSDavid Woodhouse }
1267f3341e7bSDavid Woodhouse
1268f3341e7bSDavid Woodhouse return copylen;
1269f3341e7bSDavid Woodhouse }
1270f3341e7bSDavid Woodhouse
12710254c4d1SDavid Woodhouse static void deliver_watch(XenXenstoreState *s, const char *path,
12720254c4d1SDavid Woodhouse const char *token)
12730254c4d1SDavid Woodhouse {
12740254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
12750254c4d1SDavid Woodhouse uint8_t *rsp_data = (uint8_t *)&rsp[1];
12760254c4d1SDavid Woodhouse unsigned int len;
12770254c4d1SDavid Woodhouse
12780254c4d1SDavid Woodhouse assert(!s->rsp_pending);
12790254c4d1SDavid Woodhouse
12800254c4d1SDavid Woodhouse trace_xenstore_watch_event(path, token);
12810254c4d1SDavid Woodhouse
12820254c4d1SDavid Woodhouse rsp->type = XS_WATCH_EVENT;
12830254c4d1SDavid Woodhouse rsp->req_id = 0;
12840254c4d1SDavid Woodhouse rsp->tx_id = 0;
12850254c4d1SDavid Woodhouse rsp->len = 0;
12860254c4d1SDavid Woodhouse
12870254c4d1SDavid Woodhouse len = strlen(path);
12880254c4d1SDavid Woodhouse
12890254c4d1SDavid Woodhouse /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
12900254c4d1SDavid Woodhouse assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);
12910254c4d1SDavid Woodhouse
12920254c4d1SDavid Woodhouse memcpy(&rsp_data[rsp->len], path, len);
12930254c4d1SDavid Woodhouse rsp->len += len;
12940254c4d1SDavid Woodhouse rsp_data[rsp->len] = '\0';
12950254c4d1SDavid Woodhouse rsp->len++;
12960254c4d1SDavid Woodhouse
12970254c4d1SDavid Woodhouse len = strlen(token);
12980254c4d1SDavid Woodhouse /*
12990254c4d1SDavid Woodhouse * It is possible for the guest to have chosen a token that will
13000254c4d1SDavid Woodhouse * not fit (along with the path) into a watch event. We have no
13010254c4d1SDavid Woodhouse * choice but to drop the event if this is the case.
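 * (The path on its own is guaranteed to fit; that was asserted above.)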
13020254c4d1SDavid Woodhouse */ 13030254c4d1SDavid Woodhouse if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) { 13040254c4d1SDavid Woodhouse return; 13050254c4d1SDavid Woodhouse } 13060254c4d1SDavid Woodhouse 13070254c4d1SDavid Woodhouse memcpy(&rsp_data[rsp->len], token, len); 13080254c4d1SDavid Woodhouse rsp->len += len; 13090254c4d1SDavid Woodhouse rsp_data[rsp->len] = '\0'; 13100254c4d1SDavid Woodhouse rsp->len++; 13110254c4d1SDavid Woodhouse 13120254c4d1SDavid Woodhouse s->rsp_pending = true; 13130254c4d1SDavid Woodhouse } 13140254c4d1SDavid Woodhouse 13150254c4d1SDavid Woodhouse struct watch_event { 13160254c4d1SDavid Woodhouse char *path; 13170254c4d1SDavid Woodhouse char *token; 13180254c4d1SDavid Woodhouse }; 13190254c4d1SDavid Woodhouse 132003247512SDavid Woodhouse static void free_watch_event(struct watch_event *ev) 132103247512SDavid Woodhouse { 132203247512SDavid Woodhouse if (ev) { 132303247512SDavid Woodhouse g_free(ev->path); 132403247512SDavid Woodhouse g_free(ev->token); 132503247512SDavid Woodhouse g_free(ev); 132603247512SDavid Woodhouse } 132703247512SDavid Woodhouse } 132803247512SDavid Woodhouse 13290254c4d1SDavid Woodhouse static void queue_watch(XenXenstoreState *s, const char *path, 13300254c4d1SDavid Woodhouse const char *token) 13310254c4d1SDavid Woodhouse { 13320254c4d1SDavid Woodhouse struct watch_event *ev = g_new0(struct watch_event, 1); 13330254c4d1SDavid Woodhouse 13340254c4d1SDavid Woodhouse ev->path = g_strdup(path); 13350254c4d1SDavid Woodhouse ev->token = g_strdup(token); 13360254c4d1SDavid Woodhouse 13370254c4d1SDavid Woodhouse s->watch_events = g_list_append(s->watch_events, ev); 13380254c4d1SDavid Woodhouse } 13390254c4d1SDavid Woodhouse 13400254c4d1SDavid Woodhouse static void fire_watch_cb(void *opaque, const char *path, const char *token) 13410254c4d1SDavid Woodhouse { 13420254c4d1SDavid Woodhouse XenXenstoreState *s = opaque; 13430254c4d1SDavid Woodhouse 1344195801d7SStefan Hajnoczi assert(bql_locked()); 13450254c4d1SDavid Woodhouse 13460254c4d1SDavid Woodhouse /* 13470254c4d1SDavid Woodhouse * If there's a response pending, we obviously can't scribble over 13480254c4d1SDavid Woodhouse * it. But if there's a request pending, it has dibs on the buffer 13490254c4d1SDavid Woodhouse * too. 13500254c4d1SDavid Woodhouse * 13510254c4d1SDavid Woodhouse * In the common case of a watch firing due to backend activity 13520254c4d1SDavid Woodhouse * when the ring was otherwise idle, we should be able to copy the 13530254c4d1SDavid Woodhouse * strings directly into the rsp_data and thence the actual ring, 13540254c4d1SDavid Woodhouse * without needing to perform any allocations and queue them. 13550254c4d1SDavid Woodhouse */ 13560254c4d1SDavid Woodhouse if (s->rsp_pending || req_pending(s)) { 13570254c4d1SDavid Woodhouse queue_watch(s, path, token); 13580254c4d1SDavid Woodhouse } else { 13590254c4d1SDavid Woodhouse deliver_watch(s, path, token); 13600254c4d1SDavid Woodhouse /* 13614a5780f5SDavid Woodhouse * Attempt to queue the message into the actual ring, and send 13624a5780f5SDavid Woodhouse * the event channel notification if any bytes are copied. 
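 * If deliver_watch() dropped the event because the token did not
 * fit, rsp_pending remains false and there is nothing to copy or
 * notify.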
13630254c4d1SDavid Woodhouse */ 13644a5780f5SDavid Woodhouse if (s->rsp_pending && put_rsp(s) > 0) { 13650254c4d1SDavid Woodhouse xen_be_evtchn_notify(s->eh, s->be_port); 13660254c4d1SDavid Woodhouse } 13670254c4d1SDavid Woodhouse } 13684a5780f5SDavid Woodhouse } 13690254c4d1SDavid Woodhouse 13700254c4d1SDavid Woodhouse static void process_watch_events(XenXenstoreState *s) 13710254c4d1SDavid Woodhouse { 13720254c4d1SDavid Woodhouse struct watch_event *ev = s->watch_events->data; 13730254c4d1SDavid Woodhouse 13740254c4d1SDavid Woodhouse deliver_watch(s, ev->path, ev->token); 13750254c4d1SDavid Woodhouse 13760254c4d1SDavid Woodhouse s->watch_events = g_list_remove(s->watch_events, ev); 137703247512SDavid Woodhouse free_watch_event(ev); 13780254c4d1SDavid Woodhouse } 13790254c4d1SDavid Woodhouse 1380c08f5d0eSDavid Woodhouse static void xen_xenstore_event(void *opaque) 1381c08f5d0eSDavid Woodhouse { 1382c08f5d0eSDavid Woodhouse XenXenstoreState *s = opaque; 1383c08f5d0eSDavid Woodhouse evtchn_port_t port = xen_be_evtchn_pending(s->eh); 1384f3341e7bSDavid Woodhouse unsigned int copied_to, copied_from; 1385f3341e7bSDavid Woodhouse bool processed, notify = false; 1386f3341e7bSDavid Woodhouse 1387c08f5d0eSDavid Woodhouse if (port != s->be_port) { 1388c08f5d0eSDavid Woodhouse return; 1389c08f5d0eSDavid Woodhouse } 1390f3341e7bSDavid Woodhouse 1391c08f5d0eSDavid Woodhouse /* We know this is a no-op. */ 1392c08f5d0eSDavid Woodhouse xen_be_evtchn_unmask(s->eh, port); 1393f3341e7bSDavid Woodhouse 1394f3341e7bSDavid Woodhouse do { 1395f3341e7bSDavid Woodhouse copied_to = copied_from = 0; 1396f3341e7bSDavid Woodhouse processed = false; 1397f3341e7bSDavid Woodhouse 13980254c4d1SDavid Woodhouse if (!s->rsp_pending && s->watch_events) { 13990254c4d1SDavid Woodhouse process_watch_events(s); 14000254c4d1SDavid Woodhouse } 14010254c4d1SDavid Woodhouse 1402f3341e7bSDavid Woodhouse if (s->rsp_pending) { 1403f3341e7bSDavid Woodhouse copied_to = put_rsp(s); 1404f3341e7bSDavid Woodhouse } 1405f3341e7bSDavid Woodhouse 1406f3341e7bSDavid Woodhouse if (!req_pending(s)) { 1407f3341e7bSDavid Woodhouse copied_from = get_req(s); 1408f3341e7bSDavid Woodhouse } 1409f3341e7bSDavid Woodhouse 14100254c4d1SDavid Woodhouse if (req_pending(s) && !s->rsp_pending && !s->watch_events) { 1411f3341e7bSDavid Woodhouse process_req(s); 1412f3341e7bSDavid Woodhouse processed = true; 1413f3341e7bSDavid Woodhouse } 1414f3341e7bSDavid Woodhouse 1415f3341e7bSDavid Woodhouse notify |= copied_to || copied_from; 1416f3341e7bSDavid Woodhouse } while (copied_to || copied_from || processed); 1417f3341e7bSDavid Woodhouse 1418f3341e7bSDavid Woodhouse if (notify) { 1419c08f5d0eSDavid Woodhouse xen_be_evtchn_notify(s->eh, s->be_port); 1420c08f5d0eSDavid Woodhouse } 1421f3341e7bSDavid Woodhouse } 1422c08f5d0eSDavid Woodhouse 1423c08f5d0eSDavid Woodhouse static void alloc_guest_port(XenXenstoreState *s) 1424c08f5d0eSDavid Woodhouse { 1425c08f5d0eSDavid Woodhouse struct evtchn_alloc_unbound alloc = { 1426c08f5d0eSDavid Woodhouse .dom = DOMID_SELF, 1427c08f5d0eSDavid Woodhouse .remote_dom = DOMID_QEMU, 1428c08f5d0eSDavid Woodhouse }; 1429c08f5d0eSDavid Woodhouse 1430c08f5d0eSDavid Woodhouse if (!xen_evtchn_alloc_unbound_op(&alloc)) { 1431c08f5d0eSDavid Woodhouse s->guest_port = alloc.port; 1432c08f5d0eSDavid Woodhouse } 1433c08f5d0eSDavid Woodhouse } 1434c08f5d0eSDavid Woodhouse 1435c08f5d0eSDavid Woodhouse int xen_xenstore_reset(void) 1436c08f5d0eSDavid Woodhouse { 1437c08f5d0eSDavid Woodhouse XenXenstoreState *s = xen_xenstore_singleton; 
1438a72ccc7fSDavid Woodhouse int console_port; 1439d388c9f5SDavid Woodhouse GList *perms; 1440c08f5d0eSDavid Woodhouse int err; 1441c08f5d0eSDavid Woodhouse 1442c08f5d0eSDavid Woodhouse if (!s) { 1443c08f5d0eSDavid Woodhouse return -ENOTSUP; 1444c08f5d0eSDavid Woodhouse } 1445c08f5d0eSDavid Woodhouse 1446c08f5d0eSDavid Woodhouse s->req_offset = s->rsp_offset = 0; 1447c08f5d0eSDavid Woodhouse s->rsp_pending = false; 1448c08f5d0eSDavid Woodhouse 1449c08f5d0eSDavid Woodhouse if (!memory_region_is_mapped(&s->xenstore_page)) { 1450c08f5d0eSDavid Woodhouse uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS; 1451c08f5d0eSDavid Woodhouse xen_overlay_do_map_page(&s->xenstore_page, gpa); 1452c08f5d0eSDavid Woodhouse } 1453c08f5d0eSDavid Woodhouse 1454c08f5d0eSDavid Woodhouse alloc_guest_port(s); 1455c08f5d0eSDavid Woodhouse 1456c08f5d0eSDavid Woodhouse /* 1457c08f5d0eSDavid Woodhouse * As qemu/dom0, bind to the guest's port. For incoming migration, this 1458c08f5d0eSDavid Woodhouse * will be unbound as the guest's evtchn table is overwritten. We then 1459c08f5d0eSDavid Woodhouse * rebind to the correct guest port in xen_xenstore_post_load(). 1460c08f5d0eSDavid Woodhouse */ 1461c08f5d0eSDavid Woodhouse err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port); 1462c08f5d0eSDavid Woodhouse if (err < 0) { 1463c08f5d0eSDavid Woodhouse return err; 1464c08f5d0eSDavid Woodhouse } 1465c08f5d0eSDavid Woodhouse s->be_port = err; 1466c08f5d0eSDavid Woodhouse 1467d388c9f5SDavid Woodhouse /* Create frontend store nodes */ 1468d388c9f5SDavid Woodhouse perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU)); 1469d388c9f5SDavid Woodhouse perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid)); 1470d388c9f5SDavid Woodhouse 1471d388c9f5SDavid Woodhouse relpath_printf(s, perms, "store/port", "%u", s->guest_port); 1472d388c9f5SDavid Woodhouse relpath_printf(s, perms, "store/ring-ref", "%lu", 1473d388c9f5SDavid Woodhouse XEN_SPECIAL_PFN(XENSTORE)); 1474d388c9f5SDavid Woodhouse 1475a72ccc7fSDavid Woodhouse console_port = xen_primary_console_get_port(); 1476a72ccc7fSDavid Woodhouse if (console_port) { 1477a72ccc7fSDavid Woodhouse relpath_printf(s, perms, "console/ring-ref", "%lu", 1478a72ccc7fSDavid Woodhouse XEN_SPECIAL_PFN(CONSOLE)); 1479a72ccc7fSDavid Woodhouse relpath_printf(s, perms, "console/port", "%u", console_port); 1480a72ccc7fSDavid Woodhouse relpath_printf(s, perms, "console/state", "%u", XenbusStateInitialised); 1481a72ccc7fSDavid Woodhouse } 1482a72ccc7fSDavid Woodhouse 1483d388c9f5SDavid Woodhouse g_list_free_full(perms, g_free); 1484d388c9f5SDavid Woodhouse 1485d05864d2SDavid Woodhouse /* 1486d05864d2SDavid Woodhouse * We don't actually access the guest's page through the grant, because 1487d05864d2SDavid Woodhouse * this isn't real Xen, and we can just use the page we gave it in the 1488d05864d2SDavid Woodhouse * first place. Map the grant anyway, mostly for cosmetic purposes so 1489d05864d2SDavid Woodhouse * it *looks* like it's in use in the guest-visible grant table. 
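 * (GNTTAB_RESERVED_XENSTORE is the grant entry reserved for the
 * xenstore page in the guest's grant table, so mapping it below makes
 * that entry appear in use from the guest's point of view.)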
1490d05864d2SDavid Woodhouse */ 1491d05864d2SDavid Woodhouse s->gt = qemu_xen_gnttab_open(); 1492d05864d2SDavid Woodhouse uint32_t xs_gntref = GNTTAB_RESERVED_XENSTORE; 1493d05864d2SDavid Woodhouse s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref, 1494d05864d2SDavid Woodhouse PROT_READ | PROT_WRITE); 1495d05864d2SDavid Woodhouse 1496c08f5d0eSDavid Woodhouse return 0; 1497c08f5d0eSDavid Woodhouse } 149803247512SDavid Woodhouse 149903247512SDavid Woodhouse struct qemu_xs_handle { 150003247512SDavid Woodhouse XenstoreImplState *impl; 150103247512SDavid Woodhouse GList *watches; 150203247512SDavid Woodhouse QEMUBH *watch_bh; 150303247512SDavid Woodhouse }; 150403247512SDavid Woodhouse 150503247512SDavid Woodhouse struct qemu_xs_watch { 150603247512SDavid Woodhouse struct qemu_xs_handle *h; 150703247512SDavid Woodhouse char *path; 150803247512SDavid Woodhouse xs_watch_fn fn; 150903247512SDavid Woodhouse void *opaque; 151003247512SDavid Woodhouse GList *events; 151103247512SDavid Woodhouse }; 151203247512SDavid Woodhouse 151303247512SDavid Woodhouse static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid) 151403247512SDavid Woodhouse { 151503247512SDavid Woodhouse return g_strdup_printf("/local/domain/%u", domid); 151603247512SDavid Woodhouse } 151703247512SDavid Woodhouse 151803247512SDavid Woodhouse static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t, 151903247512SDavid Woodhouse const char *path, unsigned int *num) 152003247512SDavid Woodhouse { 152103247512SDavid Woodhouse GList *items = NULL, *l; 152203247512SDavid Woodhouse unsigned int i = 0; 152303247512SDavid Woodhouse char **items_ret; 152403247512SDavid Woodhouse int err; 152503247512SDavid Woodhouse 152603247512SDavid Woodhouse err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items); 152703247512SDavid Woodhouse if (err) { 152803247512SDavid Woodhouse errno = err; 152903247512SDavid Woodhouse return NULL; 153003247512SDavid Woodhouse } 153103247512SDavid Woodhouse 153203247512SDavid Woodhouse items_ret = g_new0(char *, g_list_length(items) + 1); 153303247512SDavid Woodhouse *num = 0; 153403247512SDavid Woodhouse for (l = items; l; l = l->next) { 153503247512SDavid Woodhouse items_ret[i++] = l->data; 153603247512SDavid Woodhouse (*num)++; 153703247512SDavid Woodhouse } 153803247512SDavid Woodhouse g_list_free(items); 153903247512SDavid Woodhouse return items_ret; 154003247512SDavid Woodhouse } 154103247512SDavid Woodhouse 154203247512SDavid Woodhouse static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t, 154303247512SDavid Woodhouse const char *path, unsigned int *len) 154403247512SDavid Woodhouse { 154503247512SDavid Woodhouse GByteArray *data = g_byte_array_new(); 154603247512SDavid Woodhouse bool free_segment = false; 154703247512SDavid Woodhouse int err; 154803247512SDavid Woodhouse 154903247512SDavid Woodhouse err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data); 155003247512SDavid Woodhouse if (err) { 155103247512SDavid Woodhouse free_segment = true; 155203247512SDavid Woodhouse errno = err; 155303247512SDavid Woodhouse } else { 155403247512SDavid Woodhouse if (len) { 155503247512SDavid Woodhouse *len = data->len; 155603247512SDavid Woodhouse } 155703247512SDavid Woodhouse /* The xen-bus-helper code expects to get NUL terminated string! 
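 * so a trailing NUL byte is appended below; *len (set above) still
 * counts only the stored data.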
*/ 155803247512SDavid Woodhouse g_byte_array_append(data, (void *)"", 1); 155903247512SDavid Woodhouse } 156003247512SDavid Woodhouse 156103247512SDavid Woodhouse return g_byte_array_free(data, free_segment); 156203247512SDavid Woodhouse } 156303247512SDavid Woodhouse 156403247512SDavid Woodhouse static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t, 156503247512SDavid Woodhouse const char *path, const void *data, unsigned int len) 156603247512SDavid Woodhouse { 156703247512SDavid Woodhouse GByteArray *gdata = g_byte_array_new(); 156803247512SDavid Woodhouse int err; 156903247512SDavid Woodhouse 157003247512SDavid Woodhouse g_byte_array_append(gdata, data, len); 157103247512SDavid Woodhouse err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata); 157203247512SDavid Woodhouse g_byte_array_unref(gdata); 157303247512SDavid Woodhouse if (err) { 157403247512SDavid Woodhouse errno = err; 157503247512SDavid Woodhouse return false; 157603247512SDavid Woodhouse } 157703247512SDavid Woodhouse return true; 157803247512SDavid Woodhouse } 157903247512SDavid Woodhouse 158003247512SDavid Woodhouse static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t, 158103247512SDavid Woodhouse unsigned int owner, unsigned int domid, 158203247512SDavid Woodhouse unsigned int perms, const char *path) 158303247512SDavid Woodhouse { 158403247512SDavid Woodhouse g_autoptr(GByteArray) data = g_byte_array_new(); 158503247512SDavid Woodhouse GList *perms_list = NULL; 158603247512SDavid Woodhouse int err; 158703247512SDavid Woodhouse 158803247512SDavid Woodhouse /* mkdir does this */ 158903247512SDavid Woodhouse err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data); 159003247512SDavid Woodhouse if (err == ENOENT) { 159103247512SDavid Woodhouse err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data); 159203247512SDavid Woodhouse } 159303247512SDavid Woodhouse if (err) { 159403247512SDavid Woodhouse errno = err; 159503247512SDavid Woodhouse return false; 159603247512SDavid Woodhouse } 159703247512SDavid Woodhouse 159803247512SDavid Woodhouse perms_list = g_list_append(perms_list, 159903247512SDavid Woodhouse xs_perm_as_string(XS_PERM_NONE, owner)); 160003247512SDavid Woodhouse perms_list = g_list_append(perms_list, 160103247512SDavid Woodhouse xs_perm_as_string(perms, domid)); 160203247512SDavid Woodhouse 160303247512SDavid Woodhouse err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list); 160403247512SDavid Woodhouse g_list_free_full(perms_list, g_free); 160503247512SDavid Woodhouse if (err) { 160603247512SDavid Woodhouse errno = err; 160703247512SDavid Woodhouse return false; 160803247512SDavid Woodhouse } 160903247512SDavid Woodhouse return true; 161003247512SDavid Woodhouse } 161103247512SDavid Woodhouse 161203247512SDavid Woodhouse static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t, 161303247512SDavid Woodhouse const char *path) 161403247512SDavid Woodhouse { 161503247512SDavid Woodhouse int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path); 161603247512SDavid Woodhouse if (err) { 161703247512SDavid Woodhouse errno = err; 161803247512SDavid Woodhouse return false; 161903247512SDavid Woodhouse } 162003247512SDavid Woodhouse return true; 162103247512SDavid Woodhouse } 162203247512SDavid Woodhouse 162303247512SDavid Woodhouse static void be_watch_bh(void *_h) 162403247512SDavid Woodhouse { 162503247512SDavid Woodhouse struct qemu_xs_handle *h = _h; 162603247512SDavid Woodhouse GList *l; 162703247512SDavid Woodhouse 162803247512SDavid Woodhouse for (l = h->watches; l; 
l = l->next) { 162903247512SDavid Woodhouse struct qemu_xs_watch *w = l->data; 163003247512SDavid Woodhouse 163103247512SDavid Woodhouse while (w->events) { 163203247512SDavid Woodhouse struct watch_event *ev = w->events->data; 163303247512SDavid Woodhouse 163403247512SDavid Woodhouse w->fn(w->opaque, ev->path); 163503247512SDavid Woodhouse 163603247512SDavid Woodhouse w->events = g_list_remove(w->events, ev); 163703247512SDavid Woodhouse free_watch_event(ev); 163803247512SDavid Woodhouse } 163903247512SDavid Woodhouse } 164003247512SDavid Woodhouse } 164103247512SDavid Woodhouse 164203247512SDavid Woodhouse static void xs_be_watch_cb(void *opaque, const char *path, const char *token) 164303247512SDavid Woodhouse { 164403247512SDavid Woodhouse struct watch_event *ev = g_new0(struct watch_event, 1); 164503247512SDavid Woodhouse struct qemu_xs_watch *w = opaque; 164603247512SDavid Woodhouse 164703247512SDavid Woodhouse /* We don't care about the token */ 164803247512SDavid Woodhouse ev->path = g_strdup(path); 164903247512SDavid Woodhouse w->events = g_list_append(w->events, ev); 165003247512SDavid Woodhouse 165103247512SDavid Woodhouse qemu_bh_schedule(w->h->watch_bh); 165203247512SDavid Woodhouse } 165303247512SDavid Woodhouse 165403247512SDavid Woodhouse static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h, 165503247512SDavid Woodhouse const char *path, xs_watch_fn fn, 165603247512SDavid Woodhouse void *opaque) 165703247512SDavid Woodhouse { 165803247512SDavid Woodhouse struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1); 165903247512SDavid Woodhouse int err; 166003247512SDavid Woodhouse 166103247512SDavid Woodhouse w->h = h; 166203247512SDavid Woodhouse w->fn = fn; 166303247512SDavid Woodhouse w->opaque = opaque; 166403247512SDavid Woodhouse 166503247512SDavid Woodhouse err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w); 166603247512SDavid Woodhouse if (err) { 166703247512SDavid Woodhouse errno = err; 166803247512SDavid Woodhouse g_free(w); 166903247512SDavid Woodhouse return NULL; 167003247512SDavid Woodhouse } 167103247512SDavid Woodhouse 167203247512SDavid Woodhouse w->path = g_strdup(path); 167303247512SDavid Woodhouse h->watches = g_list_append(h->watches, w); 167403247512SDavid Woodhouse return w; 167503247512SDavid Woodhouse } 167603247512SDavid Woodhouse 167703247512SDavid Woodhouse static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w) 167803247512SDavid Woodhouse { 167903247512SDavid Woodhouse xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w); 168003247512SDavid Woodhouse 168103247512SDavid Woodhouse h->watches = g_list_remove(h->watches, w); 168203247512SDavid Woodhouse g_list_free_full(w->events, (GDestroyNotify)free_watch_event); 168303247512SDavid Woodhouse g_free(w->path); 168403247512SDavid Woodhouse g_free(w); 168503247512SDavid Woodhouse } 168603247512SDavid Woodhouse 168703247512SDavid Woodhouse static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h) 168803247512SDavid Woodhouse { 168903247512SDavid Woodhouse unsigned int new_tx = XBT_NULL; 169003247512SDavid Woodhouse int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx); 169103247512SDavid Woodhouse if (err) { 169203247512SDavid Woodhouse errno = err; 169303247512SDavid Woodhouse return XBT_NULL; 169403247512SDavid Woodhouse } 169503247512SDavid Woodhouse return new_tx; 169603247512SDavid Woodhouse } 169703247512SDavid Woodhouse 169803247512SDavid Woodhouse static bool xs_be_transaction_end(struct 
qemu_xs_handle *h, xs_transaction_t t,
169903247512SDavid Woodhouse bool abort)
170003247512SDavid Woodhouse {
170103247512SDavid Woodhouse int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
170203247512SDavid Woodhouse if (err) {
170303247512SDavid Woodhouse errno = err;
170403247512SDavid Woodhouse return false;
170503247512SDavid Woodhouse }
170603247512SDavid Woodhouse return true;
170703247512SDavid Woodhouse }
170803247512SDavid Woodhouse
170903247512SDavid Woodhouse static struct qemu_xs_handle *xs_be_open(void)
171003247512SDavid Woodhouse {
171103247512SDavid Woodhouse XenXenstoreState *s = xen_xenstore_singleton;
171203247512SDavid Woodhouse struct qemu_xs_handle *h;
171303247512SDavid Woodhouse
1714c9bdfe8dSDavid Woodhouse if (!s || !s->impl) {
171503247512SDavid Woodhouse errno = ENOSYS;
171603247512SDavid Woodhouse return NULL;
171703247512SDavid Woodhouse }
171803247512SDavid Woodhouse
171903247512SDavid Woodhouse h = g_new0(struct qemu_xs_handle, 1);
172003247512SDavid Woodhouse h->impl = s->impl;
172103247512SDavid Woodhouse
172203247512SDavid Woodhouse h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);
172303247512SDavid Woodhouse
172403247512SDavid Woodhouse return h;
172503247512SDavid Woodhouse }
172603247512SDavid Woodhouse
172703247512SDavid Woodhouse static void xs_be_close(struct qemu_xs_handle *h)
172803247512SDavid Woodhouse {
172903247512SDavid Woodhouse while (h->watches) {
173003247512SDavid Woodhouse struct qemu_xs_watch *w = h->watches->data;
173103247512SDavid Woodhouse xs_be_unwatch(h, w);
173203247512SDavid Woodhouse }
173303247512SDavid Woodhouse
173403247512SDavid Woodhouse qemu_bh_delete(h->watch_bh);
173503247512SDavid Woodhouse g_free(h);
173603247512SDavid Woodhouse }
173703247512SDavid Woodhouse
173803247512SDavid Woodhouse static struct xenstore_backend_ops emu_xenstore_backend_ops = {
173903247512SDavid Woodhouse .open = xs_be_open,
174003247512SDavid Woodhouse .close = xs_be_close,
174103247512SDavid Woodhouse .get_domain_path = xs_be_get_domain_path,
174203247512SDavid Woodhouse .directory = xs_be_directory,
174303247512SDavid Woodhouse .read = xs_be_read,
174403247512SDavid Woodhouse .write = xs_be_write,
174503247512SDavid Woodhouse .create = xs_be_create,
174603247512SDavid Woodhouse .destroy = xs_be_destroy,
174703247512SDavid Woodhouse .watch = xs_be_watch,
174803247512SDavid Woodhouse .unwatch = xs_be_unwatch,
174903247512SDavid Woodhouse .transaction_start = xs_be_transaction_start,
175003247512SDavid Woodhouse .transaction_end = xs_be_transaction_end,
175103247512SDavid Woodhouse };
1752
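/*
 * Illustrative sketch, not part of the original file: backends inside
 * QEMU are expected to reach this emulated XenStore through the
 * xenstore_backend_ops wrappers declared in hw/xen/xen_backend_ops.h
 * rather than calling the xs_be_*() helpers above directly.  Assuming
 * the qemu_xen_xs_*() wrapper names and an example node path (both are
 * assumptions for illustration only), a simple lookup might look
 * roughly like this:
 *
 *     struct qemu_xs_handle *h = qemu_xen_xs_open();
 *     unsigned int len;
 *     void *val;
 *
 *     if (!h) {
 *         return;   (errno holds the reason, e.g. no impl registered yet)
 *     }
 *     val = qemu_xen_xs_read(h, XBT_NULL,
 *                            "/local/domain/1/store/port", &len);
 *     g_free(val);  (xs_be_read() hands back a NUL-terminated buffer)
 *     qemu_xen_xs_close(h);
 *
 * On failure the calls return NULL/false and set errno, matching the
 * conventions of the xs_be_*() implementations above.
 */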