1c08f5d0eSDavid Woodhouse /* 2c08f5d0eSDavid Woodhouse * QEMU Xen emulation: Shared/overlay pages support 3c08f5d0eSDavid Woodhouse * 4c08f5d0eSDavid Woodhouse * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 5c08f5d0eSDavid Woodhouse * 6c08f5d0eSDavid Woodhouse * Authors: David Woodhouse <dwmw2@infradead.org> 7c08f5d0eSDavid Woodhouse * 8c08f5d0eSDavid Woodhouse * This work is licensed under the terms of the GNU GPL, version 2 or later. 9c08f5d0eSDavid Woodhouse * See the COPYING file in the top-level directory. 10c08f5d0eSDavid Woodhouse */ 11c08f5d0eSDavid Woodhouse 12c08f5d0eSDavid Woodhouse #include "qemu/osdep.h" 13c08f5d0eSDavid Woodhouse 14c08f5d0eSDavid Woodhouse #include "qemu/host-utils.h" 15c08f5d0eSDavid Woodhouse #include "qemu/module.h" 16c08f5d0eSDavid Woodhouse #include "qemu/main-loop.h" 17c08f5d0eSDavid Woodhouse #include "qemu/cutils.h" 18cc37d98bSRichard Henderson #include "qemu/error-report.h" 19c08f5d0eSDavid Woodhouse #include "qapi/error.h" 20c08f5d0eSDavid Woodhouse #include "qom/object.h" 21c08f5d0eSDavid Woodhouse #include "migration/vmstate.h" 22c08f5d0eSDavid Woodhouse 23c08f5d0eSDavid Woodhouse #include "hw/sysbus.h" 24c08f5d0eSDavid Woodhouse #include "hw/xen/xen.h" 25d05864d2SDavid Woodhouse #include "hw/xen/xen_backend_ops.h" 26c08f5d0eSDavid Woodhouse #include "xen_overlay.h" 27c08f5d0eSDavid Woodhouse #include "xen_evtchn.h" 28c08f5d0eSDavid Woodhouse #include "xen_xenstore.h" 29c08f5d0eSDavid Woodhouse 30c08f5d0eSDavid Woodhouse #include "sysemu/kvm.h" 31c08f5d0eSDavid Woodhouse #include "sysemu/kvm_xen.h" 32c08f5d0eSDavid Woodhouse 330254c4d1SDavid Woodhouse #include "trace.h" 340254c4d1SDavid Woodhouse 350254c4d1SDavid Woodhouse #include "xenstore_impl.h" 360254c4d1SDavid Woodhouse 37c08f5d0eSDavid Woodhouse #include "hw/xen/interface/io/xs_wire.h" 38c08f5d0eSDavid Woodhouse #include "hw/xen/interface/event_channel.h" 39d05864d2SDavid Woodhouse #include "hw/xen/interface/grant_table.h" 

/* QOM type name and boilerplate for the emulated XenStore device */
#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)

/* Number of grant-table entries that fit in one Xen page, per ABI version */
#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))

/* Size of the fixed header preceding every xenstore wire message */
#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))

struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    XenstoreImplState *impl;      /* in-process xenstore database */
    GList *watch_events;          /* queued watch events for the guest */

    MemoryRegion xenstore_page;   /* backing RAM for the shared ring page */
    struct xenstore_domain_interface *xs; /* pointer into xenstore_page */
    /* Staging buffers: one full request/response, header plus max payload */
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;          /* bytes of the current request received */
    uint32_t rsp_offset;          /* bytes of the pending response sent */
    bool rsp_pending;             /* a response is waiting in rsp_data */
    bool fatal_error;             /* protocol violation detected */

    evtchn_port_t guest_port;     /* event channel port as seen by the guest */
    evtchn_port_t be_port;        /* local (backend) binding of that port */
    struct xenevtchn_handle *eh;  /* backend event channel handle */

    /* Serialized impl state; only populated across save/load (see vmstate) */
    uint8_t *impl_state;
    uint32_t impl_state_size;

    struct xengntdev_handle *gt;  /* grant-table handle for mapping the page */
    void *granted_xs;             /* grant mapping of the xenstore page */
};

struct XenXenstoreState *xen_xenstore_singleton;
static void xen_xenstore_event(void *opaque);
static void fire_watch_cb(void *opaque, const char *path, const char *token);

static struct xenstore_backend_ops emu_xenstore_backend_ops;

/*
 * Write a printf-formatted value to /local/domain/<xen_domid>/<relpath> in
 * the emulated xenstore, then apply @perms to the node.  Only used at
 * realize time to populate the well-known default nodes, so failures are
 * asserted rather than propagated.
 */
static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
                                                GList *perms,
                                                const char *relpath,
                                                const char *fmt, ...)
{
    gchar *abspath;
    gchar *value;
    va_list args;
    GByteArray *data;
    int err;

    abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath);
    va_start(args, fmt);
    value = g_strdup_vprintf(fmt, args);
    va_end(args);

    /* The byte array takes ownership of 'value'; no separate g_free needed */
    data = g_byte_array_new_take((void *)value, strlen(value));

    err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data);
    assert(!err);

    g_byte_array_unref(data);

    err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms);
    assert(!err);

    g_free(abspath);
}

/*
 * Realize the emulated XenStore device: allocate the shared ring page,
 * open the backend event channel, create the xenstore database and
 * populate the default nodes expected by guests.
 */
static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);
    GList *perms;

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }
    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh),
                       xen_xenstore_event, NULL, NULL, NULL, s);

    s->impl = xs_impl_create(xen_domid);

    /* Populate the default nodes */

    /* Nodes owned by 'dom0' but readable by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
    perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));

    relpath_printf(s, perms, "", "%s", "");

    relpath_printf(s, perms, "domid", "%u", xen_domid);

    relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1);
    relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1);

    relpath_printf(s, perms, "platform/acpi", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s3", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s4", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0);

    g_list_free_full(perms, g_free);

    /* Nodes owned by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid));

    relpath_printf(s, perms, "attr", "%s", "");

    relpath_printf(s, perms, "control/shutdown", "%s", "");
    relpath_printf(s, perms, "control/feature-poweroff", "%u", 1);
    relpath_printf(s, perms, "control/feature-reboot", "%u", 1);
    relpath_printf(s, perms, "control/feature-suspend", "%u", 1);
    relpath_printf(s, perms, "control/feature-s3", "%u", 1);
    relpath_printf(s, perms, "control/feature-s4", "%u", 1);

    relpath_printf(s, perms, "data", "%s", "");
    relpath_printf(s, perms, "device", "%s", "");
    relpath_printf(s, perms, "drivers", "%s", "");
    relpath_printf(s, perms, "error", "%s", "");
    relpath_printf(s, perms, "feature", "%s", "");

    g_list_free_full(perms, g_free);

    xen_xenstore_ops = &emu_xenstore_backend_ops;
}

/* This device (and its vmstate section) exists only under Xen emulation */
static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}

/*
 * Capture migratable state: latch the guest's event channel port and
 * serialize the xenstore database into impl_state/impl_state_size, whose
 * ownership is taken from the temporary GByteArray (freed with FALSE so
 * the data segment survives).
 */
static int xen_xenstore_pre_save(void *opaque)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;

    if (s->eh) {
        s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
    }

    g_free(s->impl_state);
    save = xs_impl_serialize(s->impl);
    s->impl_state = save->data;
    s->impl_state_size = save->len;
    g_byte_array_free(save, false);

    return 0;
}
/*
 * Restore state after migration: rebind the backend end of the guest's
 * event channel and deserialize the saved xenstore database (which also
 * re-registers fire_watch_cb for the guest's watches).
 */
static int xen_xenstore_post_load(void *opaque, int ver)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;
    int ret;

    /*
     * As qemu/dom0, rebind to the guest's port. The Windows drivers may
     * unbind the XenStore evtchn and rebind to it, having obtained the
     * "remote" port through EVTCHNOP_status. In the case that migration
     * occurs while it's unbound, the "remote" port needs to be the same
     * as before so that the guest can find it, but should remain unbound.
     */
    if (s->guest_port) {
        int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
                                                     s->guest_port);
        if (be_port < 0) {
            return be_port;
        }
        s->be_port = be_port;
    }

    /* Hand the serialized blob (allocated by vmstate load) to a GByteArray */
    save = g_byte_array_new_take(s->impl_state, s->impl_state_size);
    s->impl_state = NULL;
    s->impl_state_size = 0;

    ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
    return ret;
}

static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .unmigratable = 1, /* The PV back ends don't migrate yet */
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_UINT32(impl_state_size, XenXenstoreState),
        /* Variable-length serialized database, sized by impl_state_size */
        VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState,
                                    impl_state_size, 0,
                                    vmstate_info_uint8, uint8_t),
        VMSTATE_END_OF_LIST()
    }
};

static void xen_xenstore_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}

static const TypeInfo xen_xenstore_info = {
    .name = TYPE_XEN_XENSTORE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init = xen_xenstore_class_init,
};

/* Instantiate the singleton xenstore device (called from machine setup) */
void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}
287c08f5d0eSDavid Woodhouse */ 288c08f5d0eSDavid Woodhouse } 289c08f5d0eSDavid Woodhouse 290c08f5d0eSDavid Woodhouse static void xen_xenstore_register_types(void) 291c08f5d0eSDavid Woodhouse { 292c08f5d0eSDavid Woodhouse type_register_static(&xen_xenstore_info); 293c08f5d0eSDavid Woodhouse } 294c08f5d0eSDavid Woodhouse 295c08f5d0eSDavid Woodhouse type_init(xen_xenstore_register_types) 296c08f5d0eSDavid Woodhouse 297c08f5d0eSDavid Woodhouse uint16_t xen_xenstore_get_port(void) 298c08f5d0eSDavid Woodhouse { 299c08f5d0eSDavid Woodhouse XenXenstoreState *s = xen_xenstore_singleton; 300c08f5d0eSDavid Woodhouse if (!s) { 301c08f5d0eSDavid Woodhouse return 0; 302c08f5d0eSDavid Woodhouse } 303c08f5d0eSDavid Woodhouse return s->guest_port; 304c08f5d0eSDavid Woodhouse } 305c08f5d0eSDavid Woodhouse 306f3341e7bSDavid Woodhouse static bool req_pending(XenXenstoreState *s) 307f3341e7bSDavid Woodhouse { 308f3341e7bSDavid Woodhouse struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data; 309f3341e7bSDavid Woodhouse 310f3341e7bSDavid Woodhouse return s->req_offset == XENSTORE_HEADER_SIZE + req->len; 311f3341e7bSDavid Woodhouse } 312f3341e7bSDavid Woodhouse 313f3341e7bSDavid Woodhouse static void reset_req(XenXenstoreState *s) 314f3341e7bSDavid Woodhouse { 315f3341e7bSDavid Woodhouse memset(s->req_data, 0, sizeof(s->req_data)); 316f3341e7bSDavid Woodhouse s->req_offset = 0; 317f3341e7bSDavid Woodhouse } 318f3341e7bSDavid Woodhouse 319f3341e7bSDavid Woodhouse static void reset_rsp(XenXenstoreState *s) 320f3341e7bSDavid Woodhouse { 321f3341e7bSDavid Woodhouse s->rsp_pending = false; 322f3341e7bSDavid Woodhouse 323f3341e7bSDavid Woodhouse memset(s->rsp_data, 0, sizeof(s->rsp_data)); 324f3341e7bSDavid Woodhouse s->rsp_offset = 0; 325f3341e7bSDavid Woodhouse } 326f3341e7bSDavid Woodhouse 3270254c4d1SDavid Woodhouse static void xs_error(XenXenstoreState *s, unsigned int id, 3280254c4d1SDavid Woodhouse xs_transaction_t tx_id, int errnum) 3290254c4d1SDavid Woodhouse { 3300254c4d1SDavid 
Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 3310254c4d1SDavid Woodhouse const char *errstr = NULL; 3320254c4d1SDavid Woodhouse 3330254c4d1SDavid Woodhouse for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) { 3348ac98aedSDavid Woodhouse const struct xsd_errors *xsd_error = &xsd_errors[i]; 3350254c4d1SDavid Woodhouse 3360254c4d1SDavid Woodhouse if (xsd_error->errnum == errnum) { 3370254c4d1SDavid Woodhouse errstr = xsd_error->errstring; 3380254c4d1SDavid Woodhouse break; 3390254c4d1SDavid Woodhouse } 3400254c4d1SDavid Woodhouse } 3410254c4d1SDavid Woodhouse assert(errstr); 3420254c4d1SDavid Woodhouse 3430254c4d1SDavid Woodhouse trace_xenstore_error(id, tx_id, errstr); 3440254c4d1SDavid Woodhouse 3450254c4d1SDavid Woodhouse rsp->type = XS_ERROR; 3460254c4d1SDavid Woodhouse rsp->req_id = id; 3470254c4d1SDavid Woodhouse rsp->tx_id = tx_id; 3480254c4d1SDavid Woodhouse rsp->len = (uint32_t)strlen(errstr) + 1; 3490254c4d1SDavid Woodhouse 3500254c4d1SDavid Woodhouse memcpy(&rsp[1], errstr, rsp->len); 3510254c4d1SDavid Woodhouse } 3520254c4d1SDavid Woodhouse 3530254c4d1SDavid Woodhouse static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id, 3540254c4d1SDavid Woodhouse xs_transaction_t tx_id) 3550254c4d1SDavid Woodhouse { 3560254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 3570254c4d1SDavid Woodhouse const char *okstr = "OK"; 3580254c4d1SDavid Woodhouse 3590254c4d1SDavid Woodhouse rsp->type = type; 3600254c4d1SDavid Woodhouse rsp->req_id = req_id; 3610254c4d1SDavid Woodhouse rsp->tx_id = tx_id; 3620254c4d1SDavid Woodhouse rsp->len = (uint32_t)strlen(okstr) + 1; 3630254c4d1SDavid Woodhouse 3640254c4d1SDavid Woodhouse memcpy(&rsp[1], okstr, rsp->len); 3650254c4d1SDavid Woodhouse } 3660254c4d1SDavid Woodhouse 3670254c4d1SDavid Woodhouse /* 3680254c4d1SDavid Woodhouse * The correct request and response formats are documented in xen.git: 3690254c4d1SDavid Woodhouse * 
/*
 * The correct request and response formats are documented in xen.git:
 * docs/misc/xenstore.txt. A summary is given below for convenience.
 * The '|' symbol represents a NUL character.
 *
 * ---------- Database read, write and permissions operations ----------
 *
 * READ                    <path>|                 <value|>
 * WRITE                   <path>|<value|>
 *         Store and read the octet string <value> at <path>.
 *         WRITE creates any missing parent paths, with empty values.
 *
 * MKDIR                   <path>|
 *         Ensures that the <path> exists, if necessary by creating
 *         it and any missing parents with empty values. If <path>
 *         or any parent already exists, its value is left unchanged.
 *
 * RM                      <path>|
 *         Ensures that the <path> does not exist, by deleting
 *         it and all of its children. It is not an error if <path> does
 *         not exist, but it _is_ an error if <path>'s immediate parent
 *         does not exist either.
 *
 * DIRECTORY               <path>|                 <child-leaf-name>|*
 *         Gives a list of the immediate children of <path>, as only the
 *         leafnames. The resulting children are each named
 *         <path>/<child-leaf-name>.
 *
 * DIRECTORY_PART          <path>|<offset>         <gencnt>|<child-leaf-name>|*
 *         Same as DIRECTORY, but to be used for children lists longer than
 *         XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
 *         the list of children to return. Return values are the generation
 *         count <gencnt> of the node (to be used to ensure the node hasn't
 *         changed between two reads: <gencnt> being the same for multiple
 *         reads guarantees the node hasn't changed) and the list of children
 *         starting at the specified <offset> of the complete list.
 *
 * GET_PERMS               <path>|                 <perm-as-string>|+
 * SET_PERMS               <path>|<perm-as-string>|+?
 *         <perm-as-string> is one of the following
 *                 w<domid>        write only
 *                 r<domid>        read only
 *                 b<domid>        both read and write
 *                 n<domid>        no access
 *         See https://wiki.xen.org/wiki/XenBus section
 *         `Permissions' for details of the permissions system.
 *         It is possible to set permissions for the special watch paths
 *         "@introduceDomain" and "@releaseDomain" to enable receiving those
 *         watches in unprivileged domains.
 *
 * ---------- Watches ----------
 *
 * WATCH                   <wpath>|<token>|?
 *         Adds a watch.
 *
 *         When a <path> is modified (including path creation, removal,
 *         contents change or permissions change) this generates an event
 *         on the changed <path>. Changes made in transactions cause an
 *         event only if and when committed. Each occurring event is
 *         matched against all the watches currently set up, and each
 *         matching watch results in a WATCH_EVENT message (see below).
 *
 *         The event's path matches the watch's <wpath> if it is a child
 *         of <wpath>.
 *
 *         <wpath> can be a <path> to watch or @<wspecial>. In the
 *         latter case <wspecial> may have any syntax but it matches
 *         (according to the rules above) only the following special
 *         events which are invented by xenstored:
 *             @introduceDomain    occurs on INTRODUCE
 *             @releaseDomain      occurs on any domain crash or
 *                                 shutdown, and also on RELEASE
 *                                 and domain destruction
 *         <wspecial> events are sent to privileged callers or explicitly
 *         via SET_PERMS enabled domains only.
 *
 *         When a watch is first set up it is triggered once straight
 *         away, with <path> equal to <wpath>. Watches may be triggered
 *         spuriously. The tx_id in a WATCH request is ignored.
 *
 *         Watches are supposed to be restricted by the permissions
 *         system but in practice the implementation is imperfect.
 *         Applications should not rely on being sent a notification for
 *         paths that they cannot read; however, an application may rely
 *         on being sent a watch when a path which it _is_ able to read
 *         is deleted even if that leaves only a nonexistent unreadable
 *         parent. A notification may be omitted if a node's permissions
 *         are changed so as to make it unreadable, in which case future
 *         notifications may be suppressed (and if the node is later made
 *         readable, some notifications may have been lost).
 *
 * WATCH_EVENT                                     <epath>|<token>|
 *         Unsolicited `reply' generated for matching modification events
 *         as described above. req_id and tx_id are both 0.
 *
 *         <epath> is the event's path, ie the actual path that was
 *         modified; however if the event was the recursive removal of a
 *         parent of <wpath>, <epath> is just
 *         <wpath> (rather than the actual path which was removed). So
 *         <epath> is a child of <wpath>, regardless.
 *
 *         Iff <wpath> for the watch was specified as a relative pathname,
 *         the <epath> path will also be relative (with the same base,
 *         obviously).
 *
 * UNWATCH                 <wpath>|<token>|?
 *
 * RESET_WATCHES           |
 *         Reset all watches and transactions of the caller.
 *
 * ---------- Transactions ----------
 *
 * TRANSACTION_START       |                       <transid>|
 *         <transid> is an opaque uint32_t allocated by xenstored
 *         represented as unsigned decimal. After this, transaction may
 *         be referenced by using <transid> (as 32-bit binary) in the
 *         tx_id request header field. When transaction is started whole
 *         db is copied; reads and writes happen on the copy.
 *         It is not legal to send non-0 tx_id in TRANSACTION_START.
 *
 * TRANSACTION_END         T|
 * TRANSACTION_END         F|
 *         tx_id must refer to existing transaction. After this
 *         request the tx_id is no longer valid and may be reused by
 *         xenstore. If F, the transaction is discarded. If T,
 *         it is committed: if there were any other intervening writes
 *         then our END gets EAGAIN.
 *
 *         The plan is that in the future only intervening `conflicting'
 *         writes cause EAGAIN, meaning only writes or other commits
 *         which changed paths which were read or written in the
 *         transaction at hand.
 *
 */

/*
 * Handle an XS_READ request: look up <path> in the store and return its
 * value as the response payload, or an XS_ERROR response on failure.
 */
static void xs_read(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    g_autoptr(GByteArray) data = g_byte_array_new();
    int err;

    /* The path must be present and NUL-terminated */
    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_read(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_READ;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    /* The value must fit in a single response payload */
    len = data->len;
    if (len > XENSTORE_PAYLOAD_MAX) {
        xs_error(s, req_id, tx_id, E2BIG);
        return;
    }

    memcpy(&rsp_data[rsp->len], data->data, len);
    rsp->len += len;
}
5390254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 5400254c4d1SDavid Woodhouse unsigned int len) 5410254c4d1SDavid Woodhouse { 5420254c4d1SDavid Woodhouse g_autoptr(GByteArray) data = g_byte_array_new(); 5430254c4d1SDavid Woodhouse const char *path; 5440254c4d1SDavid Woodhouse int err; 5450254c4d1SDavid Woodhouse 5460254c4d1SDavid Woodhouse if (len == 0) { 5470254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 5480254c4d1SDavid Woodhouse return; 5490254c4d1SDavid Woodhouse } 5500254c4d1SDavid Woodhouse 5510254c4d1SDavid Woodhouse path = (const char *)req_data; 5520254c4d1SDavid Woodhouse 5530254c4d1SDavid Woodhouse while (len--) { 5540254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 5550254c4d1SDavid Woodhouse break; 5560254c4d1SDavid Woodhouse } 5570254c4d1SDavid Woodhouse if (len == 0) { 5580254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 5590254c4d1SDavid Woodhouse return; 5600254c4d1SDavid Woodhouse } 5610254c4d1SDavid Woodhouse } 5620254c4d1SDavid Woodhouse 5630254c4d1SDavid Woodhouse g_byte_array_append(data, req_data, len); 5640254c4d1SDavid Woodhouse 5650254c4d1SDavid Woodhouse trace_xenstore_write(tx_id, path); 5660254c4d1SDavid Woodhouse err = xs_impl_write(s->impl, xen_domid, tx_id, path, data); 5670254c4d1SDavid Woodhouse if (err) { 5680254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 5690254c4d1SDavid Woodhouse return; 5700254c4d1SDavid Woodhouse } 5710254c4d1SDavid Woodhouse 5720254c4d1SDavid Woodhouse xs_ok(s, XS_WRITE, req_id, tx_id); 5730254c4d1SDavid Woodhouse } 5740254c4d1SDavid Woodhouse 5750254c4d1SDavid Woodhouse static void xs_mkdir(XenXenstoreState *s, unsigned int req_id, 5760254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 5770254c4d1SDavid Woodhouse unsigned int len) 5780254c4d1SDavid Woodhouse { 5790254c4d1SDavid Woodhouse g_autoptr(GByteArray) data = g_byte_array_new(); 5800254c4d1SDavid Woodhouse const char *path; 5810254c4d1SDavid Woodhouse int err; 5820254c4d1SDavid 
Woodhouse 5830254c4d1SDavid Woodhouse if (len == 0 || req_data[len - 1] != '\0') { 5840254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 5850254c4d1SDavid Woodhouse return; 5860254c4d1SDavid Woodhouse } 5870254c4d1SDavid Woodhouse 5880254c4d1SDavid Woodhouse path = (const char *)req_data; 5890254c4d1SDavid Woodhouse 5900254c4d1SDavid Woodhouse trace_xenstore_mkdir(tx_id, path); 5910254c4d1SDavid Woodhouse err = xs_impl_read(s->impl, xen_domid, tx_id, path, data); 5920254c4d1SDavid Woodhouse if (err == ENOENT) { 5930254c4d1SDavid Woodhouse err = xs_impl_write(s->impl, xen_domid, tx_id, path, data); 5940254c4d1SDavid Woodhouse } 5950254c4d1SDavid Woodhouse 5960254c4d1SDavid Woodhouse if (!err) { 5970254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 5980254c4d1SDavid Woodhouse return; 5990254c4d1SDavid Woodhouse } 6000254c4d1SDavid Woodhouse 6010254c4d1SDavid Woodhouse xs_ok(s, XS_MKDIR, req_id, tx_id); 6020254c4d1SDavid Woodhouse } 6030254c4d1SDavid Woodhouse 6040254c4d1SDavid Woodhouse static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp, 6050254c4d1SDavid Woodhouse GList *strings, unsigned int start, bool truncate) 6060254c4d1SDavid Woodhouse { 6070254c4d1SDavid Woodhouse uint8_t *rsp_data = (uint8_t *)&rsp[1]; 6080254c4d1SDavid Woodhouse GList *l; 6090254c4d1SDavid Woodhouse 6100254c4d1SDavid Woodhouse for (l = strings; l; l = l->next) { 6110254c4d1SDavid Woodhouse size_t len = strlen(l->data) + 1; /* Including the NUL termination */ 6120254c4d1SDavid Woodhouse char *str = l->data; 6130254c4d1SDavid Woodhouse 6140254c4d1SDavid Woodhouse if (rsp->len + len > XENSTORE_PAYLOAD_MAX) { 6150254c4d1SDavid Woodhouse if (truncate) { 6160254c4d1SDavid Woodhouse len = XENSTORE_PAYLOAD_MAX - rsp->len; 6170254c4d1SDavid Woodhouse if (!len) { 6180254c4d1SDavid Woodhouse return; 6190254c4d1SDavid Woodhouse } 6200254c4d1SDavid Woodhouse } else { 6210254c4d1SDavid Woodhouse xs_error(s, rsp->req_id, rsp->tx_id, E2BIG); 6220254c4d1SDavid 
Woodhouse return; 6230254c4d1SDavid Woodhouse } 6240254c4d1SDavid Woodhouse } 6250254c4d1SDavid Woodhouse 6260254c4d1SDavid Woodhouse if (start) { 6270254c4d1SDavid Woodhouse if (start >= len) { 6280254c4d1SDavid Woodhouse start -= len; 6290254c4d1SDavid Woodhouse continue; 6300254c4d1SDavid Woodhouse } 6310254c4d1SDavid Woodhouse 6320254c4d1SDavid Woodhouse str += start; 6330254c4d1SDavid Woodhouse len -= start; 6340254c4d1SDavid Woodhouse start = 0; 6350254c4d1SDavid Woodhouse } 6360254c4d1SDavid Woodhouse 6370254c4d1SDavid Woodhouse memcpy(&rsp_data[rsp->len], str, len); 6380254c4d1SDavid Woodhouse rsp->len += len; 6390254c4d1SDavid Woodhouse } 6400254c4d1SDavid Woodhouse /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */ 6410254c4d1SDavid Woodhouse if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) { 6420254c4d1SDavid Woodhouse rsp_data[rsp->len++] = '\0'; 6430254c4d1SDavid Woodhouse } 6440254c4d1SDavid Woodhouse } 6450254c4d1SDavid Woodhouse 6460254c4d1SDavid Woodhouse static void xs_directory(XenXenstoreState *s, unsigned int req_id, 6470254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 6480254c4d1SDavid Woodhouse unsigned int len) 6490254c4d1SDavid Woodhouse { 6500254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 6510254c4d1SDavid Woodhouse GList *items = NULL; 6520254c4d1SDavid Woodhouse const char *path; 6530254c4d1SDavid Woodhouse int err; 6540254c4d1SDavid Woodhouse 6550254c4d1SDavid Woodhouse if (len == 0 || req_data[len - 1] != '\0') { 6560254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 6570254c4d1SDavid Woodhouse return; 6580254c4d1SDavid Woodhouse } 6590254c4d1SDavid Woodhouse 6600254c4d1SDavid Woodhouse path = (const char *)req_data; 6610254c4d1SDavid Woodhouse 6620254c4d1SDavid Woodhouse trace_xenstore_directory(tx_id, path); 6630254c4d1SDavid Woodhouse err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items); 6640254c4d1SDavid Woodhouse if (err != 0) { 
6650254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 6660254c4d1SDavid Woodhouse return; 6670254c4d1SDavid Woodhouse } 6680254c4d1SDavid Woodhouse 6690254c4d1SDavid Woodhouse rsp->type = XS_DIRECTORY; 6700254c4d1SDavid Woodhouse rsp->req_id = req_id; 6710254c4d1SDavid Woodhouse rsp->tx_id = tx_id; 6720254c4d1SDavid Woodhouse rsp->len = 0; 6730254c4d1SDavid Woodhouse 6740254c4d1SDavid Woodhouse xs_append_strings(s, rsp, items, 0, false); 6750254c4d1SDavid Woodhouse 6760254c4d1SDavid Woodhouse g_list_free_full(items, g_free); 6770254c4d1SDavid Woodhouse } 6780254c4d1SDavid Woodhouse 6790254c4d1SDavid Woodhouse static void xs_directory_part(XenXenstoreState *s, unsigned int req_id, 6800254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 6810254c4d1SDavid Woodhouse unsigned int len) 6820254c4d1SDavid Woodhouse { 6830254c4d1SDavid Woodhouse const char *offset_str, *path = (const char *)req_data; 6840254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 6850254c4d1SDavid Woodhouse char *rsp_data = (char *)&rsp[1]; 6860254c4d1SDavid Woodhouse uint64_t gencnt = 0; 6870254c4d1SDavid Woodhouse unsigned int offset; 6880254c4d1SDavid Woodhouse GList *items = NULL; 6890254c4d1SDavid Woodhouse int err; 6900254c4d1SDavid Woodhouse 6910254c4d1SDavid Woodhouse if (len == 0) { 6920254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 6930254c4d1SDavid Woodhouse return; 6940254c4d1SDavid Woodhouse } 6950254c4d1SDavid Woodhouse 6960254c4d1SDavid Woodhouse while (len--) { 6970254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 6980254c4d1SDavid Woodhouse break; 6990254c4d1SDavid Woodhouse } 7000254c4d1SDavid Woodhouse if (len == 0) { 7010254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 7020254c4d1SDavid Woodhouse return; 7030254c4d1SDavid Woodhouse } 7040254c4d1SDavid Woodhouse } 7050254c4d1SDavid Woodhouse 7060254c4d1SDavid Woodhouse offset_str = (const char *)req_data; 7070254c4d1SDavid Woodhouse while (len--) { 
7080254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 7090254c4d1SDavid Woodhouse break; 7100254c4d1SDavid Woodhouse } 7110254c4d1SDavid Woodhouse if (len == 0) { 7120254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 7130254c4d1SDavid Woodhouse return; 7140254c4d1SDavid Woodhouse } 7150254c4d1SDavid Woodhouse } 7160254c4d1SDavid Woodhouse 7170254c4d1SDavid Woodhouse if (len) { 7180254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 7190254c4d1SDavid Woodhouse return; 7200254c4d1SDavid Woodhouse } 7210254c4d1SDavid Woodhouse 7220254c4d1SDavid Woodhouse if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) { 7230254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 7240254c4d1SDavid Woodhouse return; 7250254c4d1SDavid Woodhouse } 7260254c4d1SDavid Woodhouse 7270254c4d1SDavid Woodhouse trace_xenstore_directory_part(tx_id, path, offset); 7280254c4d1SDavid Woodhouse err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items); 7290254c4d1SDavid Woodhouse if (err != 0) { 7300254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 7310254c4d1SDavid Woodhouse return; 7320254c4d1SDavid Woodhouse } 7330254c4d1SDavid Woodhouse 7340254c4d1SDavid Woodhouse rsp->type = XS_DIRECTORY_PART; 7350254c4d1SDavid Woodhouse rsp->req_id = req_id; 7360254c4d1SDavid Woodhouse rsp->tx_id = tx_id; 7370254c4d1SDavid Woodhouse rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1; 7380254c4d1SDavid Woodhouse 7390254c4d1SDavid Woodhouse xs_append_strings(s, rsp, items, offset, true); 7400254c4d1SDavid Woodhouse 7410254c4d1SDavid Woodhouse g_list_free_full(items, g_free); 7420254c4d1SDavid Woodhouse } 7430254c4d1SDavid Woodhouse 7440254c4d1SDavid Woodhouse static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id, 7450254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 7460254c4d1SDavid Woodhouse unsigned int len) 7470254c4d1SDavid Woodhouse { 7480254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct 
xsd_sockmsg *)s->rsp_data; 7490254c4d1SDavid Woodhouse char *rsp_data = (char *)&rsp[1]; 7500254c4d1SDavid Woodhouse int err; 7510254c4d1SDavid Woodhouse 7520254c4d1SDavid Woodhouse if (len != 1 || req_data[0] != '\0') { 7530254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 7540254c4d1SDavid Woodhouse return; 7550254c4d1SDavid Woodhouse } 7560254c4d1SDavid Woodhouse 7570254c4d1SDavid Woodhouse rsp->type = XS_TRANSACTION_START; 7580254c4d1SDavid Woodhouse rsp->req_id = req_id; 7590254c4d1SDavid Woodhouse rsp->tx_id = tx_id; 7600254c4d1SDavid Woodhouse rsp->len = 0; 7610254c4d1SDavid Woodhouse 7620254c4d1SDavid Woodhouse err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id); 7630254c4d1SDavid Woodhouse if (err) { 7640254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 7650254c4d1SDavid Woodhouse return; 7660254c4d1SDavid Woodhouse } 7670254c4d1SDavid Woodhouse 7680254c4d1SDavid Woodhouse trace_xenstore_transaction_start(tx_id); 7690254c4d1SDavid Woodhouse 7700254c4d1SDavid Woodhouse rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id); 7710254c4d1SDavid Woodhouse assert(rsp->len < XENSTORE_PAYLOAD_MAX); 7720254c4d1SDavid Woodhouse rsp->len++; 7730254c4d1SDavid Woodhouse } 7740254c4d1SDavid Woodhouse 7750254c4d1SDavid Woodhouse static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id, 7760254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 7770254c4d1SDavid Woodhouse unsigned int len) 7780254c4d1SDavid Woodhouse { 7790254c4d1SDavid Woodhouse bool commit; 7800254c4d1SDavid Woodhouse int err; 7810254c4d1SDavid Woodhouse 7820254c4d1SDavid Woodhouse if (len != 2 || req_data[1] != '\0') { 7830254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 7840254c4d1SDavid Woodhouse return; 7850254c4d1SDavid Woodhouse } 7860254c4d1SDavid Woodhouse 7870254c4d1SDavid Woodhouse switch (req_data[0]) { 7880254c4d1SDavid Woodhouse case 'T': 7890254c4d1SDavid Woodhouse commit = true; 7900254c4d1SDavid Woodhouse break; 
7910254c4d1SDavid Woodhouse case 'F': 7920254c4d1SDavid Woodhouse commit = false; 7930254c4d1SDavid Woodhouse break; 7940254c4d1SDavid Woodhouse default: 7950254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 7960254c4d1SDavid Woodhouse return; 7970254c4d1SDavid Woodhouse } 7980254c4d1SDavid Woodhouse 7990254c4d1SDavid Woodhouse trace_xenstore_transaction_end(tx_id, commit); 8000254c4d1SDavid Woodhouse err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit); 8010254c4d1SDavid Woodhouse if (err) { 8020254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 8030254c4d1SDavid Woodhouse return; 8040254c4d1SDavid Woodhouse } 8050254c4d1SDavid Woodhouse 8060254c4d1SDavid Woodhouse xs_ok(s, XS_TRANSACTION_END, req_id, tx_id); 8070254c4d1SDavid Woodhouse } 8080254c4d1SDavid Woodhouse 8090254c4d1SDavid Woodhouse static void xs_rm(XenXenstoreState *s, unsigned int req_id, 8100254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, unsigned int len) 8110254c4d1SDavid Woodhouse { 8120254c4d1SDavid Woodhouse const char *path = (const char *)req_data; 8130254c4d1SDavid Woodhouse int err; 8140254c4d1SDavid Woodhouse 8150254c4d1SDavid Woodhouse if (len == 0 || req_data[len - 1] != '\0') { 8160254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 8170254c4d1SDavid Woodhouse return; 8180254c4d1SDavid Woodhouse } 8190254c4d1SDavid Woodhouse 8200254c4d1SDavid Woodhouse trace_xenstore_rm(tx_id, path); 8210254c4d1SDavid Woodhouse err = xs_impl_rm(s->impl, xen_domid, tx_id, path); 8220254c4d1SDavid Woodhouse if (err) { 8230254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 8240254c4d1SDavid Woodhouse return; 8250254c4d1SDavid Woodhouse } 8260254c4d1SDavid Woodhouse 8270254c4d1SDavid Woodhouse xs_ok(s, XS_RM, req_id, tx_id); 8280254c4d1SDavid Woodhouse } 8290254c4d1SDavid Woodhouse 8300254c4d1SDavid Woodhouse static void xs_get_perms(XenXenstoreState *s, unsigned int req_id, 8310254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 
8320254c4d1SDavid Woodhouse unsigned int len) 8330254c4d1SDavid Woodhouse { 8340254c4d1SDavid Woodhouse const char *path = (const char *)req_data; 8350254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 8360254c4d1SDavid Woodhouse GList *perms = NULL; 8370254c4d1SDavid Woodhouse int err; 8380254c4d1SDavid Woodhouse 8390254c4d1SDavid Woodhouse if (len == 0 || req_data[len - 1] != '\0') { 8400254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 8410254c4d1SDavid Woodhouse return; 8420254c4d1SDavid Woodhouse } 8430254c4d1SDavid Woodhouse 8440254c4d1SDavid Woodhouse trace_xenstore_get_perms(tx_id, path); 8450254c4d1SDavid Woodhouse err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms); 8460254c4d1SDavid Woodhouse if (err) { 8470254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 8480254c4d1SDavid Woodhouse return; 8490254c4d1SDavid Woodhouse } 8500254c4d1SDavid Woodhouse 8510254c4d1SDavid Woodhouse rsp->type = XS_GET_PERMS; 8520254c4d1SDavid Woodhouse rsp->req_id = req_id; 8530254c4d1SDavid Woodhouse rsp->tx_id = tx_id; 8540254c4d1SDavid Woodhouse rsp->len = 0; 8550254c4d1SDavid Woodhouse 8560254c4d1SDavid Woodhouse xs_append_strings(s, rsp, perms, 0, false); 8570254c4d1SDavid Woodhouse 8580254c4d1SDavid Woodhouse g_list_free_full(perms, g_free); 8590254c4d1SDavid Woodhouse } 8600254c4d1SDavid Woodhouse 8610254c4d1SDavid Woodhouse static void xs_set_perms(XenXenstoreState *s, unsigned int req_id, 8620254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 8630254c4d1SDavid Woodhouse unsigned int len) 8640254c4d1SDavid Woodhouse { 8650254c4d1SDavid Woodhouse const char *path = (const char *)req_data; 8660254c4d1SDavid Woodhouse uint8_t *perm; 8670254c4d1SDavid Woodhouse GList *perms = NULL; 8680254c4d1SDavid Woodhouse int err; 8690254c4d1SDavid Woodhouse 8700254c4d1SDavid Woodhouse if (len == 0) { 8710254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 8720254c4d1SDavid Woodhouse return; 
8730254c4d1SDavid Woodhouse } 8740254c4d1SDavid Woodhouse 8750254c4d1SDavid Woodhouse while (len--) { 8760254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 8770254c4d1SDavid Woodhouse break; 8780254c4d1SDavid Woodhouse } 8790254c4d1SDavid Woodhouse if (len == 0) { 8800254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 8810254c4d1SDavid Woodhouse return; 8820254c4d1SDavid Woodhouse } 8830254c4d1SDavid Woodhouse } 8840254c4d1SDavid Woodhouse 8850254c4d1SDavid Woodhouse perm = req_data; 8860254c4d1SDavid Woodhouse while (len--) { 8870254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 8880254c4d1SDavid Woodhouse perms = g_list_append(perms, perm); 8890254c4d1SDavid Woodhouse perm = req_data; 8900254c4d1SDavid Woodhouse } 8910254c4d1SDavid Woodhouse } 8920254c4d1SDavid Woodhouse 8930254c4d1SDavid Woodhouse /* 8940254c4d1SDavid Woodhouse * Note that there may be trailing garbage at the end of the buffer. 8950254c4d1SDavid Woodhouse * This is explicitly permitted by the '?' at the end of the definition: 8960254c4d1SDavid Woodhouse * 8970254c4d1SDavid Woodhouse * SET_PERMS <path>|<perm-as-string>|+? 
8980254c4d1SDavid Woodhouse */ 8990254c4d1SDavid Woodhouse 9000254c4d1SDavid Woodhouse trace_xenstore_set_perms(tx_id, path); 9010254c4d1SDavid Woodhouse err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms); 9020254c4d1SDavid Woodhouse g_list_free(perms); 9030254c4d1SDavid Woodhouse if (err) { 9040254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 9050254c4d1SDavid Woodhouse return; 9060254c4d1SDavid Woodhouse } 9070254c4d1SDavid Woodhouse 9080254c4d1SDavid Woodhouse xs_ok(s, XS_SET_PERMS, req_id, tx_id); 9090254c4d1SDavid Woodhouse } 9100254c4d1SDavid Woodhouse 9110254c4d1SDavid Woodhouse static void xs_watch(XenXenstoreState *s, unsigned int req_id, 9120254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 9130254c4d1SDavid Woodhouse unsigned int len) 9140254c4d1SDavid Woodhouse { 9150254c4d1SDavid Woodhouse const char *token, *path = (const char *)req_data; 9160254c4d1SDavid Woodhouse int err; 9170254c4d1SDavid Woodhouse 9180254c4d1SDavid Woodhouse if (len == 0) { 9190254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 9200254c4d1SDavid Woodhouse return; 9210254c4d1SDavid Woodhouse } 9220254c4d1SDavid Woodhouse 9230254c4d1SDavid Woodhouse while (len--) { 9240254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 9250254c4d1SDavid Woodhouse break; 9260254c4d1SDavid Woodhouse } 9270254c4d1SDavid Woodhouse if (len == 0) { 9280254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 9290254c4d1SDavid Woodhouse return; 9300254c4d1SDavid Woodhouse } 9310254c4d1SDavid Woodhouse } 9320254c4d1SDavid Woodhouse 9330254c4d1SDavid Woodhouse token = (const char *)req_data; 9340254c4d1SDavid Woodhouse while (len--) { 9350254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 9360254c4d1SDavid Woodhouse break; 9370254c4d1SDavid Woodhouse } 9380254c4d1SDavid Woodhouse if (len == 0) { 9390254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 9400254c4d1SDavid Woodhouse return; 9410254c4d1SDavid Woodhouse } 9420254c4d1SDavid Woodhouse } 
9430254c4d1SDavid Woodhouse 9440254c4d1SDavid Woodhouse /* 9450254c4d1SDavid Woodhouse * Note that there may be trailing garbage at the end of the buffer. 9460254c4d1SDavid Woodhouse * This is explicitly permitted by the '?' at the end of the definition: 9470254c4d1SDavid Woodhouse * 9480254c4d1SDavid Woodhouse * WATCH <wpath>|<token>|? 9490254c4d1SDavid Woodhouse */ 9500254c4d1SDavid Woodhouse 9510254c4d1SDavid Woodhouse trace_xenstore_watch(path, token); 9520254c4d1SDavid Woodhouse err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s); 9530254c4d1SDavid Woodhouse if (err) { 9540254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 9550254c4d1SDavid Woodhouse return; 9560254c4d1SDavid Woodhouse } 9570254c4d1SDavid Woodhouse 9580254c4d1SDavid Woodhouse xs_ok(s, XS_WATCH, req_id, tx_id); 9590254c4d1SDavid Woodhouse } 9600254c4d1SDavid Woodhouse 9610254c4d1SDavid Woodhouse static void xs_unwatch(XenXenstoreState *s, unsigned int req_id, 9620254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 9630254c4d1SDavid Woodhouse unsigned int len) 9640254c4d1SDavid Woodhouse { 9650254c4d1SDavid Woodhouse const char *token, *path = (const char *)req_data; 9660254c4d1SDavid Woodhouse int err; 9670254c4d1SDavid Woodhouse 9680254c4d1SDavid Woodhouse if (len == 0) { 9690254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 9700254c4d1SDavid Woodhouse return; 9710254c4d1SDavid Woodhouse } 9720254c4d1SDavid Woodhouse 9730254c4d1SDavid Woodhouse while (len--) { 9740254c4d1SDavid Woodhouse if (*req_data++ == '\0') { 9750254c4d1SDavid Woodhouse break; 9760254c4d1SDavid Woodhouse } 9770254c4d1SDavid Woodhouse if (len == 0) { 9780254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 9790254c4d1SDavid Woodhouse return; 9800254c4d1SDavid Woodhouse } 9810254c4d1SDavid Woodhouse } 9820254c4d1SDavid Woodhouse 9830254c4d1SDavid Woodhouse token = (const char *)req_data; 9840254c4d1SDavid Woodhouse while (len--) { 9850254c4d1SDavid Woodhouse if 
(*req_data++ == '\0') { 9860254c4d1SDavid Woodhouse break; 9870254c4d1SDavid Woodhouse } 9880254c4d1SDavid Woodhouse if (len == 0) { 9890254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 9900254c4d1SDavid Woodhouse return; 9910254c4d1SDavid Woodhouse } 9920254c4d1SDavid Woodhouse } 9930254c4d1SDavid Woodhouse 9940254c4d1SDavid Woodhouse trace_xenstore_unwatch(path, token); 9950254c4d1SDavid Woodhouse err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s); 9960254c4d1SDavid Woodhouse if (err) { 9970254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, err); 9980254c4d1SDavid Woodhouse return; 9990254c4d1SDavid Woodhouse } 10000254c4d1SDavid Woodhouse 10010254c4d1SDavid Woodhouse xs_ok(s, XS_UNWATCH, req_id, tx_id); 10020254c4d1SDavid Woodhouse } 10030254c4d1SDavid Woodhouse 10040254c4d1SDavid Woodhouse static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id, 10050254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *req_data, 10060254c4d1SDavid Woodhouse unsigned int len) 10070254c4d1SDavid Woodhouse { 10080254c4d1SDavid Woodhouse if (len == 0 || req_data[len - 1] != '\0') { 10090254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EINVAL); 10100254c4d1SDavid Woodhouse return; 10110254c4d1SDavid Woodhouse } 10120254c4d1SDavid Woodhouse 10130254c4d1SDavid Woodhouse trace_xenstore_reset_watches(); 10140254c4d1SDavid Woodhouse xs_impl_reset_watches(s->impl, xen_domid); 10150254c4d1SDavid Woodhouse 10160254c4d1SDavid Woodhouse xs_ok(s, XS_RESET_WATCHES, req_id, tx_id); 10170254c4d1SDavid Woodhouse } 10180254c4d1SDavid Woodhouse 10190254c4d1SDavid Woodhouse static void xs_priv(XenXenstoreState *s, unsigned int req_id, 10200254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *data, 10210254c4d1SDavid Woodhouse unsigned int len) 10220254c4d1SDavid Woodhouse { 10230254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, EACCES); 10240254c4d1SDavid Woodhouse } 10250254c4d1SDavid Woodhouse 10260254c4d1SDavid Woodhouse static void 
xs_unimpl(XenXenstoreState *s, unsigned int req_id, 10270254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *data, 10280254c4d1SDavid Woodhouse unsigned int len) 10290254c4d1SDavid Woodhouse { 10300254c4d1SDavid Woodhouse xs_error(s, req_id, tx_id, ENOSYS); 10310254c4d1SDavid Woodhouse } 10320254c4d1SDavid Woodhouse 10330254c4d1SDavid Woodhouse typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id, 10340254c4d1SDavid Woodhouse xs_transaction_t tx_id, uint8_t *data, 10350254c4d1SDavid Woodhouse unsigned int len); 10360254c4d1SDavid Woodhouse 10370254c4d1SDavid Woodhouse struct xsd_req { 10380254c4d1SDavid Woodhouse const char *name; 10390254c4d1SDavid Woodhouse xs_impl fn; 10400254c4d1SDavid Woodhouse }; 10410254c4d1SDavid Woodhouse #define XSD_REQ(_type, _fn) \ 10420254c4d1SDavid Woodhouse [_type] = { .name = #_type, .fn = _fn } 10430254c4d1SDavid Woodhouse 10440254c4d1SDavid Woodhouse struct xsd_req xsd_reqs[] = { 10450254c4d1SDavid Woodhouse XSD_REQ(XS_READ, xs_read), 10460254c4d1SDavid Woodhouse XSD_REQ(XS_WRITE, xs_write), 10470254c4d1SDavid Woodhouse XSD_REQ(XS_MKDIR, xs_mkdir), 10480254c4d1SDavid Woodhouse XSD_REQ(XS_DIRECTORY, xs_directory), 10490254c4d1SDavid Woodhouse XSD_REQ(XS_DIRECTORY_PART, xs_directory_part), 10500254c4d1SDavid Woodhouse XSD_REQ(XS_TRANSACTION_START, xs_transaction_start), 10510254c4d1SDavid Woodhouse XSD_REQ(XS_TRANSACTION_END, xs_transaction_end), 10520254c4d1SDavid Woodhouse XSD_REQ(XS_RM, xs_rm), 10530254c4d1SDavid Woodhouse XSD_REQ(XS_GET_PERMS, xs_get_perms), 10540254c4d1SDavid Woodhouse XSD_REQ(XS_SET_PERMS, xs_set_perms), 10550254c4d1SDavid Woodhouse XSD_REQ(XS_WATCH, xs_watch), 10560254c4d1SDavid Woodhouse XSD_REQ(XS_UNWATCH, xs_unwatch), 10570254c4d1SDavid Woodhouse XSD_REQ(XS_CONTROL, xs_priv), 10580254c4d1SDavid Woodhouse XSD_REQ(XS_INTRODUCE, xs_priv), 10590254c4d1SDavid Woodhouse XSD_REQ(XS_RELEASE, xs_priv), 10600254c4d1SDavid Woodhouse XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv), 10610254c4d1SDavid 
Woodhouse XSD_REQ(XS_RESUME, xs_priv), 10620254c4d1SDavid Woodhouse XSD_REQ(XS_SET_TARGET, xs_priv), 10630254c4d1SDavid Woodhouse XSD_REQ(XS_RESET_WATCHES, xs_reset_watches), 10640254c4d1SDavid Woodhouse }; 10650254c4d1SDavid Woodhouse 1066f3341e7bSDavid Woodhouse static void process_req(XenXenstoreState *s) 1067f3341e7bSDavid Woodhouse { 1068f3341e7bSDavid Woodhouse struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data; 10690254c4d1SDavid Woodhouse xs_impl handler = NULL; 1070f3341e7bSDavid Woodhouse 1071f3341e7bSDavid Woodhouse assert(req_pending(s)); 1072f3341e7bSDavid Woodhouse assert(!s->rsp_pending); 1073f3341e7bSDavid Woodhouse 10740254c4d1SDavid Woodhouse if (req->type < ARRAY_SIZE(xsd_reqs)) { 10750254c4d1SDavid Woodhouse handler = xsd_reqs[req->type].fn; 10760254c4d1SDavid Woodhouse } 10770254c4d1SDavid Woodhouse if (!handler) { 10780254c4d1SDavid Woodhouse handler = &xs_unimpl; 10790254c4d1SDavid Woodhouse } 10800254c4d1SDavid Woodhouse 10810254c4d1SDavid Woodhouse handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len); 1082f3341e7bSDavid Woodhouse 1083f3341e7bSDavid Woodhouse s->rsp_pending = true; 1084f3341e7bSDavid Woodhouse reset_req(s); 1085f3341e7bSDavid Woodhouse } 1086f3341e7bSDavid Woodhouse 1087f3341e7bSDavid Woodhouse static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr, 1088f3341e7bSDavid Woodhouse unsigned int len) 1089f3341e7bSDavid Woodhouse { 1090f3341e7bSDavid Woodhouse if (!len) { 1091f3341e7bSDavid Woodhouse return 0; 1092f3341e7bSDavid Woodhouse } 1093f3341e7bSDavid Woodhouse 1094f3341e7bSDavid Woodhouse XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod); 1095f3341e7bSDavid Woodhouse XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons); 1096f3341e7bSDavid Woodhouse unsigned int copied = 0; 1097f3341e7bSDavid Woodhouse 1098f3341e7bSDavid Woodhouse /* Ensure the ring contents don't cross the req_prod access. 
*/ 1099f3341e7bSDavid Woodhouse smp_rmb(); 1100f3341e7bSDavid Woodhouse 1101f3341e7bSDavid Woodhouse while (len) { 1102f3341e7bSDavid Woodhouse unsigned int avail = prod - cons; 1103f3341e7bSDavid Woodhouse unsigned int offset = MASK_XENSTORE_IDX(cons); 1104f3341e7bSDavid Woodhouse unsigned int copylen = avail; 1105f3341e7bSDavid Woodhouse 1106f3341e7bSDavid Woodhouse if (avail > XENSTORE_RING_SIZE) { 1107f3341e7bSDavid Woodhouse error_report("XenStore ring handling error"); 1108f3341e7bSDavid Woodhouse s->fatal_error = true; 1109f3341e7bSDavid Woodhouse break; 1110f3341e7bSDavid Woodhouse } else if (avail == 0) { 1111f3341e7bSDavid Woodhouse break; 1112f3341e7bSDavid Woodhouse } 1113f3341e7bSDavid Woodhouse 1114f3341e7bSDavid Woodhouse if (copylen > len) { 1115f3341e7bSDavid Woodhouse copylen = len; 1116f3341e7bSDavid Woodhouse } 1117f3341e7bSDavid Woodhouse if (copylen > XENSTORE_RING_SIZE - offset) { 1118f3341e7bSDavid Woodhouse copylen = XENSTORE_RING_SIZE - offset; 1119f3341e7bSDavid Woodhouse } 1120f3341e7bSDavid Woodhouse 1121f3341e7bSDavid Woodhouse memcpy(ptr, &s->xs->req[offset], copylen); 1122f3341e7bSDavid Woodhouse copied += copylen; 1123f3341e7bSDavid Woodhouse 1124f3341e7bSDavid Woodhouse ptr += copylen; 1125f3341e7bSDavid Woodhouse len -= copylen; 1126f3341e7bSDavid Woodhouse 1127f3341e7bSDavid Woodhouse cons += copylen; 1128f3341e7bSDavid Woodhouse } 1129f3341e7bSDavid Woodhouse 1130f3341e7bSDavid Woodhouse /* 1131f3341e7bSDavid Woodhouse * Not sure this ever mattered except on Alpha, but this barrier 1132f3341e7bSDavid Woodhouse * is to ensure that the update to req_cons is globally visible 1133f3341e7bSDavid Woodhouse * only after we have consumed all the data from the ring, and we 1134f3341e7bSDavid Woodhouse * don't end up seeing data written to the ring *after* the other 1135f3341e7bSDavid Woodhouse * end sees the update and writes more to the ring. 
Xen's own 1136f3341e7bSDavid Woodhouse * xenstored has the same barrier here (although with no comment 1137f3341e7bSDavid Woodhouse * at all, obviously, because it's Xen code). 1138f3341e7bSDavid Woodhouse */ 1139f3341e7bSDavid Woodhouse smp_mb(); 1140f3341e7bSDavid Woodhouse 1141f3341e7bSDavid Woodhouse qatomic_set(&s->xs->req_cons, cons); 1142f3341e7bSDavid Woodhouse 1143f3341e7bSDavid Woodhouse return copied; 1144f3341e7bSDavid Woodhouse } 1145f3341e7bSDavid Woodhouse 1146f3341e7bSDavid Woodhouse static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr, 1147f3341e7bSDavid Woodhouse unsigned int len) 1148f3341e7bSDavid Woodhouse { 1149f3341e7bSDavid Woodhouse if (!len) { 1150f3341e7bSDavid Woodhouse return 0; 1151f3341e7bSDavid Woodhouse } 1152f3341e7bSDavid Woodhouse 1153f3341e7bSDavid Woodhouse XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons); 1154f3341e7bSDavid Woodhouse XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod); 1155f3341e7bSDavid Woodhouse unsigned int copied = 0; 1156f3341e7bSDavid Woodhouse 1157f3341e7bSDavid Woodhouse /* 1158f3341e7bSDavid Woodhouse * This matches the barrier in copy_to_ring() (or the guest's 1159bad5cfcdSMichael Tokarev * equivalent) between writing the data to the ring and updating 1160f3341e7bSDavid Woodhouse * rsp_prod. It protects against the pathological case (which 1161f3341e7bSDavid Woodhouse * again I think never happened except on Alpha) where our 1162f3341e7bSDavid Woodhouse * subsequent writes to the ring could *cross* the read of 1163f3341e7bSDavid Woodhouse * rsp_cons and the guest could see the new data when it was 1164f3341e7bSDavid Woodhouse * intending to read the old. 
1165f3341e7bSDavid Woodhouse */ 1166f3341e7bSDavid Woodhouse smp_mb(); 1167f3341e7bSDavid Woodhouse 1168f3341e7bSDavid Woodhouse while (len) { 1169f3341e7bSDavid Woodhouse unsigned int avail = cons + XENSTORE_RING_SIZE - prod; 1170f3341e7bSDavid Woodhouse unsigned int offset = MASK_XENSTORE_IDX(prod); 1171f3341e7bSDavid Woodhouse unsigned int copylen = len; 1172f3341e7bSDavid Woodhouse 1173f3341e7bSDavid Woodhouse if (avail > XENSTORE_RING_SIZE) { 1174f3341e7bSDavid Woodhouse error_report("XenStore ring handling error"); 1175f3341e7bSDavid Woodhouse s->fatal_error = true; 1176f3341e7bSDavid Woodhouse break; 1177f3341e7bSDavid Woodhouse } else if (avail == 0) { 1178f3341e7bSDavid Woodhouse break; 1179f3341e7bSDavid Woodhouse } 1180f3341e7bSDavid Woodhouse 1181f3341e7bSDavid Woodhouse if (copylen > avail) { 1182f3341e7bSDavid Woodhouse copylen = avail; 1183f3341e7bSDavid Woodhouse } 1184f3341e7bSDavid Woodhouse if (copylen > XENSTORE_RING_SIZE - offset) { 1185f3341e7bSDavid Woodhouse copylen = XENSTORE_RING_SIZE - offset; 1186f3341e7bSDavid Woodhouse } 1187f3341e7bSDavid Woodhouse 1188f3341e7bSDavid Woodhouse 1189f3341e7bSDavid Woodhouse memcpy(&s->xs->rsp[offset], ptr, copylen); 1190f3341e7bSDavid Woodhouse copied += copylen; 1191f3341e7bSDavid Woodhouse 1192f3341e7bSDavid Woodhouse ptr += copylen; 1193f3341e7bSDavid Woodhouse len -= copylen; 1194f3341e7bSDavid Woodhouse 1195f3341e7bSDavid Woodhouse prod += copylen; 1196f3341e7bSDavid Woodhouse } 1197f3341e7bSDavid Woodhouse 1198f3341e7bSDavid Woodhouse /* Ensure the ring contents are seen before rsp_prod update. 
*/ 1199f3341e7bSDavid Woodhouse smp_wmb(); 1200f3341e7bSDavid Woodhouse 1201f3341e7bSDavid Woodhouse qatomic_set(&s->xs->rsp_prod, prod); 1202f3341e7bSDavid Woodhouse 1203f3341e7bSDavid Woodhouse return copied; 1204f3341e7bSDavid Woodhouse } 1205f3341e7bSDavid Woodhouse 1206f3341e7bSDavid Woodhouse static unsigned int get_req(XenXenstoreState *s) 1207f3341e7bSDavid Woodhouse { 1208f3341e7bSDavid Woodhouse unsigned int copied = 0; 1209f3341e7bSDavid Woodhouse 1210f3341e7bSDavid Woodhouse if (s->fatal_error) { 1211f3341e7bSDavid Woodhouse return 0; 1212f3341e7bSDavid Woodhouse } 1213f3341e7bSDavid Woodhouse 1214f3341e7bSDavid Woodhouse assert(!req_pending(s)); 1215f3341e7bSDavid Woodhouse 1216f3341e7bSDavid Woodhouse if (s->req_offset < XENSTORE_HEADER_SIZE) { 1217f3341e7bSDavid Woodhouse void *ptr = s->req_data + s->req_offset; 1218f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE; 1219f3341e7bSDavid Woodhouse unsigned int copylen = copy_from_ring(s, ptr, len); 1220f3341e7bSDavid Woodhouse 1221f3341e7bSDavid Woodhouse copied += copylen; 1222f3341e7bSDavid Woodhouse s->req_offset += copylen; 1223f3341e7bSDavid Woodhouse } 1224f3341e7bSDavid Woodhouse 1225f3341e7bSDavid Woodhouse if (s->req_offset >= XENSTORE_HEADER_SIZE) { 1226f3341e7bSDavid Woodhouse struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data; 1227f3341e7bSDavid Woodhouse 1228f3341e7bSDavid Woodhouse if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) { 1229f3341e7bSDavid Woodhouse error_report("Illegal XenStore request"); 1230f3341e7bSDavid Woodhouse s->fatal_error = true; 1231f3341e7bSDavid Woodhouse return 0; 1232f3341e7bSDavid Woodhouse } 1233f3341e7bSDavid Woodhouse 1234f3341e7bSDavid Woodhouse void *ptr = s->req_data + s->req_offset; 1235f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset; 1236f3341e7bSDavid Woodhouse unsigned int copylen = copy_from_ring(s, ptr, len); 1237f3341e7bSDavid Woodhouse 1238f3341e7bSDavid Woodhouse copied += copylen; 
1239f3341e7bSDavid Woodhouse s->req_offset += copylen; 1240f3341e7bSDavid Woodhouse } 1241f3341e7bSDavid Woodhouse 1242f3341e7bSDavid Woodhouse return copied; 1243f3341e7bSDavid Woodhouse } 1244f3341e7bSDavid Woodhouse 1245f3341e7bSDavid Woodhouse static unsigned int put_rsp(XenXenstoreState *s) 1246f3341e7bSDavid Woodhouse { 1247f3341e7bSDavid Woodhouse if (s->fatal_error) { 1248f3341e7bSDavid Woodhouse return 0; 1249f3341e7bSDavid Woodhouse } 1250f3341e7bSDavid Woodhouse 1251f3341e7bSDavid Woodhouse assert(s->rsp_pending); 1252f3341e7bSDavid Woodhouse 1253f3341e7bSDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 1254f3341e7bSDavid Woodhouse assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len); 1255f3341e7bSDavid Woodhouse 1256f3341e7bSDavid Woodhouse void *ptr = s->rsp_data + s->rsp_offset; 1257f3341e7bSDavid Woodhouse unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset; 1258f3341e7bSDavid Woodhouse unsigned int copylen = copy_to_ring(s, ptr, len); 1259f3341e7bSDavid Woodhouse 1260f3341e7bSDavid Woodhouse s->rsp_offset += copylen; 1261f3341e7bSDavid Woodhouse 1262f3341e7bSDavid Woodhouse /* Have we produced a complete response? 
*/ 1263f3341e7bSDavid Woodhouse if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) { 1264f3341e7bSDavid Woodhouse reset_rsp(s); 1265f3341e7bSDavid Woodhouse } 1266f3341e7bSDavid Woodhouse 1267f3341e7bSDavid Woodhouse return copylen; 1268f3341e7bSDavid Woodhouse } 1269f3341e7bSDavid Woodhouse 12700254c4d1SDavid Woodhouse static void deliver_watch(XenXenstoreState *s, const char *path, 12710254c4d1SDavid Woodhouse const char *token) 12720254c4d1SDavid Woodhouse { 12730254c4d1SDavid Woodhouse struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; 12740254c4d1SDavid Woodhouse uint8_t *rsp_data = (uint8_t *)&rsp[1]; 12750254c4d1SDavid Woodhouse unsigned int len; 12760254c4d1SDavid Woodhouse 12770254c4d1SDavid Woodhouse assert(!s->rsp_pending); 12780254c4d1SDavid Woodhouse 12790254c4d1SDavid Woodhouse trace_xenstore_watch_event(path, token); 12800254c4d1SDavid Woodhouse 12810254c4d1SDavid Woodhouse rsp->type = XS_WATCH_EVENT; 12820254c4d1SDavid Woodhouse rsp->req_id = 0; 12830254c4d1SDavid Woodhouse rsp->tx_id = 0; 12840254c4d1SDavid Woodhouse rsp->len = 0; 12850254c4d1SDavid Woodhouse 12860254c4d1SDavid Woodhouse len = strlen(path); 12870254c4d1SDavid Woodhouse 12880254c4d1SDavid Woodhouse /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */ 12890254c4d1SDavid Woodhouse assert(rsp->len + len < XENSTORE_PAYLOAD_MAX); 12900254c4d1SDavid Woodhouse 12910254c4d1SDavid Woodhouse memcpy(&rsp_data[rsp->len], path, len); 12920254c4d1SDavid Woodhouse rsp->len += len; 12930254c4d1SDavid Woodhouse rsp_data[rsp->len] = '\0'; 12940254c4d1SDavid Woodhouse rsp->len++; 12950254c4d1SDavid Woodhouse 12960254c4d1SDavid Woodhouse len = strlen(token); 12970254c4d1SDavid Woodhouse /* 12980254c4d1SDavid Woodhouse * It is possible for the guest to have chosen a token that will 12990254c4d1SDavid Woodhouse * not fit (along with the patch) into a watch event. We have no 13000254c4d1SDavid Woodhouse * choice but to drop the event if this is the case. 
13010254c4d1SDavid Woodhouse */ 13020254c4d1SDavid Woodhouse if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) { 13030254c4d1SDavid Woodhouse return; 13040254c4d1SDavid Woodhouse } 13050254c4d1SDavid Woodhouse 13060254c4d1SDavid Woodhouse memcpy(&rsp_data[rsp->len], token, len); 13070254c4d1SDavid Woodhouse rsp->len += len; 13080254c4d1SDavid Woodhouse rsp_data[rsp->len] = '\0'; 13090254c4d1SDavid Woodhouse rsp->len++; 13100254c4d1SDavid Woodhouse 13110254c4d1SDavid Woodhouse s->rsp_pending = true; 13120254c4d1SDavid Woodhouse } 13130254c4d1SDavid Woodhouse 13140254c4d1SDavid Woodhouse struct watch_event { 13150254c4d1SDavid Woodhouse char *path; 13160254c4d1SDavid Woodhouse char *token; 13170254c4d1SDavid Woodhouse }; 13180254c4d1SDavid Woodhouse 131903247512SDavid Woodhouse static void free_watch_event(struct watch_event *ev) 132003247512SDavid Woodhouse { 132103247512SDavid Woodhouse if (ev) { 132203247512SDavid Woodhouse g_free(ev->path); 132303247512SDavid Woodhouse g_free(ev->token); 132403247512SDavid Woodhouse g_free(ev); 132503247512SDavid Woodhouse } 132603247512SDavid Woodhouse } 132703247512SDavid Woodhouse 13280254c4d1SDavid Woodhouse static void queue_watch(XenXenstoreState *s, const char *path, 13290254c4d1SDavid Woodhouse const char *token) 13300254c4d1SDavid Woodhouse { 13310254c4d1SDavid Woodhouse struct watch_event *ev = g_new0(struct watch_event, 1); 13320254c4d1SDavid Woodhouse 13330254c4d1SDavid Woodhouse ev->path = g_strdup(path); 13340254c4d1SDavid Woodhouse ev->token = g_strdup(token); 13350254c4d1SDavid Woodhouse 13360254c4d1SDavid Woodhouse s->watch_events = g_list_append(s->watch_events, ev); 13370254c4d1SDavid Woodhouse } 13380254c4d1SDavid Woodhouse 13390254c4d1SDavid Woodhouse static void fire_watch_cb(void *opaque, const char *path, const char *token) 13400254c4d1SDavid Woodhouse { 13410254c4d1SDavid Woodhouse XenXenstoreState *s = opaque; 13420254c4d1SDavid Woodhouse 13430254c4d1SDavid Woodhouse assert(qemu_mutex_iothread_locked()); 
13440254c4d1SDavid Woodhouse 13450254c4d1SDavid Woodhouse /* 13460254c4d1SDavid Woodhouse * If there's a response pending, we obviously can't scribble over 13470254c4d1SDavid Woodhouse * it. But if there's a request pending, it has dibs on the buffer 13480254c4d1SDavid Woodhouse * too. 13490254c4d1SDavid Woodhouse * 13500254c4d1SDavid Woodhouse * In the common case of a watch firing due to backend activity 13510254c4d1SDavid Woodhouse * when the ring was otherwise idle, we should be able to copy the 13520254c4d1SDavid Woodhouse * strings directly into the rsp_data and thence the actual ring, 13530254c4d1SDavid Woodhouse * without needing to perform any allocations and queue them. 13540254c4d1SDavid Woodhouse */ 13550254c4d1SDavid Woodhouse if (s->rsp_pending || req_pending(s)) { 13560254c4d1SDavid Woodhouse queue_watch(s, path, token); 13570254c4d1SDavid Woodhouse } else { 13580254c4d1SDavid Woodhouse deliver_watch(s, path, token); 13590254c4d1SDavid Woodhouse /* 13604a5780f5SDavid Woodhouse * Attempt to queue the message into the actual ring, and send 13614a5780f5SDavid Woodhouse * the event channel notification if any bytes are copied. 
13620254c4d1SDavid Woodhouse */ 13634a5780f5SDavid Woodhouse if (s->rsp_pending && put_rsp(s) > 0) { 13640254c4d1SDavid Woodhouse xen_be_evtchn_notify(s->eh, s->be_port); 13650254c4d1SDavid Woodhouse } 13660254c4d1SDavid Woodhouse } 13674a5780f5SDavid Woodhouse } 13680254c4d1SDavid Woodhouse 13690254c4d1SDavid Woodhouse static void process_watch_events(XenXenstoreState *s) 13700254c4d1SDavid Woodhouse { 13710254c4d1SDavid Woodhouse struct watch_event *ev = s->watch_events->data; 13720254c4d1SDavid Woodhouse 13730254c4d1SDavid Woodhouse deliver_watch(s, ev->path, ev->token); 13740254c4d1SDavid Woodhouse 13750254c4d1SDavid Woodhouse s->watch_events = g_list_remove(s->watch_events, ev); 137603247512SDavid Woodhouse free_watch_event(ev); 13770254c4d1SDavid Woodhouse } 13780254c4d1SDavid Woodhouse 1379c08f5d0eSDavid Woodhouse static void xen_xenstore_event(void *opaque) 1380c08f5d0eSDavid Woodhouse { 1381c08f5d0eSDavid Woodhouse XenXenstoreState *s = opaque; 1382c08f5d0eSDavid Woodhouse evtchn_port_t port = xen_be_evtchn_pending(s->eh); 1383f3341e7bSDavid Woodhouse unsigned int copied_to, copied_from; 1384f3341e7bSDavid Woodhouse bool processed, notify = false; 1385f3341e7bSDavid Woodhouse 1386c08f5d0eSDavid Woodhouse if (port != s->be_port) { 1387c08f5d0eSDavid Woodhouse return; 1388c08f5d0eSDavid Woodhouse } 1389f3341e7bSDavid Woodhouse 1390c08f5d0eSDavid Woodhouse /* We know this is a no-op. 
*/ 1391c08f5d0eSDavid Woodhouse xen_be_evtchn_unmask(s->eh, port); 1392f3341e7bSDavid Woodhouse 1393f3341e7bSDavid Woodhouse do { 1394f3341e7bSDavid Woodhouse copied_to = copied_from = 0; 1395f3341e7bSDavid Woodhouse processed = false; 1396f3341e7bSDavid Woodhouse 13970254c4d1SDavid Woodhouse if (!s->rsp_pending && s->watch_events) { 13980254c4d1SDavid Woodhouse process_watch_events(s); 13990254c4d1SDavid Woodhouse } 14000254c4d1SDavid Woodhouse 1401f3341e7bSDavid Woodhouse if (s->rsp_pending) { 1402f3341e7bSDavid Woodhouse copied_to = put_rsp(s); 1403f3341e7bSDavid Woodhouse } 1404f3341e7bSDavid Woodhouse 1405f3341e7bSDavid Woodhouse if (!req_pending(s)) { 1406f3341e7bSDavid Woodhouse copied_from = get_req(s); 1407f3341e7bSDavid Woodhouse } 1408f3341e7bSDavid Woodhouse 14090254c4d1SDavid Woodhouse if (req_pending(s) && !s->rsp_pending && !s->watch_events) { 1410f3341e7bSDavid Woodhouse process_req(s); 1411f3341e7bSDavid Woodhouse processed = true; 1412f3341e7bSDavid Woodhouse } 1413f3341e7bSDavid Woodhouse 1414f3341e7bSDavid Woodhouse notify |= copied_to || copied_from; 1415f3341e7bSDavid Woodhouse } while (copied_to || copied_from || processed); 1416f3341e7bSDavid Woodhouse 1417f3341e7bSDavid Woodhouse if (notify) { 1418c08f5d0eSDavid Woodhouse xen_be_evtchn_notify(s->eh, s->be_port); 1419c08f5d0eSDavid Woodhouse } 1420f3341e7bSDavid Woodhouse } 1421c08f5d0eSDavid Woodhouse 1422c08f5d0eSDavid Woodhouse static void alloc_guest_port(XenXenstoreState *s) 1423c08f5d0eSDavid Woodhouse { 1424c08f5d0eSDavid Woodhouse struct evtchn_alloc_unbound alloc = { 1425c08f5d0eSDavid Woodhouse .dom = DOMID_SELF, 1426c08f5d0eSDavid Woodhouse .remote_dom = DOMID_QEMU, 1427c08f5d0eSDavid Woodhouse }; 1428c08f5d0eSDavid Woodhouse 1429c08f5d0eSDavid Woodhouse if (!xen_evtchn_alloc_unbound_op(&alloc)) { 1430c08f5d0eSDavid Woodhouse s->guest_port = alloc.port; 1431c08f5d0eSDavid Woodhouse } 1432c08f5d0eSDavid Woodhouse } 1433c08f5d0eSDavid Woodhouse 1434c08f5d0eSDavid Woodhouse int 
xen_xenstore_reset(void) 1435c08f5d0eSDavid Woodhouse { 1436c08f5d0eSDavid Woodhouse XenXenstoreState *s = xen_xenstore_singleton; 1437*d388c9f5SDavid Woodhouse GList *perms; 1438c08f5d0eSDavid Woodhouse int err; 1439c08f5d0eSDavid Woodhouse 1440c08f5d0eSDavid Woodhouse if (!s) { 1441c08f5d0eSDavid Woodhouse return -ENOTSUP; 1442c08f5d0eSDavid Woodhouse } 1443c08f5d0eSDavid Woodhouse 1444c08f5d0eSDavid Woodhouse s->req_offset = s->rsp_offset = 0; 1445c08f5d0eSDavid Woodhouse s->rsp_pending = false; 1446c08f5d0eSDavid Woodhouse 1447c08f5d0eSDavid Woodhouse if (!memory_region_is_mapped(&s->xenstore_page)) { 1448c08f5d0eSDavid Woodhouse uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS; 1449c08f5d0eSDavid Woodhouse xen_overlay_do_map_page(&s->xenstore_page, gpa); 1450c08f5d0eSDavid Woodhouse } 1451c08f5d0eSDavid Woodhouse 1452c08f5d0eSDavid Woodhouse alloc_guest_port(s); 1453c08f5d0eSDavid Woodhouse 1454c08f5d0eSDavid Woodhouse /* 1455c08f5d0eSDavid Woodhouse * As qemu/dom0, bind to the guest's port. For incoming migration, this 1456c08f5d0eSDavid Woodhouse * will be unbound as the guest's evtchn table is overwritten. We then 1457c08f5d0eSDavid Woodhouse * rebind to the correct guest port in xen_xenstore_post_load(). 
1458c08f5d0eSDavid Woodhouse */ 1459c08f5d0eSDavid Woodhouse err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port); 1460c08f5d0eSDavid Woodhouse if (err < 0) { 1461c08f5d0eSDavid Woodhouse return err; 1462c08f5d0eSDavid Woodhouse } 1463c08f5d0eSDavid Woodhouse s->be_port = err; 1464c08f5d0eSDavid Woodhouse 1465*d388c9f5SDavid Woodhouse /* Create frontend store nodes */ 1466*d388c9f5SDavid Woodhouse perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU)); 1467*d388c9f5SDavid Woodhouse perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid)); 1468*d388c9f5SDavid Woodhouse 1469*d388c9f5SDavid Woodhouse relpath_printf(s, perms, "store/port", "%u", s->guest_port); 1470*d388c9f5SDavid Woodhouse relpath_printf(s, perms, "store/ring-ref", "%lu", 1471*d388c9f5SDavid Woodhouse XEN_SPECIAL_PFN(XENSTORE)); 1472*d388c9f5SDavid Woodhouse 1473*d388c9f5SDavid Woodhouse g_list_free_full(perms, g_free); 1474*d388c9f5SDavid Woodhouse 1475d05864d2SDavid Woodhouse /* 1476d05864d2SDavid Woodhouse * We don't actually access the guest's page through the grant, because 1477d05864d2SDavid Woodhouse * this isn't real Xen, and we can just use the page we gave it in the 1478d05864d2SDavid Woodhouse * first place. Map the grant anyway, mostly for cosmetic purposes so 1479d05864d2SDavid Woodhouse * it *looks* like it's in use in the guest-visible grant table. 
1480d05864d2SDavid Woodhouse */ 1481d05864d2SDavid Woodhouse s->gt = qemu_xen_gnttab_open(); 1482d05864d2SDavid Woodhouse uint32_t xs_gntref = GNTTAB_RESERVED_XENSTORE; 1483d05864d2SDavid Woodhouse s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref, 1484d05864d2SDavid Woodhouse PROT_READ | PROT_WRITE); 1485d05864d2SDavid Woodhouse 1486c08f5d0eSDavid Woodhouse return 0; 1487c08f5d0eSDavid Woodhouse } 148803247512SDavid Woodhouse 148903247512SDavid Woodhouse struct qemu_xs_handle { 149003247512SDavid Woodhouse XenstoreImplState *impl; 149103247512SDavid Woodhouse GList *watches; 149203247512SDavid Woodhouse QEMUBH *watch_bh; 149303247512SDavid Woodhouse }; 149403247512SDavid Woodhouse 149503247512SDavid Woodhouse struct qemu_xs_watch { 149603247512SDavid Woodhouse struct qemu_xs_handle *h; 149703247512SDavid Woodhouse char *path; 149803247512SDavid Woodhouse xs_watch_fn fn; 149903247512SDavid Woodhouse void *opaque; 150003247512SDavid Woodhouse GList *events; 150103247512SDavid Woodhouse }; 150203247512SDavid Woodhouse 150303247512SDavid Woodhouse static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid) 150403247512SDavid Woodhouse { 150503247512SDavid Woodhouse return g_strdup_printf("/local/domain/%u", domid); 150603247512SDavid Woodhouse } 150703247512SDavid Woodhouse 150803247512SDavid Woodhouse static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t, 150903247512SDavid Woodhouse const char *path, unsigned int *num) 151003247512SDavid Woodhouse { 151103247512SDavid Woodhouse GList *items = NULL, *l; 151203247512SDavid Woodhouse unsigned int i = 0; 151303247512SDavid Woodhouse char **items_ret; 151403247512SDavid Woodhouse int err; 151503247512SDavid Woodhouse 151603247512SDavid Woodhouse err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items); 151703247512SDavid Woodhouse if (err) { 151803247512SDavid Woodhouse errno = err; 151903247512SDavid Woodhouse return NULL; 152003247512SDavid 
Woodhouse } 152103247512SDavid Woodhouse 152203247512SDavid Woodhouse items_ret = g_new0(char *, g_list_length(items) + 1); 152303247512SDavid Woodhouse *num = 0; 152403247512SDavid Woodhouse for (l = items; l; l = l->next) { 152503247512SDavid Woodhouse items_ret[i++] = l->data; 152603247512SDavid Woodhouse (*num)++; 152703247512SDavid Woodhouse } 152803247512SDavid Woodhouse g_list_free(items); 152903247512SDavid Woodhouse return items_ret; 153003247512SDavid Woodhouse } 153103247512SDavid Woodhouse 153203247512SDavid Woodhouse static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t, 153303247512SDavid Woodhouse const char *path, unsigned int *len) 153403247512SDavid Woodhouse { 153503247512SDavid Woodhouse GByteArray *data = g_byte_array_new(); 153603247512SDavid Woodhouse bool free_segment = false; 153703247512SDavid Woodhouse int err; 153803247512SDavid Woodhouse 153903247512SDavid Woodhouse err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data); 154003247512SDavid Woodhouse if (err) { 154103247512SDavid Woodhouse free_segment = true; 154203247512SDavid Woodhouse errno = err; 154303247512SDavid Woodhouse } else { 154403247512SDavid Woodhouse if (len) { 154503247512SDavid Woodhouse *len = data->len; 154603247512SDavid Woodhouse } 154703247512SDavid Woodhouse /* The xen-bus-helper code expects to get NUL terminated string! 
*/ 154803247512SDavid Woodhouse g_byte_array_append(data, (void *)"", 1); 154903247512SDavid Woodhouse } 155003247512SDavid Woodhouse 155103247512SDavid Woodhouse return g_byte_array_free(data, free_segment); 155203247512SDavid Woodhouse } 155303247512SDavid Woodhouse 155403247512SDavid Woodhouse static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t, 155503247512SDavid Woodhouse const char *path, const void *data, unsigned int len) 155603247512SDavid Woodhouse { 155703247512SDavid Woodhouse GByteArray *gdata = g_byte_array_new(); 155803247512SDavid Woodhouse int err; 155903247512SDavid Woodhouse 156003247512SDavid Woodhouse g_byte_array_append(gdata, data, len); 156103247512SDavid Woodhouse err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata); 156203247512SDavid Woodhouse g_byte_array_unref(gdata); 156303247512SDavid Woodhouse if (err) { 156403247512SDavid Woodhouse errno = err; 156503247512SDavid Woodhouse return false; 156603247512SDavid Woodhouse } 156703247512SDavid Woodhouse return true; 156803247512SDavid Woodhouse } 156903247512SDavid Woodhouse 157003247512SDavid Woodhouse static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t, 157103247512SDavid Woodhouse unsigned int owner, unsigned int domid, 157203247512SDavid Woodhouse unsigned int perms, const char *path) 157303247512SDavid Woodhouse { 157403247512SDavid Woodhouse g_autoptr(GByteArray) data = g_byte_array_new(); 157503247512SDavid Woodhouse GList *perms_list = NULL; 157603247512SDavid Woodhouse int err; 157703247512SDavid Woodhouse 157803247512SDavid Woodhouse /* mkdir does this */ 157903247512SDavid Woodhouse err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data); 158003247512SDavid Woodhouse if (err == ENOENT) { 158103247512SDavid Woodhouse err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data); 158203247512SDavid Woodhouse } 158303247512SDavid Woodhouse if (err) { 158403247512SDavid Woodhouse errno = err; 158503247512SDavid Woodhouse return false; 
158603247512SDavid Woodhouse } 158703247512SDavid Woodhouse 158803247512SDavid Woodhouse perms_list = g_list_append(perms_list, 158903247512SDavid Woodhouse xs_perm_as_string(XS_PERM_NONE, owner)); 159003247512SDavid Woodhouse perms_list = g_list_append(perms_list, 159103247512SDavid Woodhouse xs_perm_as_string(perms, domid)); 159203247512SDavid Woodhouse 159303247512SDavid Woodhouse err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list); 159403247512SDavid Woodhouse g_list_free_full(perms_list, g_free); 159503247512SDavid Woodhouse if (err) { 159603247512SDavid Woodhouse errno = err; 159703247512SDavid Woodhouse return false; 159803247512SDavid Woodhouse } 159903247512SDavid Woodhouse return true; 160003247512SDavid Woodhouse } 160103247512SDavid Woodhouse 160203247512SDavid Woodhouse static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t, 160303247512SDavid Woodhouse const char *path) 160403247512SDavid Woodhouse { 160503247512SDavid Woodhouse int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path); 160603247512SDavid Woodhouse if (err) { 160703247512SDavid Woodhouse errno = err; 160803247512SDavid Woodhouse return false; 160903247512SDavid Woodhouse } 161003247512SDavid Woodhouse return true; 161103247512SDavid Woodhouse } 161203247512SDavid Woodhouse 161303247512SDavid Woodhouse static void be_watch_bh(void *_h) 161403247512SDavid Woodhouse { 161503247512SDavid Woodhouse struct qemu_xs_handle *h = _h; 161603247512SDavid Woodhouse GList *l; 161703247512SDavid Woodhouse 161803247512SDavid Woodhouse for (l = h->watches; l; l = l->next) { 161903247512SDavid Woodhouse struct qemu_xs_watch *w = l->data; 162003247512SDavid Woodhouse 162103247512SDavid Woodhouse while (w->events) { 162203247512SDavid Woodhouse struct watch_event *ev = w->events->data; 162303247512SDavid Woodhouse 162403247512SDavid Woodhouse w->fn(w->opaque, ev->path); 162503247512SDavid Woodhouse 162603247512SDavid Woodhouse w->events = g_list_remove(w->events, ev); 
162703247512SDavid Woodhouse free_watch_event(ev); 162803247512SDavid Woodhouse } 162903247512SDavid Woodhouse } 163003247512SDavid Woodhouse } 163103247512SDavid Woodhouse 163203247512SDavid Woodhouse static void xs_be_watch_cb(void *opaque, const char *path, const char *token) 163303247512SDavid Woodhouse { 163403247512SDavid Woodhouse struct watch_event *ev = g_new0(struct watch_event, 1); 163503247512SDavid Woodhouse struct qemu_xs_watch *w = opaque; 163603247512SDavid Woodhouse 163703247512SDavid Woodhouse /* We don't care about the token */ 163803247512SDavid Woodhouse ev->path = g_strdup(path); 163903247512SDavid Woodhouse w->events = g_list_append(w->events, ev); 164003247512SDavid Woodhouse 164103247512SDavid Woodhouse qemu_bh_schedule(w->h->watch_bh); 164203247512SDavid Woodhouse } 164303247512SDavid Woodhouse 164403247512SDavid Woodhouse static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h, 164503247512SDavid Woodhouse const char *path, xs_watch_fn fn, 164603247512SDavid Woodhouse void *opaque) 164703247512SDavid Woodhouse { 164803247512SDavid Woodhouse struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1); 164903247512SDavid Woodhouse int err; 165003247512SDavid Woodhouse 165103247512SDavid Woodhouse w->h = h; 165203247512SDavid Woodhouse w->fn = fn; 165303247512SDavid Woodhouse w->opaque = opaque; 165403247512SDavid Woodhouse 165503247512SDavid Woodhouse err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w); 165603247512SDavid Woodhouse if (err) { 165703247512SDavid Woodhouse errno = err; 165803247512SDavid Woodhouse g_free(w); 165903247512SDavid Woodhouse return NULL; 166003247512SDavid Woodhouse } 166103247512SDavid Woodhouse 166203247512SDavid Woodhouse w->path = g_strdup(path); 166303247512SDavid Woodhouse h->watches = g_list_append(h->watches, w); 166403247512SDavid Woodhouse return w; 166503247512SDavid Woodhouse } 166603247512SDavid Woodhouse 166703247512SDavid Woodhouse static void xs_be_unwatch(struct 
qemu_xs_handle *h, struct qemu_xs_watch *w) 166803247512SDavid Woodhouse { 166903247512SDavid Woodhouse xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w); 167003247512SDavid Woodhouse 167103247512SDavid Woodhouse h->watches = g_list_remove(h->watches, w); 167203247512SDavid Woodhouse g_list_free_full(w->events, (GDestroyNotify)free_watch_event); 167303247512SDavid Woodhouse g_free(w->path); 167403247512SDavid Woodhouse g_free(w); 167503247512SDavid Woodhouse } 167603247512SDavid Woodhouse 167703247512SDavid Woodhouse static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h) 167803247512SDavid Woodhouse { 167903247512SDavid Woodhouse unsigned int new_tx = XBT_NULL; 168003247512SDavid Woodhouse int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx); 168103247512SDavid Woodhouse if (err) { 168203247512SDavid Woodhouse errno = err; 168303247512SDavid Woodhouse return XBT_NULL; 168403247512SDavid Woodhouse } 168503247512SDavid Woodhouse return new_tx; 168603247512SDavid Woodhouse } 168703247512SDavid Woodhouse 168803247512SDavid Woodhouse static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t, 168903247512SDavid Woodhouse bool abort) 169003247512SDavid Woodhouse { 169103247512SDavid Woodhouse int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort); 169203247512SDavid Woodhouse if (err) { 169303247512SDavid Woodhouse errno = err; 169403247512SDavid Woodhouse return false; 169503247512SDavid Woodhouse } 169603247512SDavid Woodhouse return true; 169703247512SDavid Woodhouse } 169803247512SDavid Woodhouse 169903247512SDavid Woodhouse static struct qemu_xs_handle *xs_be_open(void) 170003247512SDavid Woodhouse { 170103247512SDavid Woodhouse XenXenstoreState *s = xen_xenstore_singleton; 170203247512SDavid Woodhouse struct qemu_xs_handle *h; 170303247512SDavid Woodhouse 1704c9bdfe8dSDavid Woodhouse if (!s || !s->impl) { 170503247512SDavid Woodhouse errno = -ENOSYS; 170603247512SDavid Woodhouse 
return NULL; 170703247512SDavid Woodhouse } 170803247512SDavid Woodhouse 170903247512SDavid Woodhouse h = g_new0(struct qemu_xs_handle, 1); 171003247512SDavid Woodhouse h->impl = s->impl; 171103247512SDavid Woodhouse 171203247512SDavid Woodhouse h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h); 171303247512SDavid Woodhouse 171403247512SDavid Woodhouse return h; 171503247512SDavid Woodhouse } 171603247512SDavid Woodhouse 171703247512SDavid Woodhouse static void xs_be_close(struct qemu_xs_handle *h) 171803247512SDavid Woodhouse { 171903247512SDavid Woodhouse while (h->watches) { 172003247512SDavid Woodhouse struct qemu_xs_watch *w = h->watches->data; 172103247512SDavid Woodhouse xs_be_unwatch(h, w); 172203247512SDavid Woodhouse } 172303247512SDavid Woodhouse 172403247512SDavid Woodhouse qemu_bh_delete(h->watch_bh); 172503247512SDavid Woodhouse g_free(h); 172603247512SDavid Woodhouse } 172703247512SDavid Woodhouse 172803247512SDavid Woodhouse static struct xenstore_backend_ops emu_xenstore_backend_ops = { 172903247512SDavid Woodhouse .open = xs_be_open, 173003247512SDavid Woodhouse .close = xs_be_close, 173103247512SDavid Woodhouse .get_domain_path = xs_be_get_domain_path, 173203247512SDavid Woodhouse .directory = xs_be_directory, 173303247512SDavid Woodhouse .read = xs_be_read, 173403247512SDavid Woodhouse .write = xs_be_write, 173503247512SDavid Woodhouse .create = xs_be_create, 173603247512SDavid Woodhouse .destroy = xs_be_destroy, 173703247512SDavid Woodhouse .watch = xs_be_watch, 173803247512SDavid Woodhouse .unwatch = xs_be_unwatch, 173903247512SDavid Woodhouse .transaction_start = xs_be_transaction_start, 174003247512SDavid Woodhouse .transaction_end = xs_be_transaction_end, 174103247512SDavid Woodhouse }; 1742