Lines Matching "--disable-vhost-crypto" in QEMU's hw/virtio/vhost-user.c

2  * vhost-user
7 * See the COPYING file in the top-level directory.
13 #include "hw/virtio/virtio-dmabuf.h"
14 #include "hw/virtio/vhost.h"
15 #include "hw/virtio/virtio-crypto.h"
16 #include "hw/virtio/vhost-user.h"
17 #include "hw/virtio/vhost-backend.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "chardev/char-fe.h"
21 #include "io/channel-socket.h"
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
29 #include "migration/postcopy-ram.h"
37 #include "standard-headers/linux/vhost_types.h"
60 #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
169  /* session id on success, -1 on errors */
244 /* Shared between vhost devs of the same virtio device */
257 * vhost region.
277 struct vhost_user *u = dev->opaque; in vhost_user_read_header()
278 CharBackend *chr = u->user->chr; in vhost_user_read_header()
286 " Original request %d.", r, size, msg->hdr.request); in vhost_user_read_header()
287 return r < 0 ? -saved_errno : -EIO; in vhost_user_read_header()
291 if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) { in vhost_user_read_header()
293 " Flags 0x%x instead of 0x%x.", msg->hdr.flags, in vhost_user_read_header()
295 return -EPROTO; in vhost_user_read_header()
298 trace_vhost_user_read(msg->hdr.request, msg->hdr.flags); in vhost_user_read_header()
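The `r < 0 ? -saved_errno : -EIO` pattern above recurs throughout this file. A standalone sketch of the idiom (the function name is illustrative, not from the file):

#include <errno.h>
#include <unistd.h>

/* Capture errno immediately after the failing call, before any other
 * library call can clobber it; report short reads as -EIO. */
static int read_exact(int fd, void *buf, size_t size)
{
    ssize_t r = read(fd, buf, size);
    int saved_errno = errno;

    if (r != (ssize_t)size) {
        return r < 0 ? -saved_errno : -EIO;
    }
    return 0;
}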
305 struct vhost_user *u = dev->opaque; in vhost_user_read()
306 CharBackend *chr = u->user->chr; in vhost_user_read()
316 if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) { in vhost_user_read()
318 " Size %d exceeds the maximum %zu.", msg->hdr.size, in vhost_user_read()
320 return -EPROTO; in vhost_user_read()
323 if (msg->hdr.size) { in vhost_user_read()
325 size = msg->hdr.size; in vhost_user_read()
330 " Read %d instead of %d.", r, msg->hdr.size); in vhost_user_read()
331 return r < 0 ? -saved_errno : -EIO; in vhost_user_read()
344 if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) { in process_message_reply()
353 if (msg_reply.hdr.request != msg->hdr.request) { in process_message_reply()
356 msg->hdr.request, msg_reply.hdr.request); in process_message_reply()
357 return -EPROTO; in process_message_reply()
360 return msg_reply.payload.u64 ? -EIO : 0; in process_message_reply()
381 /* most non-init callers ignore the error */
385 struct vhost_user *u = dev->opaque; in vhost_user_write()
386 CharBackend *chr = u->user->chr; in vhost_user_write()
387 int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size; in vhost_user_write()
390 * Some devices, like virtio-scsi, are implemented as a single vhost_dev, in vhost_user_write()
391 * while others, like virtio-net, contain multiple vhost_devs. For in vhost_user_write()
394 * vhost-user messages should only be sent once. in vhost_user_write()
396 * Devices with multiple vhost_devs are given an associated dev->vq_index in vhost_user_write()
399 if (vhost_user_per_device_request(msg->hdr.request) in vhost_user_write()
400 && dev->vq_index != 0) { in vhost_user_write()
401 msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK; in vhost_user_write()
407 return -EINVAL; in vhost_user_write()
415 return ret < 0 ? -saved_errno : -EIO; in vhost_user_write()
418 trace_vhost_user_write(msg->hdr.request, msg->hdr.flags); in vhost_user_write()
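The guard shown above relies on a predicate that classifies message types as "per device", so that only the vhost_dev with vq_index 0 sends them. A minimal sketch of such a predicate; the exact request list here is an assumption, not the file's actual table:

/* Hedged sketch: which requests count as per-device is assumed. */
static bool vhost_user_per_device_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_MAX_MEM_SLOTS:
        return true;
    default:
        return false;
    }
}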
438 bool shmfd = virtio_has_feature(dev->protocol_features, in vhost_user_set_log_base()
444 .payload.log.mmap_size = log->size * sizeof(*(log->log)), in vhost_user_set_log_base()
450 if (dev->vq_index != 0) { in vhost_user_set_log_base()
454 if (shmfd && log->fd != -1) { in vhost_user_set_log_base()
455 fds[fd_num++] = log->fd; in vhost_user_set_log_base()
474 return -EPROTO; in vhost_user_set_log_base()
489 *offset += mr->ram_block->fd_offset; in vhost_user_get_mr_data()
499 dst->userspace_addr = src->userspace_addr; in vhost_user_fill_msg_region()
500 dst->memory_size = src->memory_size; in vhost_user_fill_msg_region()
501 dst->guest_phys_addr = src->guest_phys_addr; in vhost_user_fill_msg_region()
502 dst->mmap_offset = mmap_offset; in vhost_user_fill_msg_region()
517 msg->hdr.request = VHOST_USER_SET_MEM_TABLE; in vhost_user_fill_set_mem_table_msg()
519 for (i = 0; i < dev->mem->nregions; ++i) { in vhost_user_fill_set_mem_table_msg()
520 reg = dev->mem->regions + i; in vhost_user_fill_set_mem_table_msg()
522 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in vhost_user_fill_set_mem_table_msg()
526 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name, in vhost_user_fill_set_mem_table_msg()
527 reg->memory_size, in vhost_user_fill_set_mem_table_msg()
528 reg->guest_phys_addr, in vhost_user_fill_set_mem_table_msg()
529 reg->userspace_addr, in vhost_user_fill_set_mem_table_msg()
531 u->region_rb_offset[i] = offset; in vhost_user_fill_set_mem_table_msg()
532 u->region_rb[i] = mr->ram_block; in vhost_user_fill_set_mem_table_msg()
534 error_report("Failed preparing vhost-user memory table msg"); in vhost_user_fill_set_mem_table_msg()
535 return -ENOBUFS; in vhost_user_fill_set_mem_table_msg()
538 msg->payload.memory.regions[*fd_num] = region_buffer; in vhost_user_fill_set_mem_table_msg()
541 u->region_rb_offset[i] = 0; in vhost_user_fill_set_mem_table_msg()
542 u->region_rb[i] = NULL; in vhost_user_fill_set_mem_table_msg()
546 msg->payload.memory.nregions = *fd_num; in vhost_user_fill_set_mem_table_msg()
549 error_report("Failed initializing vhost-user memory map, " in vhost_user_fill_set_mem_table_msg()
550 "consider using -object memory-backend-file share=on"); in vhost_user_fill_set_mem_table_msg()
551 return -EINVAL; in vhost_user_fill_set_mem_table_msg()
554 msg->hdr.size = sizeof(msg->payload.memory.nregions); in vhost_user_fill_set_mem_table_msg()
555 msg->hdr.size += sizeof(msg->payload.memory.padding); in vhost_user_fill_set_mem_table_msg()
556 msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion); in vhost_user_fill_set_mem_table_msg()
564 return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr && in reg_equal()
565 shadow_reg->userspace_addr == vdev_reg->userspace_addr && in reg_equal()
566 shadow_reg->memory_size == vdev_reg->memory_size; in reg_equal()
576 struct vhost_user *u = dev->opaque; in scrub_shadow_regions()
590 for (i = 0; i < u->num_shadow_regions; i++) { in scrub_shadow_regions()
591 shadow_reg = &u->shadow_regions[i]; in scrub_shadow_regions()
594 for (j = 0; j < dev->mem->nregions; j++) { in scrub_shadow_regions()
595 reg = &dev->mem->regions[j]; in scrub_shadow_regions()
597 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in scrub_shadow_regions()
608 u->region_rb_offset[j] = offset; in scrub_shadow_regions()
609 u->region_rb[j] = mr->ram_block; in scrub_shadow_regions()
610 shadow_pcb[j] = u->postcopy_client_bases[i]; in scrub_shadow_regions()
612 u->region_rb_offset[j] = 0; in scrub_shadow_regions()
613 u->region_rb[j] = NULL; in scrub_shadow_regions()
636 for (i = 0; i < dev->mem->nregions; i++) { in scrub_shadow_regions()
637 reg = &dev->mem->regions[i]; in scrub_shadow_regions()
638 vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in scrub_shadow_regions()
664 struct vhost_user *u = dev->opaque; in send_remove_regions()
675 for (i = nr_rem_reg - 1; i >= 0; i--) { in send_remove_regions()
679 vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd); in send_remove_regions()
682 msg->hdr.request = VHOST_USER_REM_MEM_REG; in send_remove_regions()
684 msg->payload.mem_reg.region = region_buffer; in send_remove_regions()
703 memmove(&u->shadow_regions[shadow_reg_idx], in send_remove_regions()
704 &u->shadow_regions[shadow_reg_idx + 1], in send_remove_regions()
706 (u->num_shadow_regions - shadow_reg_idx - 1)); in send_remove_regions()
707 u->num_shadow_regions--; in send_remove_regions()
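The memmove above compacts the shadow-region array after a removal; note the removal loop runs in reverse so earlier indices stay valid. The same compaction as a standalone helper (the struct is a stand-in type):

#include <string.h>

struct region { unsigned long gpa, size; };  /* stand-in type */

/* Delete element idx from an n-element array by sliding the tail down. */
static void remove_at(struct region *arr, int *n, int idx)
{
    memmove(&arr[idx], &arr[idx + 1],
            sizeof(arr[0]) * (*n - idx - 1));
    (*n)--;
}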
718 struct vhost_user *u = dev->opaque; in send_add_regions()
731 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in send_add_regions()
735 trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name, in send_add_regions()
736 reg->memory_size, in send_add_regions()
737 reg->guest_phys_addr, in send_add_regions()
738 reg->userspace_addr, in send_add_regions()
740 u->region_rb_offset[reg_idx] = offset; in send_add_regions()
741 u->region_rb[reg_idx] = mr->ram_block; in send_add_regions()
743 msg->hdr.request = VHOST_USER_ADD_MEM_REG; in send_add_regions()
745 msg->payload.mem_reg.region = region_buffer; in send_add_regions()
767 return -EPROTO; in send_add_regions()
774 if (msg_reply.hdr.size != msg->hdr.size) { in send_add_regions()
777 msg->hdr.size); in send_add_regions()
778 return -EPROTO; in send_add_regions()
782 if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) { in send_add_regions()
787 msg->payload.mem_reg.region.userspace_addr, in send_add_regions()
793 dev->mem->regions[reg_idx].guest_phys_addr); in send_add_regions()
794 return -EPROTO; in send_add_regions()
803 u->region_rb_offset[reg_idx] = 0; in send_add_regions()
804 u->region_rb[reg_idx] = NULL; in send_add_regions()
813 u->shadow_regions[u->num_shadow_regions].guest_phys_addr = in send_add_regions()
814 reg->guest_phys_addr; in send_add_regions()
815 u->shadow_regions[u->num_shadow_regions].userspace_addr = in send_add_regions()
816 reg->userspace_addr; in send_add_regions()
817 u->shadow_regions[u->num_shadow_regions].memory_size = in send_add_regions()
818 reg->memory_size; in send_add_regions()
819 u->num_shadow_regions++; in send_add_regions()
830 struct vhost_user *u = dev->opaque; in vhost_user_add_remove_regions()
837 msg->hdr.size = sizeof(msg->payload.mem_reg); in vhost_user_add_remove_regions()
860 memcpy(u->postcopy_client_bases, shadow_pcb, in vhost_user_add_remove_regions()
868 msg->hdr.size = sizeof(msg->payload.u64); in vhost_user_add_remove_regions()
869 msg->payload.u64 = 0; /* OK */ in vhost_user_add_remove_regions()
881 memcpy(u->postcopy_client_bases, shadow_pcb, in vhost_user_add_remove_regions()
893 struct vhost_user *u = dev->opaque; in vhost_user_set_mem_table_postcopy()
904 if (u->region_rb_len < dev->mem->nregions) { in vhost_user_set_mem_table_postcopy()
905 u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions); in vhost_user_set_mem_table_postcopy()
906 u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset, in vhost_user_set_mem_table_postcopy()
907 dev->mem->nregions); in vhost_user_set_mem_table_postcopy()
908 memset(&(u->region_rb[u->region_rb_len]), '\0', in vhost_user_set_mem_table_postcopy()
909 sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len)); in vhost_user_set_mem_table_postcopy()
910 memset(&(u->region_rb_offset[u->region_rb_len]), '\0', in vhost_user_set_mem_table_postcopy()
911 sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len)); in vhost_user_set_mem_table_postcopy()
912 u->region_rb_len = dev->mem->nregions; in vhost_user_set_mem_table_postcopy()
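The block above grows two parallel arrays with g_renew() and zeroes only the newly appended tail. The idiom in isolation, with illustrative names:

#include <glib.h>
#include <string.h>

/* g_renew() preserves the old contents, so only the tail that was
 * just added needs clearing. */
static guint64 *grow_zeroed(guint64 *arr, guint old_len, guint new_len)
{
    arr = g_renew(guint64, arr, new_len);
    memset(&arr[old_len], 0, sizeof(guint64) * (new_len - old_len));
    return arr;
}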
941 return -EPROTO; in vhost_user_set_mem_table_postcopy()
952 return -EPROTO; in vhost_user_set_mem_table_postcopy()
955 memset(u->postcopy_client_bases, 0, in vhost_user_set_mem_table_postcopy()
964 region_i < dev->mem->nregions; in vhost_user_set_mem_table_postcopy()
968 dev->mem->regions[region_i].guest_phys_addr) { in vhost_user_set_mem_table_postcopy()
969 u->postcopy_client_bases[region_i] = in vhost_user_set_mem_table_postcopy()
982 return -EIO; in vhost_user_set_mem_table_postcopy()
1005 struct vhost_user *u = dev->opaque; in vhost_user_set_mem_table()
1008 bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler; in vhost_user_set_mem_table()
1009 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_set_mem_table()
1012 virtio_has_feature(dev->protocol_features, in vhost_user_set_mem_table()
1061 bool cross_endian = virtio_has_feature(dev->protocol_features, in vhost_user_set_vring_endian()
1071 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_set_vring_endian()
1072 return -ENOTSUP; in vhost_user_set_vring_endian()
1086 if (vhost_user_per_device_request(request) && dev->vq_index != 0) { in vhost_user_get_u64()
1103 return -EPROTO; in vhost_user_get_u64()
1108 return -EPROTO; in vhost_user_get_u64()
1119 return -EPROTO; in vhost_user_get_features()
1125 /* Note: "msg->hdr.flags" may be modified. */
1132 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_write_sync()
1135 msg->hdr.flags |= VHOST_USER_NEED_REPLY_MASK; in vhost_user_write_sync()
1147 if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) { in vhost_user_write_sync()
1186 if (n->unmap_addr) { in vhost_user_host_notifier_free()
1187 munmap(n->unmap_addr, qemu_real_host_page_size()); in vhost_user_host_notifier_free()
1188 n->unmap_addr = NULL; in vhost_user_host_notifier_free()
1190 if (n->destroy) { in vhost_user_host_notifier_free()
1192 object_unparent(OBJECT(&n->mr)); in vhost_user_host_notifier_free()
1199 * clean-up function for notifier, will finally free the structure
1206 * if destroy == false and n->addr == NULL, we have nothing to do. in vhost_user_host_notifier_remove()
1209 if (!n || (!destroy && !n->addr)) { in vhost_user_host_notifier_remove()
1213 if (n->addr) { in vhost_user_host_notifier_remove()
1216 virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false); in vhost_user_host_notifier_remove()
1219 assert(!n->unmap_addr); in vhost_user_host_notifier_remove()
1220 n->unmap_addr = n->addr; in vhost_user_host_notifier_remove()
1221 n->addr = NULL; in vhost_user_host_notifier_remove()
1223 n->destroy = destroy; in vhost_user_host_notifier_remove()
1237 if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) { in vhost_user_set_vring_enable()
1238 return -EINVAL; in vhost_user_set_vring_enable()
1241 for (i = 0; i < dev->nvqs; ++i) { in vhost_user_set_vring_enable()
1244 .index = dev->vq_index + i, in vhost_user_set_vring_enable()
1249 * SET_VRING_ENABLE travels from guest to QEMU to vhost-user backend / in vhost_user_set_vring_enable()
1251 * from guest to vhost-user backend / data plane thread via eventfd. in vhost_user_set_vring_enable()
1257 * seemingly disabled queue). To prevent this out-of-order delivery, in vhost_user_set_vring_enable()
1259 * backend control plane acknowledges enabling the queue -- IOW, pass in vhost_user_set_vring_enable()
1267 * the device-level recovery. in vhost_user_set_vring_enable()
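A hedged sketch of what this comment describes: send SET_VRING_ENABLE synchronously so the call only returns once the back-end's control plane has acknowledged it. This leans on vhost_user_write_sync() shown earlier and assumes VHOST_USER_PROTOCOL_F_REPLY_ACK was negotiated; it is not the file's verbatim code:

ret = vhost_user_write_sync(dev, &msg, true /* wait_for_reply */);
if (ret < 0) {
    /* No ack: the ring's enabled state is unknown; let the caller
     * fall back to device-level recovery. */
    return ret;
}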
1279 if (idx >= u->notifiers->len) { in fetch_notifier()
1282 return g_ptr_array_index(u->notifiers, idx); in fetch_notifier()
1295 struct vhost_user *u = dev->opaque; in vhost_user_get_vring_base()
1297 VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index); in vhost_user_get_vring_base()
1298 vhost_user_host_notifier_remove(n, dev->vdev, false); in vhost_user_get_vring_base()
1313 return -EPROTO; in vhost_user_get_vring_base()
1318 return -EPROTO; in vhost_user_get_vring_base()
1335 .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, in vhost_set_vring_file()
1339 if (file->fd > 0) { in vhost_set_vring_file()
1340 fds[fd_num++] = file->fd; in vhost_set_vring_file()
1380 bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG); in vhost_user_set_vring_addr()
1452 features | dev->backend_features, in vhost_user_set_features()
1455 if (virtio_has_feature(dev->protocol_features, in vhost_user_set_features()
1510 if (!virtio_has_feature(dev->protocol_features, in vhost_user_reset_device()
1512 return -ENOSYS; in vhost_user_reset_device()
1520 if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { in vhost_user_backend_handle_config_change()
1521 return -ENOSYS; in vhost_user_backend_handle_config_change()
1524 return dev->config_ops->vhost_dev_config_notifier(dev); in vhost_user_backend_handle_config_change()
1535 if (idx >= u->notifiers->len) { in fetch_or_create_notifier()
1536 g_ptr_array_set_size(u->notifiers, idx + 1); in fetch_or_create_notifier()
1539 n = g_ptr_array_index(u->notifiers, idx); in fetch_or_create_notifier()
1542          * In case notifications arrive out-of-order,  in fetch_or_create_notifier()
1545 g_ptr_array_remove_index(u->notifiers, idx); in fetch_or_create_notifier()
1547 n->idx = idx; in fetch_or_create_notifier()
1548 g_ptr_array_insert(u->notifiers, idx, n); in fetch_or_create_notifier()
1559 int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK; in vhost_user_backend_handle_vring_host_notifier()
1561 struct vhost_user *u = dev->opaque; in vhost_user_backend_handle_vring_host_notifier()
1562 VhostUserState *user = u->user; in vhost_user_backend_handle_vring_host_notifier()
1563 VirtIODevice *vdev = dev->vdev; in vhost_user_backend_handle_vring_host_notifier()
1568 if (!virtio_has_feature(dev->protocol_features, in vhost_user_backend_handle_vring_host_notifier()
1571 return -EINVAL; in vhost_user_backend_handle_vring_host_notifier()
1581 if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { in vhost_user_backend_handle_vring_host_notifier()
1586 if (area->size != page_size) { in vhost_user_backend_handle_vring_host_notifier()
1587 return -EINVAL; in vhost_user_backend_handle_vring_host_notifier()
1591 fd, area->offset); in vhost_user_backend_handle_vring_host_notifier()
1593 return -EFAULT; in vhost_user_backend_handle_vring_host_notifier()
1596 name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", in vhost_user_backend_handle_vring_host_notifier()
1598 if (!n->mr.ram) { /* Don't init again after suspend. */ in vhost_user_backend_handle_vring_host_notifier()
1599 memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, in vhost_user_backend_handle_vring_host_notifier()
1602 n->mr.ram_block->host = addr; in vhost_user_backend_handle_vring_host_notifier()
1606 if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { in vhost_user_backend_handle_vring_host_notifier()
1607 object_unparent(OBJECT(&n->mr)); in vhost_user_backend_handle_vring_host_notifier()
1609 return -ENXIO; in vhost_user_backend_handle_vring_host_notifier()
1612 n->addr = addr; in vhost_user_backend_handle_vring_host_notifier()
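A hedged sketch of the mapping step this handler performs just before the lines above: one host page of the back-end-supplied fd is mapped read/write-shared at area->offset. The PROT_/MAP_ flags are the conventional choice for a shared notifier page and are assumed here:

#include <sys/mman.h>

void *addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                  MAP_SHARED, fd, area->offset);
if (addr == MAP_FAILED) {
    return -EFAULT;   /* consistent with the -EFAULT return above */
}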
1623 memcpy(uuid.data, object->uuid, sizeof(object->uuid)); in vhost_user_backend_handle_shared_object_add()
1638 memcpy(uuid.data, object->uuid, sizeof(object->uuid)); in vhost_user_backend_handle_shared_object_remove()
1644 /* Not allowed to remove non-owned entries */ in vhost_user_backend_handle_shared_object_remove()
1650 /* Not allowed to remove non-owned entries */ in vhost_user_backend_handle_shared_object_remove()
1662 { .iov_base = payload, .iov_len = hdr->size }, in vhost_user_send_resp()
1665 hdr->flags &= ~VHOST_USER_NEED_REPLY_MASK; in vhost_user_send_resp()
1666 hdr->flags |= VHOST_USER_REPLY_MASK; in vhost_user_send_resp()
1675 hdr->size = sizeof(payload->u64); in vhost_user_backend_send_dmabuf_fd()
1682 struct vhost_user *u = dev->opaque; in vhost_user_get_shared_object()
1683 CharBackend *chr = u->user->chr; in vhost_user_get_shared_object()
1705 return -EPROTO; in vhost_user_get_shared_object()
1711 return -EIO; in vhost_user_get_shared_object()
1724 CharBackend *chr = u->user->chr; in vhost_user_backend_handle_shared_object_lookup()
1726 int dmabuf_fd = -1; in vhost_user_backend_handle_shared_object_lookup()
1729 memcpy(uuid.data, payload->object.uuid, sizeof(payload->object.uuid)); in vhost_user_backend_handle_shared_object_lookup()
1731 payload->u64 = 0; in vhost_user_backend_handle_shared_object_lookup()
1740 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1745 payload->u64 = ret; in vhost_user_backend_handle_shared_object_lookup()
1750 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1754 if (dmabuf_fd != -1) { in vhost_user_backend_handle_shared_object_lookup()
1760 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1765 return -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1773 g_source_destroy(u->backend_src); in close_backend_channel()
1774 g_source_unref(u->backend_src); in close_backend_channel()
1775 u->backend_src = NULL; in close_backend_channel()
1776 object_unref(OBJECT(u->backend_ioc)); in close_backend_channel()
1777 u->backend_ioc = NULL; in close_backend_channel()
1784 struct vhost_user *u = dev->opaque; in backend_read()
1826 fd ? fd[0] : -1); in backend_read()
1836 ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc, in backend_read()
1841 ret = -EINVAL; in backend_read()
1879 struct vhost_user *u = dev->opaque; in vhost_setup_backend_channel()
1881 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_setup_backend_channel()
1886 if (!virtio_has_feature(dev->protocol_features, in vhost_setup_backend_channel()
1891 if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { in vhost_setup_backend_channel()
1894 return -saved_errno; in vhost_setup_backend_channel()
1900 return -ECONNREFUSED; in vhost_setup_backend_channel()
1902 u->backend_ioc = ioc; in vhost_setup_backend_channel()
1903 u->backend_src = qio_channel_add_watch_source(u->backend_ioc, in vhost_setup_backend_channel()
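The plain-libc shape of the channel setup above, as a sketch: one end of the pair stays in QEMU and the other fd is handed to the back-end so it can issue requests of its own over the channel (the helper name is illustrative):

#include <sys/socket.h>
#include <errno.h>

static int make_backend_channel(int sv[2])
{
    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        return -errno;   /* mirrors the -saved_errno return above */
    }
    return 0;
}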
1938 struct vhost_dev *dev = pcfd->data; in vhost_user_postcopy_fault_handler()
1939 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_fault_handler()
1941 uint64_t faultaddr = msg->arg.pagefault.address; in vhost_user_postcopy_fault_handler()
1946 trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr, in vhost_user_postcopy_fault_handler()
1947 dev->mem->nregions); in vhost_user_postcopy_fault_handler()
1948 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { in vhost_user_postcopy_fault_handler()
1950 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size); in vhost_user_postcopy_fault_handler()
1951 if (faultaddr >= u->postcopy_client_bases[i]) { in vhost_user_postcopy_fault_handler()
1952              /* Offset of the fault address in the vhost region */  in vhost_user_postcopy_fault_handler()
1953 uint64_t region_offset = faultaddr - u->postcopy_client_bases[i]; in vhost_user_postcopy_fault_handler()
1954 if (region_offset < dev->mem->regions[i].memory_size) { in vhost_user_postcopy_fault_handler()
1955 rb_offset = region_offset + u->region_rb_offset[i]; in vhost_user_postcopy_fault_handler()
1958 rb = u->region_rb[i]; in vhost_user_postcopy_fault_handler()
1966 return -1; in vhost_user_postcopy_fault_handler()
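The translation this fault handler performs, reduced to a standalone function: a back-end (client) fault address is checked against a region's client base and size, then rebased onto the backing RAMBlock. The waker that follows does the inverse mapping:

#include <stdint.h>
#include <stdbool.h>

static bool fault_to_rb_offset(uint64_t faultaddr, uint64_t client_base,
                               uint64_t region_size, uint64_t rb_base,
                               uint64_t *rb_offset)
{
    if (faultaddr < client_base ||
        faultaddr - client_base >= region_size) {
        return false;   /* fault is not in this region */
    }
    *rb_offset = (faultaddr - client_base) + rb_base;
    return true;
}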
1972 struct vhost_dev *dev = pcfd->data; in vhost_user_postcopy_waker()
1973 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_waker()
1982 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { in vhost_user_postcopy_waker()
1983 if (u->region_rb[i] == rb && in vhost_user_postcopy_waker()
1984 offset >= u->region_rb_offset[i] && in vhost_user_postcopy_waker()
1985 offset < (u->region_rb_offset[i] + in vhost_user_postcopy_waker()
1986 dev->mem->regions[i].memory_size)) { in vhost_user_postcopy_waker()
1987 uint64_t client_addr = (offset - u->region_rb_offset[i]) + in vhost_user_postcopy_waker()
1988 u->postcopy_client_bases[i]; in vhost_user_postcopy_waker()
2006 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_advise()
2007 CharBackend *chr = u->user->chr; in vhost_user_postcopy_advise()
2017 error_setg(errp, "Failed to send postcopy_advise to vhost"); in vhost_user_postcopy_advise()
2023 error_setg(errp, "Failed to get postcopy_advise reply from vhost"); in vhost_user_postcopy_advise()
2030 return -EPROTO; in vhost_user_postcopy_advise()
2035 return -EPROTO; in vhost_user_postcopy_advise()
2040 return -EIO; in vhost_user_postcopy_advise()
2045 u->postcopy_fd.fd = ufd; in vhost_user_postcopy_advise()
2046 u->postcopy_fd.data = dev; in vhost_user_postcopy_advise()
2047 u->postcopy_fd.handler = vhost_user_postcopy_fault_handler; in vhost_user_postcopy_advise()
2048 u->postcopy_fd.waker = vhost_user_postcopy_waker; in vhost_user_postcopy_advise()
2049 u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */ in vhost_user_postcopy_advise()
2050 postcopy_register_shared_ufd(&u->postcopy_fd); in vhost_user_postcopy_advise()
2053 error_setg(errp, "Postcopy not supported on non-Linux systems"); in vhost_user_postcopy_advise()
2054 return -ENOSYS; in vhost_user_postcopy_advise()
2063 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_listen()
2069 u->postcopy_listen = true; in vhost_user_postcopy_listen()
2075 error_setg(errp, "Failed to send postcopy_listen to vhost"); in vhost_user_postcopy_listen()
2098 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_end()
2104 error_setg(errp, "Failed to send postcopy_end to vhost"); in vhost_user_postcopy_end()
2113 postcopy_unregister_shared_ufd(&u->postcopy_fd); in vhost_user_postcopy_end()
2114 close(u->postcopy_fd.fd); in vhost_user_postcopy_end()
2115 u->postcopy_fd.handler = NULL; in vhost_user_postcopy_end()
2128 struct vhost_dev *dev = u->dev; in vhost_user_postcopy_notifier()
2130 switch (pnd->reason) { in vhost_user_postcopy_notifier()
2132 if (!virtio_has_feature(dev->protocol_features, in vhost_user_postcopy_notifier()
2136 "vhost-user backend not capable of postcopy"); in vhost_user_postcopy_notifier()
2137 return -ENOENT; in vhost_user_postcopy_notifier()
2166 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_backend_init()
2169 u->user = vus; in vhost_user_backend_init()
2170 u->dev = dev; in vhost_user_backend_init()
2171 dev->opaque = u; in vhost_user_backend_init()
2175 error_setg_errno(errp, -err, "vhost_backend_init failed"); in vhost_user_backend_init()
2180 bool supports_f_config = vus->supports_config || in vhost_user_backend_init()
2181 (dev->config_ops && dev->config_ops->vhost_dev_config_notifier); in vhost_user_backend_init()
2184 dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; in vhost_user_backend_init()
2190 return -EPROTO; in vhost_user_backend_init()
2194 * We will use all the protocol features we support - although in vhost_user_backend_init()
2203 error_setg(errp, "vhost-user device expecting " in vhost_user_backend_init()
2204 "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does " in vhost_user_backend_init()
2206 return -EPROTO; in vhost_user_backend_init()
2211 warn_report("vhost-user backend supports " in vhost_user_backend_init()
2218 dev->protocol_features = protocol_features; in vhost_user_backend_init()
2219 err = vhost_user_set_protocol_features(dev, dev->protocol_features); in vhost_user_backend_init()
2222 return -EPROTO; in vhost_user_backend_init()
2226 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) { in vhost_user_backend_init()
2228 &dev->max_queues); in vhost_user_backend_init()
2231 return -EPROTO; in vhost_user_backend_init()
2234 dev->max_queues = 1; in vhost_user_backend_init()
2237 if (dev->num_queues && dev->max_queues < dev->num_queues) { in vhost_user_backend_init()
2239 "backend is %" PRIu64, dev->max_queues); in vhost_user_backend_init()
2240 return -EINVAL; in vhost_user_backend_init()
2244 !(virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2246 virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2248 error_setg(errp, "IOMMU support requires reply-ack and " in vhost_user_backend_init()
2249 "backend-req protocol features."); in vhost_user_backend_init()
2250 return -EINVAL; in vhost_user_backend_init()
2254 if (!virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2256 u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS; in vhost_user_backend_init()
2261 return -EPROTO; in vhost_user_backend_init()
2264 if (ram_slots < u->user->memory_slots) { in vhost_user_backend_init()
2268 u->user->memory_slots); in vhost_user_backend_init()
2269 return -EINVAL; in vhost_user_backend_init()
2272 u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS); in vhost_user_backend_init()
2276 if (dev->migration_blocker == NULL && in vhost_user_backend_init()
2277 !virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2279 error_setg(&dev->migration_blocker, in vhost_user_backend_init()
2280 "Migration disabled: vhost-user backend lacks " in vhost_user_backend_init()
2284 if (dev->vq_index == 0) { in vhost_user_backend_init()
2288 return -EPROTO; in vhost_user_backend_init()
2292 u->postcopy_notifier.notify = vhost_user_postcopy_notifier; in vhost_user_backend_init()
2293 postcopy_add_notifier(&u->postcopy_notifier); in vhost_user_backend_init()
2302 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_backend_cleanup()
2304 u = dev->opaque; in vhost_user_backend_cleanup()
2305 if (u->postcopy_notifier.notify) { in vhost_user_backend_cleanup()
2306 postcopy_remove_notifier(&u->postcopy_notifier); in vhost_user_backend_cleanup()
2307 u->postcopy_notifier.notify = NULL; in vhost_user_backend_cleanup()
2309 u->postcopy_listen = false; in vhost_user_backend_cleanup()
2310 if (u->postcopy_fd.handler) { in vhost_user_backend_cleanup()
2311 postcopy_unregister_shared_ufd(&u->postcopy_fd); in vhost_user_backend_cleanup()
2312 close(u->postcopy_fd.fd); in vhost_user_backend_cleanup()
2313 u->postcopy_fd.handler = NULL; in vhost_user_backend_cleanup()
2315 if (u->backend_ioc) { in vhost_user_backend_cleanup()
2318 g_free(u->region_rb); in vhost_user_backend_cleanup()
2319 u->region_rb = NULL; in vhost_user_backend_cleanup()
2320 g_free(u->region_rb_offset); in vhost_user_backend_cleanup()
2321 u->region_rb_offset = NULL; in vhost_user_backend_cleanup()
2322 u->region_rb_len = 0; in vhost_user_backend_cleanup()
2324 dev->opaque = 0; in vhost_user_backend_cleanup()
2331 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); in vhost_user_get_vq_index()
2338 struct vhost_user *u = dev->opaque; in vhost_user_memslots_limit()
2340 return u->user->memory_slots; in vhost_user_memslots_limit()
2345 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_requires_shm_log()
2347 return virtio_has_feature(dev->protocol_features, in vhost_user_requires_shm_log()
2355 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_migration_done()
2358 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) { in vhost_user_migration_done()
2363 if (virtio_has_feature(dev->protocol_features, in vhost_user_migration_done()
2372 return -ENOTSUP; in vhost_user_migration_done()
2378 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_net_set_mtu()
2382 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { in vhost_user_net_set_mtu()
2429 /* No-op as the receive channel is not dedicated to IOTLB messages. */ in vhost_user_set_iotlb_callback()
2442 if (!virtio_has_feature(dev->protocol_features, in vhost_user_get_config()
2445 return -EINVAL; in vhost_user_get_config()
2454 error_setg_errno(errp, -ret, "vhost_get_config failed"); in vhost_user_get_config()
2460 error_setg_errno(errp, -ret, "vhost_get_config failed"); in vhost_user_get_config()
2468 return -EPROTO; in vhost_user_get_config()
2473 return -EPROTO; in vhost_user_get_config()
2486 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_set_config()
2495 if (!virtio_has_feature(dev->protocol_features, in vhost_user_set_config()
2497 return -ENOTSUP; in vhost_user_set_config()
2505 return -EINVAL; in vhost_user_set_config()
2531 bool crypto_session = virtio_has_feature(dev->protocol_features, in vhost_user_crypto_create_session()
2540 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_crypto_create_session()
2543 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_crypto_create_session()
2544 return -ENOTSUP; in vhost_user_crypto_create_session()
2547 if (backend_info->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION) { in vhost_user_crypto_create_session()
2548 CryptoDevBackendAsymSessionInfo *sess = &backend_info->u.asym_sess_info; in vhost_user_crypto_create_session()
2553 if (sess->keylen) { in vhost_user_crypto_create_session()
2555 if (sess->keylen > keylen) { in vhost_user_crypto_create_session()
2557 return -ENOTSUP; in vhost_user_crypto_create_session()
2560 memcpy(&msg.payload.session.u.asym.key, sess->key, in vhost_user_crypto_create_session()
2561 sess->keylen); in vhost_user_crypto_create_session()
2564 CryptoDevBackendSymSessionInfo *sess = &backend_info->u.sym_sess_info; in vhost_user_crypto_create_session()
2569 if (sess->key_len) { in vhost_user_crypto_create_session()
2571 if (sess->key_len > keylen) { in vhost_user_crypto_create_session()
2573 return -ENOTSUP; in vhost_user_crypto_create_session()
2576 memcpy(&msg.payload.session.u.sym.key, sess->cipher_key, in vhost_user_crypto_create_session()
2577 sess->key_len); in vhost_user_crypto_create_session()
2580 if (sess->auth_key_len > 0) { in vhost_user_crypto_create_session()
2582 if (sess->auth_key_len > keylen) { in vhost_user_crypto_create_session()
2584 return -ENOTSUP; in vhost_user_crypto_create_session()
2587 memcpy(&msg.payload.session.u.sym.auth_key, sess->auth_key, in vhost_user_crypto_create_session()
2588 sess->auth_key_len); in vhost_user_crypto_create_session()
2592 msg.payload.session.op_code = backend_info->op_code; in vhost_user_crypto_create_session()
2593 msg.payload.session.session_id = backend_info->session_id; in vhost_user_crypto_create_session()
2611 return -EPROTO; in vhost_user_crypto_create_session()
2616 return -EPROTO; in vhost_user_crypto_create_session()
2622 return -EINVAL; in vhost_user_crypto_create_session()
2633 bool crypto_session = virtio_has_feature(dev->protocol_features, in vhost_user_crypto_close_session()
2643 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_crypto_close_session()
2644 return -ENOTSUP; in vhost_user_crypto_close_session()
2669 struct vhost_user *u = dev->opaque; in vhost_user_get_inflight_fd()
2670 CharBackend *chr = u->user->chr; in vhost_user_get_inflight_fd()
2674 .payload.inflight.num_queues = dev->nvqs, in vhost_user_get_inflight_fd()
2679 if (!virtio_has_feature(dev->protocol_features, in vhost_user_get_inflight_fd()
2698 return -EPROTO; in vhost_user_get_inflight_fd()
2703 return -EPROTO; in vhost_user_get_inflight_fd()
2713 return -EIO; in vhost_user_get_inflight_fd()
2722 return -EFAULT; in vhost_user_get_inflight_fd()
2725 inflight->addr = addr; in vhost_user_get_inflight_fd()
2726 inflight->fd = fd; in vhost_user_get_inflight_fd()
2727 inflight->size = msg.payload.inflight.mmap_size; in vhost_user_get_inflight_fd()
2728 inflight->offset = msg.payload.inflight.mmap_offset; in vhost_user_get_inflight_fd()
2729 inflight->queue_size = queue_size; in vhost_user_get_inflight_fd()
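A hedged sketch of how the inflight buffer is plausibly obtained just before these assignments: mmap() the fd that arrived with the reply, using the size and offset from the payload:

addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
            MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
if (addr == MAP_FAILED) {
    return -EFAULT;   /* consistent with the -EFAULT return above */
}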
2740 .payload.inflight.mmap_size = inflight->size, in vhost_user_set_inflight_fd()
2741 .payload.inflight.mmap_offset = inflight->offset, in vhost_user_set_inflight_fd()
2742 .payload.inflight.num_queues = dev->nvqs, in vhost_user_set_inflight_fd()
2743 .payload.inflight.queue_size = inflight->queue_size, in vhost_user_set_inflight_fd()
2747 if (!virtio_has_feature(dev->protocol_features, in vhost_user_set_inflight_fd()
2752 return vhost_user_write(dev, &msg, &inflight->fd, 1); in vhost_user_set_inflight_fd()
2763 if (user->chr) { in vhost_user_init()
2764 error_setg(errp, "Cannot initialize vhost-user state"); in vhost_user_init()
2767 user->chr = chr; in vhost_user_init()
2768 user->memory_slots = 0; in vhost_user_init()
2769 user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4, in vhost_user_init()
2776 if (!user->chr) { in vhost_user_cleanup()
2779 user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true); in vhost_user_cleanup()
2780 user->chr = NULL; in vhost_user_cleanup()
2788 struct vhost_dev *vhost; member
2795 data->cb(data->dev); in vhost_user_async_close_bh()
2802 * we want to keep all the in-flight data as is for migration
2806 CharBackend *chardev, struct vhost_dev *vhost, in vhost_user_async_close() argument
2811 * A close event may happen during a read/write, but vhost in vhost_user_async_close()
2819 data->cb = cb; in vhost_user_async_close()
2820 data->dev = d; in vhost_user_async_close()
2821 data->cd = chardev; in vhost_user_async_close()
2822 data->vhost = vhost; in vhost_user_async_close()
2824 /* Disable any further notifications on the chardev */ in vhost_user_async_close()
2832 * Move vhost device to the stopped state. The vhost-user device in vhost_user_async_close()
2834 * the vhost migration code. If disconnect was caught there is an in vhost_user_async_close()
2835 * option for the general vhost code to get the dev state without in vhost_user_async_close()
2836 * knowing its type (in this case vhost-user). in vhost_user_async_close()
2838 * Note if the vhost device is fully cleared by the time we in vhost_user_async_close()
2841 vhost->started = false; in vhost_user_async_close()
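A hedged sketch of the bottom-half side of this deferral: by the time the BH runs, the chardev callback has returned, so invoking the device-specific close callback is safe. The struct name is suggested by the fields shown above, not confirmed by this listing:

static void vhost_user_async_close_bh(void *opaque)
{
    VhostAsyncCallback *data = opaque;   /* assumed struct name */

    data->cb(data->dev);
    g_free(data);
}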
2847 if (!virtio_has_feature(dev->protocol_features, in vhost_user_dev_start()
2853 if (dev->vq_index + dev->nvqs != dev->vq_index_end) { in vhost_user_dev_start()
2869 if (dev->vq_index + dev->nvqs != dev->vq_index_end) { in vhost_user_reset_status()
2873 if (virtio_has_feature(dev->protocol_features, in vhost_user_reset_status()
2881 return virtio_has_feature(dev->protocol_features, in vhost_user_supports_device_state()
2893 struct vhost_user *vu = dev->opaque; in vhost_user_set_device_state_fd()
2906 *reply_fd = -1; in vhost_user_set_device_state_fd()
2910 error_setg(errp, "Back-end does not support migration state transfer"); in vhost_user_set_device_state_fd()
2911 return -ENOTSUP; in vhost_user_set_device_state_fd()
2917 error_setg_errno(errp, -ret, in vhost_user_set_device_state_fd()
2924 error_setg_errno(errp, -ret, in vhost_user_set_device_state_fd()
2933 return -EPROTO; in vhost_user_set_device_state_fd()
2940 return -EPROTO; in vhost_user_set_device_state_fd()
2944 error_setg(errp, "Back-end did not accept migration state transfer"); in vhost_user_set_device_state_fd()
2945 return -EIO; in vhost_user_set_device_state_fd()
2949 *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr); in vhost_user_set_device_state_fd()
2952 "Failed to get back-end-provided transfer pipe FD"); in vhost_user_set_device_state_fd()
2953 *reply_fd = -1; in vhost_user_set_device_state_fd()
2954 return -EIO; in vhost_user_set_device_state_fd()
2973 error_setg(errp, "Back-end does not support migration state transfer"); in vhost_user_check_device_state()
2974 return -ENOTSUP; in vhost_user_check_device_state()
2979 error_setg_errno(errp, -ret, in vhost_user_check_device_state()
2986 error_setg_errno(errp, -ret, in vhost_user_check_device_state()
2995 return -EPROTO; in vhost_user_check_device_state()
3002 return -EPROTO; in vhost_user_check_device_state()
3006 error_setg(errp, "Back-end failed to process its internal state"); in vhost_user_check_device_state()
3007 return -EIO; in vhost_user_check_device_state()