Lines matching +full:0 +full:- +full:dev in QEMU's hw/virtio/vhost-user.c
2 * vhost-user
7 * See the COPYING file in the top-level directory.
13 #include "hw/virtio/virtio-dmabuf.h"
15 #include "hw/virtio/virtio-crypto.h"
16 #include "hw/virtio/vhost-user.h"
17 #include "hw/virtio/vhost-backend.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "chardev/char-fe.h"
21 #include "io/channel-socket.h"
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
29 #include "migration/postcopy-ram.h"
37 #include "standard-headers/linux/vhost_types.h"
60 #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
63 VHOST_USER_NONE = 0,
111 VHOST_USER_BACKEND_NONE = 0,
169 /* session id for success, -1 on errors */
198 #define VHOST_USER_VERSION_MASK (0x3)
199 #define VHOST_USER_REPLY_MASK (0x1 << 2)
200 #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
212 #define VHOST_USER_VRING_IDX_MASK (0xff)
213 #define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
240 #define VHOST_USER_VERSION (0x1)
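These masks describe the fixed 12-byte header that precedes every vhost-user message: two version bits, a reply bit, a need-reply bit, and the payload size. A minimal standalone sketch of that layout, using simplified names (the SK_ prefix marks sketch-only definitions, not QEMU's):

    #include <stdbool.h>
    #include <stdint.h>

    /* 12-byte header carried in front of every vhost-user message. */
    typedef struct VhostUserHeaderSketch {
        uint32_t request; /* VhostUserRequest code */
        uint32_t flags;   /* version, reply and need-reply bits */
        uint32_t size;    /* payload size that follows the header */
    } VhostUserHeaderSketch;

    #define SK_VERSION_MASK    (0x3)
    #define SK_REPLY_MASK      (0x1 << 2)
    #define SK_NEED_REPLY_MASK (0x1 << 3)
    #define SK_VERSION         (0x1)

    /* A reply from the backend must carry exactly the reply bit plus
     * version 1, which is what vhost_user_read_header() checks. */
    static bool sk_is_valid_reply(const VhostUserHeaderSketch *hdr)
    {
        return hdr->flags == (SK_REPLY_MASK | SK_VERSION);
    }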
243 struct vhost_dev *dev; member
275 static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) in vhost_user_read_header() argument
277 struct vhost_user *u = dev->opaque; in vhost_user_read_header()
278 CharBackend *chr = u->user->chr; in vhost_user_read_header()
286 " Original request %d.", r, size, msg->hdr.request); in vhost_user_read_header()
287 return r < 0 ? -saved_errno : -EIO; in vhost_user_read_header()
291 if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) { in vhost_user_read_header()
293 " Flags 0x%x instead of 0x%x.", msg->hdr.flags, in vhost_user_read_header()
295 return -EPROTO; in vhost_user_read_header()
298 trace_vhost_user_read(msg->hdr.request, msg->hdr.flags); in vhost_user_read_header()
300 return 0; in vhost_user_read_header()
303 static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) in vhost_user_read() argument
305 struct vhost_user *u = dev->opaque; in vhost_user_read()
306 CharBackend *chr = u->user->chr; in vhost_user_read()
310 r = vhost_user_read_header(dev, msg); in vhost_user_read()
311 if (r < 0) { in vhost_user_read()
316 if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) { in vhost_user_read()
318 " Size %d exceeds the maximum %zu.", msg->hdr.size, in vhost_user_read()
320 return -EPROTO; in vhost_user_read()
323 if (msg->hdr.size) { in vhost_user_read()
325 size = msg->hdr.size; in vhost_user_read()
330 " Read %d instead of %d.", r, msg->hdr.size); in vhost_user_read()
331 return r < 0 ? -saved_errno : -EIO; in vhost_user_read()
335 return 0; in vhost_user_read()
338 static int process_message_reply(struct vhost_dev *dev, in process_message_reply() argument
344 if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) { in process_message_reply()
345 return 0; in process_message_reply()
348 ret = vhost_user_read(dev, &msg_reply); in process_message_reply()
349 if (ret < 0) { in process_message_reply()
353 if (msg_reply.hdr.request != msg->hdr.request) { in process_message_reply()
356 msg->hdr.request, msg_reply.hdr.request); in process_message_reply()
357 return -EPROTO; in process_message_reply()
360 return msg_reply.payload.u64 ? -EIO : 0; in process_message_reply()
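process_message_reply() is the REPLY_ACK convention in action: when the sender set the need-reply bit, the backend must answer with the same request code and a u64 payload where zero means success. A hedged sketch of the check, operating on an already-read reply and a simplified message type:

    #include <errno.h>
    #include <stdint.h>

    /* Simplified message: header fields plus a u64 payload. */
    struct MsgSketch {
        uint32_t request;
        uint32_t flags;
        uint64_t u64;
    };

    #define SK_NEED_REPLY_MASK (0x1 << 3)

    /* REPLY_ACK check: 'sent' is the request we transmitted, 'reply'
     * the message read back. Returns 0 on success, negative errno
     * otherwise. */
    static int sk_process_message_reply(const struct MsgSketch *sent,
                                        const struct MsgSketch *reply)
    {
        if (!(sent->flags & SK_NEED_REPLY_MASK)) {
            return 0;                 /* no ack was requested */
        }
        if (reply->request != sent->request) {
            return -EPROTO;           /* ack for a different request */
        }
        return reply->u64 ? -EIO : 0; /* non-zero u64 signals failure */
    }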
381 /* most non-init callers ignore the error */
382 static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg, in vhost_user_write() argument
385 struct vhost_user *u = dev->opaque; in vhost_user_write()
386 CharBackend *chr = u->user->chr; in vhost_user_write()
387 int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size; in vhost_user_write()
390 * Some devices, like virtio-scsi, are implemented as a single vhost_dev, in vhost_user_write()
391 * while others, like virtio-net, contain multiple vhost_devs. For in vhost_user_write()
394 * vhost-user messages should only be sent once. in vhost_user_write()
396 * Devices with multiple vhost_devs are given an associated dev->vq_index in vhost_user_write()
397 * so per_device requests are only sent if vq_index is 0. in vhost_user_write()
399 if (vhost_user_per_device_request(msg->hdr.request) in vhost_user_write()
400 && dev->vq_index != 0) { in vhost_user_write()
401 msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK; in vhost_user_write()
402 return 0; in vhost_user_write()
405 if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) { in vhost_user_write()
407 return -EINVAL; in vhost_user_write()
415 return ret < 0 ? -saved_errno : -EIO; in vhost_user_write()
418 trace_vhost_user_write(msg->hdr.request, msg->hdr.flags); in vhost_user_write()
420 return 0; in vhost_user_write()
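The comment in vhost_user_write() is the reason multi-queue devices do not flood the backend: per-device requests go out only from the vhost_dev owning vq_index 0, and the need-reply bit is cleared on the suppressed copies so nobody waits for an ack that was never requested. A sketch of that gate; is_per_device_request() is a hypothetical stand-in for vhost_user_per_device_request():

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: the real classifier is
     * vhost_user_per_device_request() and covers requests such as
     * SET_MEM_TABLE that target the whole device, not one queue. */
    static bool is_per_device_request(uint32_t request)
    {
        (void)request;
        return true;
    }

    /* Only the vhost_dev that owns vq_index 0 forwards per-device
     * requests; the other vhost_devs silently drop them. */
    static bool should_send(uint32_t request, int vq_index)
    {
        return !is_per_device_request(request) || vq_index == 0;
    }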
423 int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd) in vhost_user_gpu_set_socket() argument
430 return vhost_user_write(dev, &msg, &fd, 1); in vhost_user_gpu_set_socket()
433 static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, in vhost_user_set_log_base() argument
437 size_t fd_num = 0; in vhost_user_set_log_base()
438 bool shmfd = virtio_has_feature(dev->protocol_features, in vhost_user_set_log_base()
444 .payload.log.mmap_size = log->size * sizeof(*(log->log)), in vhost_user_set_log_base()
445 .payload.log.mmap_offset = 0, in vhost_user_set_log_base()
450 if (dev->vq_index != 0) { in vhost_user_set_log_base()
451 return 0; in vhost_user_set_log_base()
454 if (shmfd && log->fd != -1) { in vhost_user_set_log_base()
455 fds[fd_num++] = log->fd; in vhost_user_set_log_base()
458 ret = vhost_user_write(dev, &msg, fds, fd_num); in vhost_user_set_log_base()
459 if (ret < 0) { in vhost_user_set_log_base()
464 msg.hdr.size = 0; in vhost_user_set_log_base()
465 ret = vhost_user_read(dev, &msg); in vhost_user_set_log_base()
466 if (ret < 0) { in vhost_user_set_log_base()
474 return -EPROTO; in vhost_user_set_log_base()
478 return 0; in vhost_user_set_log_base()
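When VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated, SET_LOG_BASE carries the dirty-log area as a shared-memory fd, with mmap_size computed as log->size * sizeof(*(log->log)), i.e. 64-bit words. A Linux-only sketch of allocating such an area with memfd_create(); the names here are illustrative, not QEMU's:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Create a shareable dirty log of 'nwords' 64-bit entries; returns
     * the fd to pass with SET_LOG_BASE, or -1 on error. *mmap_size
     * receives the value to put in payload.log.mmap_size. */
    static int create_dirty_log(uint64_t nwords, uint64_t *mmap_size)
    {
        *mmap_size = nwords * sizeof(uint64_t);
        int fd = memfd_create("dirty-log", 0);
        if (fd < 0) {
            return -1;
        }
        if (ftruncate(fd, *mmap_size) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }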
489 *offset += mr->ram_block->fd_offset; in vhost_user_get_mr_data()
499 dst->userspace_addr = src->userspace_addr; in vhost_user_fill_msg_region()
500 dst->memory_size = src->memory_size; in vhost_user_fill_msg_region()
501 dst->guest_phys_addr = src->guest_phys_addr; in vhost_user_fill_msg_region()
502 dst->mmap_offset = mmap_offset; in vhost_user_fill_msg_region()
506 struct vhost_dev *dev, in vhost_user_fill_set_mem_table_msg() argument
517 msg->hdr.request = VHOST_USER_SET_MEM_TABLE; in vhost_user_fill_set_mem_table_msg()
519 for (i = 0; i < dev->mem->nregions; ++i) { in vhost_user_fill_set_mem_table_msg()
520 reg = dev->mem->regions + i; in vhost_user_fill_set_mem_table_msg()
522 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in vhost_user_fill_set_mem_table_msg()
523 if (fd > 0) { in vhost_user_fill_set_mem_table_msg()
526 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name, in vhost_user_fill_set_mem_table_msg()
527 reg->memory_size, in vhost_user_fill_set_mem_table_msg()
528 reg->guest_phys_addr, in vhost_user_fill_set_mem_table_msg()
529 reg->userspace_addr, in vhost_user_fill_set_mem_table_msg()
531 u->region_rb_offset[i] = offset; in vhost_user_fill_set_mem_table_msg()
532 u->region_rb[i] = mr->ram_block; in vhost_user_fill_set_mem_table_msg()
534 error_report("Failed preparing vhost-user memory table msg"); in vhost_user_fill_set_mem_table_msg()
535 return -ENOBUFS; in vhost_user_fill_set_mem_table_msg()
538 msg->payload.memory.regions[*fd_num] = region_buffer; in vhost_user_fill_set_mem_table_msg()
541 u->region_rb_offset[i] = 0; in vhost_user_fill_set_mem_table_msg()
542 u->region_rb[i] = NULL; in vhost_user_fill_set_mem_table_msg()
546 msg->payload.memory.nregions = *fd_num; in vhost_user_fill_set_mem_table_msg()
549 error_report("Failed initializing vhost-user memory map, " in vhost_user_fill_set_mem_table_msg()
550 "consider using -object memory-backend-file share=on"); in vhost_user_fill_set_mem_table_msg()
551 return -EINVAL; in vhost_user_fill_set_mem_table_msg()
554 msg->hdr.size = sizeof(msg->payload.memory.nregions); in vhost_user_fill_set_mem_table_msg()
555 msg->hdr.size += sizeof(msg->payload.memory.padding); in vhost_user_fill_set_mem_table_msg()
556 msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion); in vhost_user_fill_set_mem_table_msg()
558 return 0; in vhost_user_fill_set_mem_table_msg()
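vhost_user_fill_set_mem_table_msg() emits one VhostUserMemoryRegion per fd-backed RAM region; regions without an fd are skipped, which is why the message size is computed from *fd_num. The wire layout of a region is four u64s. A sketch of the fill step with a simplified region type:

    #include <stdint.h>

    /* Wire format of one region in SET_MEM_TABLE (vhost-user spec). */
    typedef struct VhostUserMemoryRegionSketch {
        uint64_t guest_phys_addr; /* GPA of the region */
        uint64_t memory_size;     /* length in bytes */
        uint64_t userspace_addr;  /* QEMU virtual address */
        uint64_t mmap_offset;     /* offset into the passed fd */
    } VhostUserMemoryRegionSketch;

    /* Mirrors vhost_user_fill_msg_region(): copy a source region and
     * attach the offset of its backing RAMBlock within the fd. */
    static void fill_region(VhostUserMemoryRegionSketch *dst,
                            const VhostUserMemoryRegionSketch *src,
                            uint64_t mmap_offset)
    {
        dst->userspace_addr = src->userspace_addr;
        dst->memory_size = src->memory_size;
        dst->guest_phys_addr = src->guest_phys_addr;
        dst->mmap_offset = mmap_offset;
    }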
564 return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr && in reg_equal()
565 shadow_reg->userspace_addr == vdev_reg->userspace_addr && in reg_equal()
566 shadow_reg->memory_size == vdev_reg->memory_size; in reg_equal()
569 static void scrub_shadow_regions(struct vhost_dev *dev, in scrub_shadow_regions() argument
576 struct vhost_user *u = dev->opaque; in scrub_shadow_regions()
579 int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0; in scrub_shadow_regions()
590 for (i = 0; i < u->num_shadow_regions; i++) { in scrub_shadow_regions()
591 shadow_reg = &u->shadow_regions[i]; in scrub_shadow_regions()
594 for (j = 0; j < dev->mem->nregions; j++) { in scrub_shadow_regions()
595 reg = &dev->mem->regions[j]; in scrub_shadow_regions()
597 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in scrub_shadow_regions()
607 if (fd > 0) { in scrub_shadow_regions()
608 u->region_rb_offset[j] = offset; in scrub_shadow_regions()
609 u->region_rb[j] = mr->ram_block; in scrub_shadow_regions()
610 shadow_pcb[j] = u->postcopy_client_bases[i]; in scrub_shadow_regions()
612 u->region_rb_offset[j] = 0; in scrub_shadow_regions()
613 u->region_rb[j] = NULL; in scrub_shadow_regions()
636 for (i = 0; i < dev->mem->nregions; i++) { in scrub_shadow_regions()
637 reg = &dev->mem->regions[i]; in scrub_shadow_regions()
638 vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in scrub_shadow_regions()
639 if (fd > 0) { in scrub_shadow_regions()
659 static int send_remove_regions(struct vhost_dev *dev, in send_remove_regions() argument
664 struct vhost_user *u = dev->opaque; in send_remove_regions()
675 for (i = nr_rem_reg - 1; i >= 0; i--) { in send_remove_regions()
679 vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd); in send_remove_regions()
681 if (fd > 0) { in send_remove_regions()
682 msg->hdr.request = VHOST_USER_REM_MEM_REG; in send_remove_regions()
683 vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0); in send_remove_regions()
684 msg->payload.mem_reg.region = region_buffer; in send_remove_regions()
686 ret = vhost_user_write(dev, msg, NULL, 0); in send_remove_regions()
687 if (ret < 0) { in send_remove_regions()
692 ret = process_message_reply(dev, msg); in send_remove_regions()
703 memmove(&u->shadow_regions[shadow_reg_idx], in send_remove_regions()
704 &u->shadow_regions[shadow_reg_idx + 1], in send_remove_regions()
706 (u->num_shadow_regions - shadow_reg_idx - 1)); in send_remove_regions()
707 u->num_shadow_regions--; in send_remove_regions()
710 return 0; in send_remove_regions()
713 static int send_add_regions(struct vhost_dev *dev, in send_add_regions() argument
718 struct vhost_user *u = dev->opaque; in send_add_regions()
726 for (i = 0; i < nr_add_reg; i++) { in send_add_regions()
731 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in send_add_regions()
733 if (fd > 0) { in send_add_regions()
735 trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name, in send_add_regions()
736 reg->memory_size, in send_add_regions()
737 reg->guest_phys_addr, in send_add_regions()
738 reg->userspace_addr, in send_add_regions()
740 u->region_rb_offset[reg_idx] = offset; in send_add_regions()
741 u->region_rb[reg_idx] = mr->ram_block; in send_add_regions()
743 msg->hdr.request = VHOST_USER_ADD_MEM_REG; in send_add_regions()
745 msg->payload.mem_reg.region = region_buffer; in send_add_regions()
747 ret = vhost_user_write(dev, msg, &fd, 1); in send_add_regions()
748 if (ret < 0) { in send_add_regions()
755 ret = vhost_user_read(dev, &msg_reply); in send_add_regions()
756 if (ret < 0) { in send_add_regions()
767 return -EPROTO; in send_add_regions()
774 if (msg_reply.hdr.size != msg->hdr.size) { in send_add_regions()
777 msg->hdr.size); in send_add_regions()
778 return -EPROTO; in send_add_regions()
782 if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) { in send_add_regions()
787 msg->payload.mem_reg.region.userspace_addr, in send_add_regions()
793 dev->mem->regions[reg_idx].guest_phys_addr); in send_add_regions()
794 return -EPROTO; in send_add_regions()
797 ret = process_message_reply(dev, msg); in send_add_regions()
803 u->region_rb_offset[reg_idx] = 0; in send_add_regions()
804 u->region_rb[reg_idx] = NULL; in send_add_regions()
813 u->shadow_regions[u->num_shadow_regions].guest_phys_addr = in send_add_regions()
814 reg->guest_phys_addr; in send_add_regions()
815 u->shadow_regions[u->num_shadow_regions].userspace_addr = in send_add_regions()
816 reg->userspace_addr; in send_add_regions()
817 u->shadow_regions[u->num_shadow_regions].memory_size = in send_add_regions()
818 reg->memory_size; in send_add_regions()
819 u->num_shadow_regions++; in send_add_regions()
822 return 0; in send_add_regions()
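send_add_regions() only trusts an ADD_MEM_REG ack after the echoed region's guest_phys_addr matches, and then appends the region to the shadow table that scrub_shadow_regions() diffs against on the next SET_MEM_TABLE. A compressed sketch of the shadow-table append; SK_MAX_RAM_SLOTS is an assumed bound standing in for VHOST_USER_MAX_RAM_SLOTS:

    #include <stdint.h>

    #define SK_MAX_RAM_SLOTS 32 /* assumption for the sketch */

    struct RegionSketch {
        uint64_t guest_phys_addr, memory_size, userspace_addr;
    };

    struct ShadowTable {
        struct RegionSketch regions[SK_MAX_RAM_SLOTS];
        int num;
    };

    /* Append a region the backend acknowledged; returns 0 or -1 when
     * the table is full. Removal (send_remove_regions) is the inverse:
     * a memmove of the tail over the removed slot, as in the fragments
     * above. */
    static int shadow_add(struct ShadowTable *t,
                          const struct RegionSketch *r)
    {
        if (t->num >= SK_MAX_RAM_SLOTS) {
            return -1;
        }
        t->regions[t->num++] = *r;
        return 0;
    }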
825 static int vhost_user_add_remove_regions(struct vhost_dev *dev, in vhost_user_add_remove_regions() argument
830 struct vhost_user *u = dev->opaque; in vhost_user_add_remove_regions()
837 msg->hdr.size = sizeof(msg->payload.mem_reg); in vhost_user_add_remove_regions()
840 scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg, in vhost_user_add_remove_regions()
844 ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg, in vhost_user_add_remove_regions()
846 if (ret < 0) { in vhost_user_add_remove_regions()
852 ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb, in vhost_user_add_remove_regions()
854 if (ret < 0) { in vhost_user_add_remove_regions()
860 memcpy(u->postcopy_client_bases, shadow_pcb, in vhost_user_add_remove_regions()
868 msg->hdr.size = sizeof(msg->payload.u64); in vhost_user_add_remove_regions()
869 msg->payload.u64 = 0; /* OK */ in vhost_user_add_remove_regions()
871 ret = vhost_user_write(dev, msg, NULL, 0); in vhost_user_add_remove_regions()
872 if (ret < 0) { in vhost_user_add_remove_regions()
877 return 0; in vhost_user_add_remove_regions()
881 memcpy(u->postcopy_client_bases, shadow_pcb, in vhost_user_add_remove_regions()
888 static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, in vhost_user_set_mem_table_postcopy() argument
893 struct vhost_user *u = dev->opaque; in vhost_user_set_mem_table_postcopy()
895 size_t fd_num = 0; in vhost_user_set_mem_table_postcopy()
904 if (u->region_rb_len < dev->mem->nregions) { in vhost_user_set_mem_table_postcopy()
905 u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions); in vhost_user_set_mem_table_postcopy()
906 u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset, in vhost_user_set_mem_table_postcopy()
907 dev->mem->nregions); in vhost_user_set_mem_table_postcopy()
908 memset(&(u->region_rb[u->region_rb_len]), '\0', in vhost_user_set_mem_table_postcopy()
909 sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len)); in vhost_user_set_mem_table_postcopy()
910 memset(&(u->region_rb_offset[u->region_rb_len]), '\0', in vhost_user_set_mem_table_postcopy()
911 sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len)); in vhost_user_set_mem_table_postcopy()
912 u->region_rb_len = dev->mem->nregions; in vhost_user_set_mem_table_postcopy()
916 ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true); in vhost_user_set_mem_table_postcopy()
917 if (ret < 0) { in vhost_user_set_mem_table_postcopy()
921 ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, in vhost_user_set_mem_table_postcopy()
923 if (ret < 0) { in vhost_user_set_mem_table_postcopy()
927 ret = vhost_user_write(dev, &msg, fds, fd_num); in vhost_user_set_mem_table_postcopy()
928 if (ret < 0) { in vhost_user_set_mem_table_postcopy()
932 ret = vhost_user_read(dev, &msg_reply); in vhost_user_set_mem_table_postcopy()
933 if (ret < 0) { in vhost_user_set_mem_table_postcopy()
941 return -EPROTO; in vhost_user_set_mem_table_postcopy()
952 return -EPROTO; in vhost_user_set_mem_table_postcopy()
955 memset(u->postcopy_client_bases, 0, in vhost_user_set_mem_table_postcopy()
963 for (msg_i = 0, region_i = 0; in vhost_user_set_mem_table_postcopy()
964 region_i < dev->mem->nregions; in vhost_user_set_mem_table_postcopy()
968 dev->mem->regions[region_i].guest_phys_addr) { in vhost_user_set_mem_table_postcopy()
969 u->postcopy_client_bases[region_i] = in vhost_user_set_mem_table_postcopy()
982 return -EIO; in vhost_user_set_mem_table_postcopy()
992 msg.payload.u64 = 0; /* OK */ in vhost_user_set_mem_table_postcopy()
993 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_set_mem_table_postcopy()
994 if (ret < 0) { in vhost_user_set_mem_table_postcopy()
999 return 0; in vhost_user_set_mem_table_postcopy()
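In the postcopy path the backend's SET_MEM_TABLE reply echoes the region table with userspace_addr rewritten to the backend-side mapping base. The loop above pairs reply entries with local regions by guest_phys_addr, advancing the reply index only on a match because fd-less regions were never sent. A sketch of that pairing:

    #include <stdint.h>
    #include <string.h>

    struct RegionSketch {
        uint64_t guest_phys_addr, memory_size, userspace_addr;
    };

    /* For every local region, find the reply entry with the same GPA
     * and record the backend-side base address; 'msg_i' only advances
     * on a match, mirroring vhost_user_set_mem_table_postcopy(). */
    static void record_client_bases(const struct RegionSketch *reply,
                                    int nreply,
                                    const struct RegionSketch *regions,
                                    int nregions,
                                    uint64_t *client_bases)
    {
        int msg_i = 0;
        memset(client_bases, 0, sizeof(*client_bases) * nregions);
        for (int region_i = 0; region_i < nregions; region_i++) {
            if (msg_i < nreply &&
                reply[msg_i].guest_phys_addr ==
                    regions[region_i].guest_phys_addr) {
                client_bases[region_i] = reply[msg_i].userspace_addr;
                msg_i++;
            }
        }
    }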
1002 static int vhost_user_set_mem_table(struct vhost_dev *dev, in vhost_user_set_mem_table() argument
1005 struct vhost_user *u = dev->opaque; in vhost_user_set_mem_table()
1007 size_t fd_num = 0; in vhost_user_set_mem_table()
1008 bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler; in vhost_user_set_mem_table()
1009 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_set_mem_table()
1012 virtio_has_feature(dev->protocol_features, in vhost_user_set_mem_table()
1021 return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported, in vhost_user_set_mem_table()
1034 ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false); in vhost_user_set_mem_table()
1035 if (ret < 0) { in vhost_user_set_mem_table()
1039 ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, in vhost_user_set_mem_table()
1041 if (ret < 0) { in vhost_user_set_mem_table()
1045 ret = vhost_user_write(dev, &msg, fds, fd_num); in vhost_user_set_mem_table()
1046 if (ret < 0) { in vhost_user_set_mem_table()
1051 return process_message_reply(dev, &msg); in vhost_user_set_mem_table()
1055 return 0; in vhost_user_set_mem_table()
1058 static int vhost_user_set_vring_endian(struct vhost_dev *dev, in vhost_user_set_vring_endian() argument
1061 bool cross_endian = virtio_has_feature(dev->protocol_features, in vhost_user_set_vring_endian()
1071 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_set_vring_endian()
1072 return -ENOTSUP; in vhost_user_set_vring_endian()
1075 return vhost_user_write(dev, &msg, NULL, 0); in vhost_user_set_vring_endian()
1078 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) in vhost_user_get_u64() argument
1086 if (vhost_user_per_device_request(request) && dev->vq_index != 0) { in vhost_user_get_u64()
1087 return 0; in vhost_user_get_u64()
1090 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_get_u64()
1091 if (ret < 0) { in vhost_user_get_u64()
1095 ret = vhost_user_read(dev, &msg); in vhost_user_get_u64()
1096 if (ret < 0) { in vhost_user_get_u64()
1103 return -EPROTO; in vhost_user_get_u64()
1108 return -EPROTO; in vhost_user_get_u64()
1113 return 0; in vhost_user_get_u64()
1116 static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features) in vhost_user_get_features() argument
1118 if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) { in vhost_user_get_features()
1119 return -EPROTO; in vhost_user_get_features()
1122 return 0; in vhost_user_get_features()
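GET_FEATURES opens the whole handshake: if bit VHOST_USER_F_PROTOCOL_FEATURES (bit 30 in the vhost-user spec) is set in the returned mask, GET/SET_PROTOCOL_FEATURES follow, with the backend's offer masked down to what QEMU supports. A sketch of the mask arithmetic only:

    #include <stdbool.h>
    #include <stdint.h>

    #define SK_F_PROTOCOL_FEATURES 30u /* per the vhost-user spec */

    static bool has_protocol_features(uint64_t features)
    {
        return features & (1ULL << SK_F_PROTOCOL_FEATURES);
    }

    /* Keep only the protocol features both sides understand; mirrors
     * masking the backend's offer with VHOST_USER_PROTOCOL_FEATURE_MASK
     * before SET_PROTOCOL_FEATURES. */
    static uint64_t negotiate(uint64_t backend_pf, uint64_t supported_mask)
    {
        return backend_pf & supported_mask;
    }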
1125 /* Note: "msg->hdr.flags" may be modified. */
1126 static int vhost_user_write_sync(struct vhost_dev *dev, VhostUserMsg *msg, in vhost_user_write_sync() argument
1132 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_write_sync()
1135 msg->hdr.flags |= VHOST_USER_NEED_REPLY_MASK; in vhost_user_write_sync()
1139 ret = vhost_user_write(dev, msg, NULL, 0); in vhost_user_write_sync()
1140 if (ret < 0) { in vhost_user_write_sync()
1147 if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) { in vhost_user_write_sync()
1148 return process_message_reply(dev, msg); in vhost_user_write_sync()
1157 return vhost_user_get_features(dev, &dummy); in vhost_user_write_sync()
1160 return 0; in vhost_user_write_sync()
1163 static int vhost_set_vring(struct vhost_dev *dev, in vhost_set_vring() argument
1175 return vhost_user_write_sync(dev, &msg, wait_for_reply); in vhost_set_vring()
1178 static int vhost_user_set_vring_num(struct vhost_dev *dev, in vhost_user_set_vring_num() argument
1181 return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring, false); in vhost_user_set_vring_num()
1186 if (n->unmap_addr) { in vhost_user_host_notifier_free()
1187 munmap(n->unmap_addr, qemu_real_host_page_size()); in vhost_user_host_notifier_free()
1188 n->unmap_addr = NULL; in vhost_user_host_notifier_free()
1190 if (n->destroy) { in vhost_user_host_notifier_free()
1192 object_unparent(OBJECT(&n->mr)); in vhost_user_host_notifier_free()
1199 * clean-up function for the notifier; it will finally free the structure
1206 * if destroy == false and n->addr == NULL, we have nothing to do. in vhost_user_host_notifier_remove()
1209 if (!n || (!destroy && !n->addr)) { in vhost_user_host_notifier_remove()
1213 if (n->addr) { in vhost_user_host_notifier_remove()
1216 virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false); in vhost_user_host_notifier_remove()
1219 assert(!n->unmap_addr); in vhost_user_host_notifier_remove()
1220 n->unmap_addr = n->addr; in vhost_user_host_notifier_remove()
1221 n->addr = NULL; in vhost_user_host_notifier_remove()
1223 n->destroy = destroy; in vhost_user_host_notifier_remove()
1227 static int vhost_user_set_vring_base(struct vhost_dev *dev, in vhost_user_set_vring_base() argument
1230 return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring, false); in vhost_user_set_vring_base()
1233 static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable) in vhost_user_set_vring_enable() argument
1237 if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) { in vhost_user_set_vring_enable()
1238 return -EINVAL; in vhost_user_set_vring_enable()
1241 for (i = 0; i < dev->nvqs; ++i) { in vhost_user_set_vring_enable()
1244 .index = dev->vq_index + i, in vhost_user_set_vring_enable()
1249 * SET_VRING_ENABLE travels from guest to QEMU to vhost-user backend / in vhost_user_set_vring_enable()
1251 * from guest to vhost-user backend / data plane thread via eventfd. in vhost_user_set_vring_enable()
1257 * seemingly disabled queue). To prevent this out-of-order delivery, in vhost_user_set_vring_enable()
1259 * backend control plane acknowledges enabling the queue -- IOW, pass in vhost_user_set_vring_enable()
1262 ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state, true); in vhost_user_set_vring_enable()
1263 if (ret < 0) { in vhost_user_set_vring_enable()
1267 * the device-level recovery. in vhost_user_set_vring_enable()
1273 return 0; in vhost_user_set_vring_enable()
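The comment block above explains why SET_VRING_ENABLE is the one vring message sent with wait_for_reply=true: the control-plane ack orders the enable against kicks that race ahead over the eventfd. Its payload is the same two-u32 vring state used by SET_VRING_NUM/BASE and GET_VRING_BASE; a sketch:

    #include <stdint.h>

    /* Payload of SET_VRING_NUM/BASE/ENABLE and GET_VRING_BASE. */
    typedef struct VhostUserVringStateSketch {
        uint32_t index; /* virtqueue index */
        uint32_t num;   /* ring size, base, or 0/1 for enable */
    } VhostUserVringStateSketch;

    static VhostUserVringStateSketch make_enable(uint32_t vq_index,
                                                 int enable)
    {
        return (VhostUserVringStateSketch){ .index = vq_index,
                                            .num = enable ? 1 : 0 };
    }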
1279 if (idx >= u->notifiers->len) { in fetch_notifier()
1282 return g_ptr_array_index(u->notifiers, idx); in fetch_notifier()
1285 static int vhost_user_get_vring_base(struct vhost_dev *dev, in vhost_user_get_vring_base() argument
1295 struct vhost_user *u = dev->opaque; in vhost_user_get_vring_base()
1297 VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index); in vhost_user_get_vring_base()
1298 vhost_user_host_notifier_remove(n, dev->vdev, false); in vhost_user_get_vring_base()
1300 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_get_vring_base()
1301 if (ret < 0) { in vhost_user_get_vring_base()
1305 ret = vhost_user_read(dev, &msg); in vhost_user_get_vring_base()
1306 if (ret < 0) { in vhost_user_get_vring_base()
1313 return -EPROTO; in vhost_user_get_vring_base()
1318 return -EPROTO; in vhost_user_get_vring_base()
1323 return 0; in vhost_user_get_vring_base()
1326 static int vhost_set_vring_file(struct vhost_dev *dev, in vhost_set_vring_file() argument
1331 size_t fd_num = 0; in vhost_set_vring_file()
1335 .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, in vhost_set_vring_file()
1339 if (file->fd > 0) { in vhost_set_vring_file()
1340 fds[fd_num++] = file->fd; in vhost_set_vring_file()
1345 return vhost_user_write(dev, &msg, fds, fd_num); in vhost_set_vring_file()
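vhost_set_vring_file() packs the queue index into the low byte of the u64 payload, sets VHOST_USER_VRING_NOFD_MASK when there is no eventfd, and otherwise attaches the fd as SCM_RIGHTS ancillary data (qemu_chr_fe_set_msgfds() arranges that internally). A plain-POSIX sketch of sending one fd alongside a payload:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Send 'len' bytes of 'buf' over the UNIX socket 'sock', attaching
     * 'fd' as SCM_RIGHTS ancillary data. Returns sendmsg()'s result. */
    static ssize_t send_with_fd(int sock, const void *buf, size_t len,
                                int fd)
    {
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        char control[CMSG_SPACE(sizeof(int))];
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = control, .msg_controllen = sizeof(control),
        };
        memset(control, 0, sizeof(control));
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
        return sendmsg(sock, &msg, 0);
    }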
1348 static int vhost_user_set_vring_kick(struct vhost_dev *dev, in vhost_user_set_vring_kick() argument
1351 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file); in vhost_user_set_vring_kick()
1354 static int vhost_user_set_vring_call(struct vhost_dev *dev, in vhost_user_set_vring_call() argument
1357 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file); in vhost_user_set_vring_call()
1360 static int vhost_user_set_vring_err(struct vhost_dev *dev, in vhost_user_set_vring_err() argument
1363 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file); in vhost_user_set_vring_err()
1366 static int vhost_user_set_vring_addr(struct vhost_dev *dev, in vhost_user_set_vring_addr() argument
1380 bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG); in vhost_user_set_vring_addr()
1382 return vhost_user_write_sync(dev, &msg, wait_for_reply); in vhost_user_set_vring_addr()
1385 static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64, in vhost_user_set_u64() argument
1395 return vhost_user_write_sync(dev, &msg, wait_for_reply); in vhost_user_set_u64()
1398 static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status) in vhost_user_set_status() argument
1400 return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false); in vhost_user_set_status()
1403 static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status) in vhost_user_get_status() argument
1408 ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value); in vhost_user_get_status()
1409 if (ret < 0) { in vhost_user_get_status()
1414 return 0; in vhost_user_get_status()
1417 static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status) in vhost_user_add_status() argument
1422 ret = vhost_user_get_status(dev, &s); in vhost_user_add_status()
1423 if (ret < 0) { in vhost_user_add_status()
1428 return 0; in vhost_user_add_status()
1432 return vhost_user_set_status(dev, s); in vhost_user_add_status()
1435 static int vhost_user_set_features(struct vhost_dev *dev, in vhost_user_set_features() argument
1442 bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL); in vhost_user_set_features()
1451 ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, in vhost_user_set_features()
1452 features | dev->backend_features, in vhost_user_set_features()
1455 if (virtio_has_feature(dev->protocol_features, in vhost_user_set_features()
1458 return vhost_user_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); in vhost_user_set_features()
1465 static int vhost_user_set_protocol_features(struct vhost_dev *dev, in vhost_user_set_protocol_features() argument
1468 return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features, in vhost_user_set_protocol_features()
1472 static int vhost_user_set_owner(struct vhost_dev *dev) in vhost_user_set_owner() argument
1479 return vhost_user_write(dev, &msg, NULL, 0); in vhost_user_set_owner()
1482 static int vhost_user_get_max_memslots(struct vhost_dev *dev, in vhost_user_get_max_memslots() argument
1488 err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS, in vhost_user_get_max_memslots()
1490 if (err < 0) { in vhost_user_get_max_memslots()
1496 return 0; in vhost_user_get_max_memslots()
1499 static int vhost_user_reset_device(struct vhost_dev *dev) in vhost_user_reset_device() argument
1510 if (!virtio_has_feature(dev->protocol_features, in vhost_user_reset_device()
1512 return -ENOSYS; in vhost_user_reset_device()
1515 return vhost_user_write(dev, &msg, NULL, 0); in vhost_user_reset_device()
1518 static int vhost_user_backend_handle_config_change(struct vhost_dev *dev) in vhost_user_backend_handle_config_change() argument
1520 if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { in vhost_user_backend_handle_config_change()
1521 return -ENOSYS; in vhost_user_backend_handle_config_change()
1524 return dev->config_ops->vhost_dev_config_notifier(dev); in vhost_user_backend_handle_config_change()
1535 if (idx >= u->notifiers->len) { in fetch_or_create_notifier()
1536 g_ptr_array_set_size(u->notifiers, idx + 1); in fetch_or_create_notifier()
1539 n = g_ptr_array_index(u->notifiers, idx); in fetch_or_create_notifier()
1542 * In case notifications arrive out-of-order, in fetch_or_create_notifier()
1545 g_ptr_array_remove_index(u->notifiers, idx); in fetch_or_create_notifier()
1547 n->idx = idx; in fetch_or_create_notifier()
1548 g_ptr_array_insert(u->notifiers, idx, n); in fetch_or_create_notifier()
1555 static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev, in vhost_user_backend_handle_vring_host_notifier() argument
1559 int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK; in vhost_user_backend_handle_vring_host_notifier()
1561 struct vhost_user *u = dev->opaque; in vhost_user_backend_handle_vring_host_notifier()
1562 VhostUserState *user = u->user; in vhost_user_backend_handle_vring_host_notifier()
1563 VirtIODevice *vdev = dev->vdev; in vhost_user_backend_handle_vring_host_notifier()
1568 if (!virtio_has_feature(dev->protocol_features, in vhost_user_backend_handle_vring_host_notifier()
1571 return -EINVAL; in vhost_user_backend_handle_vring_host_notifier()
1581 if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { in vhost_user_backend_handle_vring_host_notifier()
1582 return 0; in vhost_user_backend_handle_vring_host_notifier()
1586 if (area->size != page_size) { in vhost_user_backend_handle_vring_host_notifier()
1587 return -EINVAL; in vhost_user_backend_handle_vring_host_notifier()
1591 fd, area->offset); in vhost_user_backend_handle_vring_host_notifier()
1593 return -EFAULT; in vhost_user_backend_handle_vring_host_notifier()
1596 name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", in vhost_user_backend_handle_vring_host_notifier()
1598 if (!n->mr.ram) { /* Don't init again after suspend. */ in vhost_user_backend_handle_vring_host_notifier()
1599 memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, in vhost_user_backend_handle_vring_host_notifier()
1602 n->mr.ram_block->host = addr; in vhost_user_backend_handle_vring_host_notifier()
1606 if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { in vhost_user_backend_handle_vring_host_notifier()
1607 object_unparent(OBJECT(&n->mr)); in vhost_user_backend_handle_vring_host_notifier()
1609 return -ENXIO; in vhost_user_backend_handle_vring_host_notifier()
1612 n->addr = addr; in vhost_user_backend_handle_vring_host_notifier()
1614 return 0; in vhost_user_backend_handle_vring_host_notifier()
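VHOST_USER_BACKEND_VRING_HOST_NOTIFIER hands QEMU an fd, offset and size to mmap as a doorbell page that guest writes hit directly; the handler rejects anything that is not exactly one host page. A sketch of the mapping step, mirroring the mmap() call above:

    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map the notifier page the backend described; returns the mapping
     * or MAP_FAILED. 'size' must equal the host page size, as the
     * handler above enforces. */
    static void *map_notifier(int fd, uint64_t offset, uint64_t size)
    {
        if (size != (uint64_t)sysconf(_SC_PAGESIZE)) {
            return MAP_FAILED;
        }
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, offset);
    }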
1618 vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev, in vhost_user_backend_handle_shared_object_add() argument
1623 memcpy(uuid.data, object->uuid, sizeof(object->uuid)); in vhost_user_backend_handle_shared_object_add()
1624 return !virtio_add_vhost_device(&uuid, dev); in vhost_user_backend_handle_shared_object_add()
1630 * Return: 0 on success, 1 on error.
1633 vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev, in vhost_user_backend_handle_shared_object_remove() argument
1638 memcpy(uuid.data, object->uuid, sizeof(object->uuid)); in vhost_user_backend_handle_shared_object_remove()
1643 if (dev != owner) { in vhost_user_backend_handle_shared_object_remove()
1644 /* Not allowed to remove non-owned entries */ in vhost_user_backend_handle_shared_object_remove()
1650 /* Not allowed to remove non-owned entries */ in vhost_user_backend_handle_shared_object_remove()
1662 { .iov_base = payload, .iov_len = hdr->size }, in vhost_user_send_resp()
1665 hdr->flags &= ~VHOST_USER_NEED_REPLY_MASK; in vhost_user_send_resp()
1666 hdr->flags |= VHOST_USER_REPLY_MASK; in vhost_user_send_resp()
1675 hdr->size = sizeof(payload->u64); in vhost_user_backend_send_dmabuf_fd()
1679 int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid, in vhost_user_get_shared_object() argument
1682 struct vhost_user *u = dev->opaque; in vhost_user_get_shared_object()
1683 CharBackend *chr = u->user->chr; in vhost_user_get_shared_object()
1691 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_get_shared_object()
1692 if (ret < 0) { in vhost_user_get_shared_object()
1696 ret = vhost_user_read(dev, &msg); in vhost_user_get_shared_object()
1697 if (ret < 0) { in vhost_user_get_shared_object()
1705 return -EPROTO; in vhost_user_get_shared_object()
1709 if (*dmabuf_fd < 0) { in vhost_user_get_shared_object()
1711 return -EIO; in vhost_user_get_shared_object()
1714 return 0; in vhost_user_get_shared_object()
1724 CharBackend *chr = u->user->chr; in vhost_user_backend_handle_shared_object_lookup()
1726 int dmabuf_fd = -1; in vhost_user_backend_handle_shared_object_lookup()
1727 int fd_num = 0; in vhost_user_backend_handle_shared_object_lookup()
1729 memcpy(uuid.data, payload->object.uuid, sizeof(payload->object.uuid)); in vhost_user_backend_handle_shared_object_lookup()
1731 payload->u64 = 0; in vhost_user_backend_handle_shared_object_lookup()
1738 struct vhost_dev *dev = virtio_lookup_vhost_device(&uuid); in vhost_user_backend_handle_shared_object_lookup() local
1739 if (dev == NULL) { in vhost_user_backend_handle_shared_object_lookup()
1740 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1743 int ret = vhost_user_get_shared_object(dev, uuid.data, &dmabuf_fd); in vhost_user_backend_handle_shared_object_lookup()
1744 if (ret < 0) { in vhost_user_backend_handle_shared_object_lookup()
1745 payload->u64 = ret; in vhost_user_backend_handle_shared_object_lookup()
1750 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1754 if (dmabuf_fd != -1) { in vhost_user_backend_handle_shared_object_lookup()
1758 if (qemu_chr_fe_set_msgfds(chr, &dmabuf_fd, fd_num) < 0) { in vhost_user_backend_handle_shared_object_lookup()
1760 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1765 return -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1768 return 0; in vhost_user_backend_handle_shared_object_lookup()
1773 g_source_destroy(u->backend_src); in close_backend_channel()
1774 g_source_unref(u->backend_src); in close_backend_channel()
1775 u->backend_src = NULL; in close_backend_channel()
1776 object_unref(OBJECT(u->backend_ioc)); in close_backend_channel()
1777 u->backend_ioc = NULL; in close_backend_channel()
1783 struct vhost_dev *dev = opaque; in backend_read() local
1784 struct vhost_user *u = dev->opaque; in backend_read()
1785 VhostUserHeader hdr = { 0, }; in backend_read()
1786 VhostUserPayload payload = { 0, }; in backend_read()
1789 int ret = 0; in backend_read()
1792 size_t fdsize = 0; in backend_read()
1819 ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb); in backend_read()
1822 ret = vhost_user_backend_handle_config_change(dev); in backend_read()
1825 ret = vhost_user_backend_handle_vring_host_notifier(dev, &payload.area, in backend_read()
1826 fd ? fd[0] : -1); in backend_read()
1829 ret = vhost_user_backend_handle_shared_object_add(dev, &payload.object); in backend_read()
1832 ret = vhost_user_backend_handle_shared_object_remove(dev, in backend_read()
1836 ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc, in backend_read()
1841 ret = -EINVAL; in backend_read()
1866 for (i = 0; i < fdsize; i++) { in backend_read()
1873 static int vhost_setup_backend_channel(struct vhost_dev *dev) in vhost_setup_backend_channel() argument
1879 struct vhost_user *u = dev->opaque; in vhost_setup_backend_channel()
1880 int sv[2], ret = 0; in vhost_setup_backend_channel()
1881 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_setup_backend_channel()
1886 if (!virtio_has_feature(dev->protocol_features, in vhost_setup_backend_channel()
1888 return 0; in vhost_setup_backend_channel()
1891 if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { in vhost_setup_backend_channel()
1894 return -saved_errno; in vhost_setup_backend_channel()
1897 ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err)); in vhost_setup_backend_channel()
1900 return -ECONNREFUSED; in vhost_setup_backend_channel()
1902 u->backend_ioc = ioc; in vhost_setup_backend_channel()
1903 u->backend_src = qio_channel_add_watch_source(u->backend_ioc, in vhost_setup_backend_channel()
1905 backend_read, dev, NULL, NULL); in vhost_setup_backend_channel()
1911 ret = vhost_user_write(dev, &msg, &sv[1], 1); in vhost_setup_backend_channel()
1917 ret = process_message_reply(dev, &msg); in vhost_setup_backend_channel()
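vhost_setup_backend_channel() builds the reverse-direction channel: a socketpair whose local end feeds backend_read() through a GSource watch, while the peer end travels to the backend inside VHOST_USER_SET_BACKEND_REQ_FD. The plain-POSIX core of that, without the QIOChannel plumbing:

    #include <sys/socket.h>

    /* Create the backend-request channel: *ours stays with the
     * front-end, *theirs is passed to the backend (e.g. via the
     * send_with_fd() sketch above) and then closed locally. Returns 0
     * or -1. */
    static int make_backend_channel(int *ours, int *theirs)
    {
        int sv[2];
        if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
            return -1;
        }
        *ours = sv[0];
        *theirs = sv[1];
        return 0;
    }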
1938 struct vhost_dev *dev = pcfd->data; in vhost_user_postcopy_fault_handler() local
1939 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_fault_handler()
1941 uint64_t faultaddr = msg->arg.pagefault.address; in vhost_user_postcopy_fault_handler()
1946 trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr, in vhost_user_postcopy_fault_handler()
1947 dev->mem->nregions); in vhost_user_postcopy_fault_handler()
1948 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { in vhost_user_postcopy_fault_handler()
1950 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size); in vhost_user_postcopy_fault_handler()
1951 if (faultaddr >= u->postcopy_client_bases[i]) { in vhost_user_postcopy_fault_handler()
1953 uint64_t region_offset = faultaddr - u->postcopy_client_bases[i]; in vhost_user_postcopy_fault_handler()
1954 if (region_offset < dev->mem->regions[i].memory_size) { in vhost_user_postcopy_fault_handler()
1955 rb_offset = region_offset + u->region_rb_offset[i]; in vhost_user_postcopy_fault_handler()
1958 rb = u->region_rb[i]; in vhost_user_postcopy_fault_handler()
1966 return -1; in vhost_user_postcopy_fault_handler()
1972 struct vhost_dev *dev = pcfd->data; in vhost_user_postcopy_waker() local
1973 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_waker()
1979 return 0; in vhost_user_postcopy_waker()
1982 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { in vhost_user_postcopy_waker()
1983 if (u->region_rb[i] == rb && in vhost_user_postcopy_waker()
1984 offset >= u->region_rb_offset[i] && in vhost_user_postcopy_waker()
1985 offset < (u->region_rb_offset[i] + in vhost_user_postcopy_waker()
1986 dev->mem->regions[i].memory_size)) { in vhost_user_postcopy_waker()
1987 uint64_t client_addr = (offset - u->region_rb_offset[i]) + in vhost_user_postcopy_waker()
1988 u->postcopy_client_bases[i]; in vhost_user_postcopy_waker()
1995 return 0; in vhost_user_postcopy_waker()
2003 static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp) in vhost_user_postcopy_advise() argument
2006 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_advise()
2007 CharBackend *chr = u->user->chr; in vhost_user_postcopy_advise()
2015 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_postcopy_advise()
2016 if (ret < 0) { in vhost_user_postcopy_advise()
2021 ret = vhost_user_read(dev, &msg); in vhost_user_postcopy_advise()
2022 if (ret < 0) { in vhost_user_postcopy_advise()
2030 return -EPROTO; in vhost_user_postcopy_advise()
2035 return -EPROTO; in vhost_user_postcopy_advise()
2038 if (ufd < 0) { in vhost_user_postcopy_advise()
2040 return -EIO; in vhost_user_postcopy_advise()
2045 u->postcopy_fd.fd = ufd; in vhost_user_postcopy_advise()
2046 u->postcopy_fd.data = dev; in vhost_user_postcopy_advise()
2047 u->postcopy_fd.handler = vhost_user_postcopy_fault_handler; in vhost_user_postcopy_advise()
2048 u->postcopy_fd.waker = vhost_user_postcopy_waker; in vhost_user_postcopy_advise()
2049 u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */ in vhost_user_postcopy_advise()
2050 postcopy_register_shared_ufd(&u->postcopy_fd); in vhost_user_postcopy_advise()
2051 return 0; in vhost_user_postcopy_advise()
2053 error_setg(errp, "Postcopy not supported on non-Linux systems"); in vhost_user_postcopy_advise()
2054 return -ENOSYS; in vhost_user_postcopy_advise()
2061 static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp) in vhost_user_postcopy_listen() argument
2063 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_listen()
2069 u->postcopy_listen = true; in vhost_user_postcopy_listen()
2073 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_postcopy_listen()
2074 if (ret < 0) { in vhost_user_postcopy_listen()
2079 ret = process_message_reply(dev, &msg); in vhost_user_postcopy_listen()
2085 return 0; in vhost_user_postcopy_listen()
2091 static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp) in vhost_user_postcopy_end() argument
2098 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_end()
2102 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_postcopy_end()
2103 if (ret < 0) { in vhost_user_postcopy_end()
2108 ret = process_message_reply(dev, &msg); in vhost_user_postcopy_end()
2113 postcopy_unregister_shared_ufd(&u->postcopy_fd); in vhost_user_postcopy_end()
2114 close(u->postcopy_fd.fd); in vhost_user_postcopy_end()
2115 u->postcopy_fd.handler = NULL; in vhost_user_postcopy_end()
2119 return 0; in vhost_user_postcopy_end()
2128 struct vhost_dev *dev = u->dev; in vhost_user_postcopy_notifier() local
2130 switch (pnd->reason) { in vhost_user_postcopy_notifier()
2132 if (!virtio_has_feature(dev->protocol_features, in vhost_user_postcopy_notifier()
2136 "vhost-user backend not capable of postcopy"); in vhost_user_postcopy_notifier()
2137 return -ENOENT; in vhost_user_postcopy_notifier()
2142 return vhost_user_postcopy_advise(dev, errp); in vhost_user_postcopy_notifier()
2145 return vhost_user_postcopy_listen(dev, errp); in vhost_user_postcopy_notifier()
2148 return vhost_user_postcopy_end(dev, errp); in vhost_user_postcopy_notifier()
2155 return 0; in vhost_user_postcopy_notifier()
2158 static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, in vhost_user_backend_init() argument
2166 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_backend_init()
2169 u->user = vus; in vhost_user_backend_init()
2170 u->dev = dev; in vhost_user_backend_init()
2171 dev->opaque = u; in vhost_user_backend_init()
2173 err = vhost_user_get_features(dev, &features); in vhost_user_backend_init()
2174 if (err < 0) { in vhost_user_backend_init()
2175 error_setg_errno(errp, -err, "vhost_backend_init failed"); in vhost_user_backend_init()
2180 bool supports_f_config = vus->supports_config || in vhost_user_backend_init()
2181 (dev->config_ops && dev->config_ops->vhost_dev_config_notifier); in vhost_user_backend_init()
2184 dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; in vhost_user_backend_init()
2186 err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES, in vhost_user_backend_init()
2188 if (err < 0) { in vhost_user_backend_init()
2190 return -EPROTO; in vhost_user_backend_init()
2194 * We will use all the protocol features we support - although in vhost_user_backend_init()
2203 error_setg(errp, "vhost-user device expecting " in vhost_user_backend_init()
2204 "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does " in vhost_user_backend_init()
2206 return -EPROTO; in vhost_user_backend_init()
2211 warn_report("vhost-user backend supports " in vhost_user_backend_init()
2218 dev->protocol_features = protocol_features; in vhost_user_backend_init()
2219 err = vhost_user_set_protocol_features(dev, dev->protocol_features); in vhost_user_backend_init()
2220 if (err < 0) { in vhost_user_backend_init()
2222 return -EPROTO; in vhost_user_backend_init()
2226 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) { in vhost_user_backend_init()
2227 err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM, in vhost_user_backend_init()
2228 &dev->max_queues); in vhost_user_backend_init()
2229 if (err < 0) { in vhost_user_backend_init()
2231 return -EPROTO; in vhost_user_backend_init()
2234 dev->max_queues = 1; in vhost_user_backend_init()
2237 if (dev->num_queues && dev->max_queues < dev->num_queues) { in vhost_user_backend_init()
2239 "backend is %" PRIu64, dev->max_queues); in vhost_user_backend_init()
2240 return -EINVAL; in vhost_user_backend_init()
2244 !(virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2246 virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2248 error_setg(errp, "IOMMU support requires reply-ack and " in vhost_user_backend_init()
2249 "backend-req protocol features."); in vhost_user_backend_init()
2250 return -EINVAL; in vhost_user_backend_init()
2254 if (!virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2256 u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS; in vhost_user_backend_init()
2258 err = vhost_user_get_max_memslots(dev, &ram_slots); in vhost_user_backend_init()
2259 if (err < 0) { in vhost_user_backend_init()
2261 return -EPROTO; in vhost_user_backend_init()
2264 if (ram_slots < u->user->memory_slots) { in vhost_user_backend_init()
2268 u->user->memory_slots); in vhost_user_backend_init()
2269 return -EINVAL; in vhost_user_backend_init()
2272 u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS); in vhost_user_backend_init()
2276 if (dev->migration_blocker == NULL && in vhost_user_backend_init()
2277 !virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2279 error_setg(&dev->migration_blocker, in vhost_user_backend_init()
2280 "Migration disabled: vhost-user backend lacks " in vhost_user_backend_init()
2284 if (dev->vq_index == 0) { in vhost_user_backend_init()
2285 err = vhost_setup_backend_channel(dev); in vhost_user_backend_init()
2286 if (err < 0) { in vhost_user_backend_init()
2288 return -EPROTO; in vhost_user_backend_init()
2292 u->postcopy_notifier.notify = vhost_user_postcopy_notifier; in vhost_user_backend_init()
2293 postcopy_add_notifier(&u->postcopy_notifier); in vhost_user_backend_init()
2295 return 0; in vhost_user_backend_init()
2298 static int vhost_user_backend_cleanup(struct vhost_dev *dev) in vhost_user_backend_cleanup() argument
2302 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_backend_cleanup()
2304 u = dev->opaque; in vhost_user_backend_cleanup()
2305 if (u->postcopy_notifier.notify) { in vhost_user_backend_cleanup()
2306 postcopy_remove_notifier(&u->postcopy_notifier); in vhost_user_backend_cleanup()
2307 u->postcopy_notifier.notify = NULL; in vhost_user_backend_cleanup()
2309 u->postcopy_listen = false; in vhost_user_backend_cleanup()
2310 if (u->postcopy_fd.handler) { in vhost_user_backend_cleanup()
2311 postcopy_unregister_shared_ufd(&u->postcopy_fd); in vhost_user_backend_cleanup()
2312 close(u->postcopy_fd.fd); in vhost_user_backend_cleanup()
2313 u->postcopy_fd.handler = NULL; in vhost_user_backend_cleanup()
2315 if (u->backend_ioc) { in vhost_user_backend_cleanup()
2318 g_free(u->region_rb); in vhost_user_backend_cleanup()
2319 u->region_rb = NULL; in vhost_user_backend_cleanup()
2320 g_free(u->region_rb_offset); in vhost_user_backend_cleanup()
2321 u->region_rb_offset = NULL; in vhost_user_backend_cleanup()
2322 u->region_rb_len = 0; in vhost_user_backend_cleanup()
2324 dev->opaque = 0; in vhost_user_backend_cleanup()
2326 return 0; in vhost_user_backend_cleanup()
2329 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx) in vhost_user_get_vq_index() argument
2331 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); in vhost_user_get_vq_index()
2336 static int vhost_user_memslots_limit(struct vhost_dev *dev) in vhost_user_memslots_limit() argument
2338 struct vhost_user *u = dev->opaque; in vhost_user_memslots_limit()
2340 return u->user->memory_slots; in vhost_user_memslots_limit()
2343 static bool vhost_user_requires_shm_log(struct vhost_dev *dev) in vhost_user_requires_shm_log() argument
2345 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_requires_shm_log()
2347 return virtio_has_feature(dev->protocol_features, in vhost_user_requires_shm_log()
2351 static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr) in vhost_user_migration_done() argument
2355 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_migration_done()
2358 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) { in vhost_user_migration_done()
2359 return 0; in vhost_user_migration_done()
2363 if (virtio_has_feature(dev->protocol_features, in vhost_user_migration_done()
2370 return vhost_user_write(dev, &msg, NULL, 0); in vhost_user_migration_done()
2372 return -ENOTSUP; in vhost_user_migration_done()
2375 static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) in vhost_user_net_set_mtu() argument
2378 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_net_set_mtu()
2382 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { in vhost_user_net_set_mtu()
2383 return 0; in vhost_user_net_set_mtu()
2394 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_net_set_mtu()
2395 if (ret < 0) { in vhost_user_net_set_mtu()
2401 return process_message_reply(dev, &msg); in vhost_user_net_set_mtu()
2404 return 0; in vhost_user_net_set_mtu()
2407 static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev, in vhost_user_send_device_iotlb_msg() argument
2418 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_send_device_iotlb_msg()
2419 if (ret < 0) { in vhost_user_send_device_iotlb_msg()
2423 return process_message_reply(dev, &msg); in vhost_user_send_device_iotlb_msg()
2427 static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled) in vhost_user_set_iotlb_callback() argument
2429 /* No-op as the receive channel is not dedicated to IOTLB messages. */ in vhost_user_set_iotlb_callback()
2432 static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, in vhost_user_get_config() argument
2442 if (!virtio_has_feature(dev->protocol_features, in vhost_user_get_config()
2445 return -EINVAL; in vhost_user_get_config()
2450 msg.payload.config.offset = 0; in vhost_user_get_config()
2452 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_get_config()
2453 if (ret < 0) { in vhost_user_get_config()
2454 error_setg_errno(errp, -ret, "vhost_get_config failed"); in vhost_user_get_config()
2458 ret = vhost_user_read(dev, &msg); in vhost_user_get_config()
2459 if (ret < 0) { in vhost_user_get_config()
2460 error_setg_errno(errp, -ret, "vhost_get_config failed"); in vhost_user_get_config()
2468 return -EPROTO; in vhost_user_get_config()
2473 return -EPROTO; in vhost_user_get_config()
2478 return 0; in vhost_user_get_config()
2481 static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, in vhost_user_set_config() argument
2486 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_set_config()
2495 if (!virtio_has_feature(dev->protocol_features, in vhost_user_set_config()
2497 return -ENOTSUP; in vhost_user_set_config()
2505 return -EINVAL; in vhost_user_set_config()
2514 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_set_config()
2515 if (ret < 0) { in vhost_user_set_config()
2520 return process_message_reply(dev, &msg); in vhost_user_set_config()
2523 return 0; in vhost_user_set_config()
2526 static int vhost_user_crypto_create_session(struct vhost_dev *dev, in vhost_user_crypto_create_session() argument
2531 bool crypto_session = virtio_has_feature(dev->protocol_features, in vhost_user_crypto_create_session()
2540 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_crypto_create_session()
2543 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_crypto_create_session()
2544 return -ENOTSUP; in vhost_user_crypto_create_session()
2547 if (backend_info->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION) { in vhost_user_crypto_create_session()
2548 CryptoDevBackendAsymSessionInfo *sess = &backend_info->u.asym_sess_info; in vhost_user_crypto_create_session()
2553 if (sess->keylen) { in vhost_user_crypto_create_session()
2555 if (sess->keylen > keylen) { in vhost_user_crypto_create_session()
2557 return -ENOTSUP; in vhost_user_crypto_create_session()
2560 memcpy(&msg.payload.session.u.asym.key, sess->key, in vhost_user_crypto_create_session()
2561 sess->keylen); in vhost_user_crypto_create_session()
2564 CryptoDevBackendSymSessionInfo *sess = &backend_info->u.sym_sess_info; in vhost_user_crypto_create_session()
2569 if (sess->key_len) { in vhost_user_crypto_create_session()
2571 if (sess->key_len > keylen) { in vhost_user_crypto_create_session()
2573 return -ENOTSUP; in vhost_user_crypto_create_session()
2576 memcpy(&msg.payload.session.u.sym.key, sess->cipher_key, in vhost_user_crypto_create_session()
2577 sess->key_len); in vhost_user_crypto_create_session()
2580 if (sess->auth_key_len > 0) { in vhost_user_crypto_create_session()
2582 if (sess->auth_key_len > keylen) { in vhost_user_crypto_create_session()
2584 return -ENOTSUP; in vhost_user_crypto_create_session()
2587 memcpy(&msg.payload.session.u.sym.auth_key, sess->auth_key, in vhost_user_crypto_create_session()
2588 sess->auth_key_len); in vhost_user_crypto_create_session()
2592 msg.payload.session.op_code = backend_info->op_code; in vhost_user_crypto_create_session()
2593 msg.payload.session.session_id = backend_info->session_id; in vhost_user_crypto_create_session()
2594 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_crypto_create_session()
2595 if (ret < 0) { in vhost_user_crypto_create_session()
2601 ret = vhost_user_read(dev, &msg); in vhost_user_crypto_create_session()
2602 if (ret < 0) { in vhost_user_crypto_create_session()
2611 return -EPROTO; in vhost_user_crypto_create_session()
2616 return -EPROTO; in vhost_user_crypto_create_session()
2619 if (msg.payload.session.session_id < 0) { in vhost_user_crypto_create_session()
2622 return -EINVAL; in vhost_user_crypto_create_session()
2626 return 0; in vhost_user_crypto_create_session()
2630 vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id) in vhost_user_crypto_close_session() argument
2633 bool crypto_session = virtio_has_feature(dev->protocol_features, in vhost_user_crypto_close_session()
2643 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_crypto_close_session()
2644 return -ENOTSUP; in vhost_user_crypto_close_session()
2647 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_crypto_close_session()
2648 if (ret < 0) { in vhost_user_crypto_close_session()
2654 return 0; in vhost_user_crypto_close_session()
2657 static bool vhost_user_no_private_memslots(struct vhost_dev *dev) in vhost_user_no_private_memslots() argument
2662 static int vhost_user_get_inflight_fd(struct vhost_dev *dev, in vhost_user_get_inflight_fd() argument
2669 struct vhost_user *u = dev->opaque; in vhost_user_get_inflight_fd()
2670 CharBackend *chr = u->user->chr; in vhost_user_get_inflight_fd()
2674 .payload.inflight.num_queues = dev->nvqs, in vhost_user_get_inflight_fd()
2679 if (!virtio_has_feature(dev->protocol_features, in vhost_user_get_inflight_fd()
2681 return 0; in vhost_user_get_inflight_fd()
2684 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_get_inflight_fd()
2685 if (ret < 0) { in vhost_user_get_inflight_fd()
2689 ret = vhost_user_read(dev, &msg); in vhost_user_get_inflight_fd()
2690 if (ret < 0) { in vhost_user_get_inflight_fd()
2698 return -EPROTO; in vhost_user_get_inflight_fd()
2703 return -EPROTO; in vhost_user_get_inflight_fd()
2707 return 0; in vhost_user_get_inflight_fd()
2711 if (fd < 0) { in vhost_user_get_inflight_fd()
2713 return -EIO; in vhost_user_get_inflight_fd()
2716 addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE, in vhost_user_get_inflight_fd()
2722 return -EFAULT; in vhost_user_get_inflight_fd()
2725 inflight->addr = addr; in vhost_user_get_inflight_fd()
2726 inflight->fd = fd; in vhost_user_get_inflight_fd()
2727 inflight->size = msg.payload.inflight.mmap_size; in vhost_user_get_inflight_fd()
2728 inflight->offset = msg.payload.inflight.mmap_offset; in vhost_user_get_inflight_fd()
2729 inflight->queue_size = queue_size; in vhost_user_get_inflight_fd()
2731 return 0; in vhost_user_get_inflight_fd()
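GET_INFLIGHT_FD returns an fd plus mmap_size/mmap_offset describing the inflight-tracking area; QEMU maps it read-write and keeps the triple so SET_INFLIGHT_FD can hand the very same region back, e.g. after a backend reconnect. A sketch matching the mmap() call above:

    #include <stdint.h>
    #include <sys/mman.h>

    /* Map the inflight region the backend returned. Returns the
     * address or MAP_FAILED; the fd stays open so it can be re-sent
     * with SET_INFLIGHT_FD later. */
    static void *map_inflight(int fd, uint64_t mmap_size,
                              uint64_t mmap_offset)
    {
        return mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, mmap_offset);
    }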
2734 static int vhost_user_set_inflight_fd(struct vhost_dev *dev, in vhost_user_set_inflight_fd() argument
2740 .payload.inflight.mmap_size = inflight->size, in vhost_user_set_inflight_fd()
2741 .payload.inflight.mmap_offset = inflight->offset, in vhost_user_set_inflight_fd()
2742 .payload.inflight.num_queues = dev->nvqs, in vhost_user_set_inflight_fd()
2743 .payload.inflight.queue_size = inflight->queue_size, in vhost_user_set_inflight_fd()
2747 if (!virtio_has_feature(dev->protocol_features, in vhost_user_set_inflight_fd()
2749 return 0; in vhost_user_set_inflight_fd()
2752 return vhost_user_write(dev, &msg, &inflight->fd, 1); in vhost_user_set_inflight_fd()
2763 if (user->chr) { in vhost_user_init()
2764 error_setg(errp, "Cannot initialize vhost-user state"); in vhost_user_init()
2767 user->chr = chr; in vhost_user_init()
2768 user->memory_slots = 0; in vhost_user_init()
2769 user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4, in vhost_user_init()
2776 if (!user->chr) { in vhost_user_cleanup()
2779 user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true); in vhost_user_cleanup()
2780 user->chr = NULL; in vhost_user_cleanup()
2786 DeviceState *dev; member
2795 data->cb(data->dev); in vhost_user_async_close_bh()
2802 * we want to keep all the in-flight data as is for migration
2819 data->cb = cb; in vhost_user_async_close()
2820 data->dev = d; in vhost_user_async_close()
2821 data->cd = chardev; in vhost_user_async_close()
2822 data->vhost = vhost; in vhost_user_async_close()
2832 * Move vhost device to the stopped state. The vhost-user device in vhost_user_async_close()
2835 * option for the general vhost code to get the dev state without in vhost_user_async_close()
2836 * knowing its type (in this case vhost-user). in vhost_user_async_close()
2841 vhost->started = false; in vhost_user_async_close()
2845 static int vhost_user_dev_start(struct vhost_dev *dev, bool started) in vhost_user_dev_start() argument
2847 if (!virtio_has_feature(dev->protocol_features, in vhost_user_dev_start()
2849 return 0; in vhost_user_dev_start()
2853 if (dev->vq_index + dev->nvqs != dev->vq_index_end) { in vhost_user_dev_start()
2854 return 0; in vhost_user_dev_start()
2858 return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | in vhost_user_dev_start()
2862 return 0; in vhost_user_dev_start()
2866 static void vhost_user_reset_status(struct vhost_dev *dev) in vhost_user_reset_status() argument
2869 if (dev->vq_index + dev->nvqs != dev->vq_index_end) { in vhost_user_reset_status()
2873 if (virtio_has_feature(dev->protocol_features, in vhost_user_reset_status()
2875 vhost_user_set_status(dev, 0); in vhost_user_reset_status()
2879 static bool vhost_user_supports_device_state(struct vhost_dev *dev) in vhost_user_supports_device_state() argument
2881 return virtio_has_feature(dev->protocol_features, in vhost_user_supports_device_state()
2885 static int vhost_user_set_device_state_fd(struct vhost_dev *dev, in vhost_user_set_device_state_fd() argument
2893 struct vhost_user *vu = dev->opaque; in vhost_user_set_device_state_fd()
2906 *reply_fd = -1; in vhost_user_set_device_state_fd()
2908 if (!vhost_user_supports_device_state(dev)) { in vhost_user_set_device_state_fd()
2910 error_setg(errp, "Back-end does not support migration state transfer"); in vhost_user_set_device_state_fd()
2911 return -ENOTSUP; in vhost_user_set_device_state_fd()
2914 ret = vhost_user_write(dev, &msg, &fd, 1); in vhost_user_set_device_state_fd()
2916 if (ret < 0) { in vhost_user_set_device_state_fd()
2917 error_setg_errno(errp, -ret, in vhost_user_set_device_state_fd()
2922 ret = vhost_user_read(dev, &msg); in vhost_user_set_device_state_fd()
2923 if (ret < 0) { in vhost_user_set_device_state_fd()
2924 error_setg_errno(errp, -ret, in vhost_user_set_device_state_fd()
2933 return -EPROTO; in vhost_user_set_device_state_fd()
2940 return -EPROTO; in vhost_user_set_device_state_fd()
2943 if ((msg.payload.u64 & 0xff) != 0) { in vhost_user_set_device_state_fd()
2944 error_setg(errp, "Back-end did not accept migration state transfer"); in vhost_user_set_device_state_fd()
2945 return -EIO; in vhost_user_set_device_state_fd()
2949 *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr); in vhost_user_set_device_state_fd()
2950 if (*reply_fd < 0) { in vhost_user_set_device_state_fd()
2952 "Failed to get back-end-provided transfer pipe FD"); in vhost_user_set_device_state_fd()
2953 *reply_fd = -1; in vhost_user_set_device_state_fd()
2954 return -EIO; in vhost_user_set_device_state_fd()
2958 return 0; in vhost_user_set_device_state_fd()
2961 static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp) in vhost_user_check_device_state() argument
2968 .size = 0, in vhost_user_check_device_state()
2972 if (!vhost_user_supports_device_state(dev)) { in vhost_user_check_device_state()
2973 error_setg(errp, "Back-end does not support migration state transfer"); in vhost_user_check_device_state()
2974 return -ENOTSUP; in vhost_user_check_device_state()
2977 ret = vhost_user_write(dev, &msg, NULL, 0); in vhost_user_check_device_state()
2978 if (ret < 0) { in vhost_user_check_device_state()
2979 error_setg_errno(errp, -ret, in vhost_user_check_device_state()
2984 ret = vhost_user_read(dev, &msg); in vhost_user_check_device_state()
2985 if (ret < 0) { in vhost_user_check_device_state()
2986 error_setg_errno(errp, -ret, in vhost_user_check_device_state()
2995 return -EPROTO; in vhost_user_check_device_state()
3002 return -EPROTO; in vhost_user_check_device_state()
3005 if (msg.payload.u64 != 0) { in vhost_user_check_device_state()
3006 error_setg(errp, "Back-end failed to process its internal state"); in vhost_user_check_device_state()
3007 return -EIO; in vhost_user_check_device_state()
3010 return 0; in vhost_user_check_device_state()