Lines matching "enable", "trace", and "backend" in QEMU's hw/virtio/vhost.c (a grep-style match listing; matched lines are non-contiguous and are grouped by function below).
File header and includes:
     * the COPYING file in the top-level directory.
     * Contributions after 2012-01-13 are licensed under the terms of the
    #include "qemu/error-report.h"
    #include "standard-headers/linux/vhost_types.h"
    #include "hw/virtio/virtio-bus.h"
    #include "hw/mem/memory-device.h"
    #include "migration/qemu-file-types.h"
    #include "trace.h"
    /* enabled until disconnected backend stabilizes */

In the VHOST_OPS_DEBUG() error-reporting macro:
                strerror(-retval), -retval); \
In vhost_get_max_memslots():
    max = MIN(max, hdev->vhost_ops->vhost_backend_memslots_limit(hdev));
In vhost_get_free_memslots():
    unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
    if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
        hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
        cur_free = r - used_shared_memslots;
    } else {
        cur_free = r - used_memslots;
    }
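
A minimal sketch of the accounting above (not QEMU code; the limit and the usage counters are hypothetical stand-ins for vhost_backend_memslots_limit() and the used_memslots/used_shared_memslots globals): free slots are the backend limit minus whichever counter applies, clamped at zero.

    #include <stdio.h>

    /* Hypothetical stand-ins for the backend limit and the global counters. */
    static unsigned int backend_limit = 32;
    static unsigned int used_shared   = 30;
    static unsigned int used_private  = 34; /* may transiently exceed the limit */

    /* Free slots: limit minus the applicable usage counter, clamped at zero. */
    static unsigned int free_memslots(int shared_only)
    {
        unsigned int used = shared_only ? used_shared : used_private;
        return used >= backend_limit ? 0 : backend_limit - used;
    }

    int main(void)
    {
        printf("shared-only backend: %u free\n", free_memslots(1));    /* 2 */
        printf("private-capable backend: %u free\n", free_memslots(0)); /* 0 */
        return 0;
    }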
In vhost_dev_sync_region():
    vhost_log_chunk_t *dev_log = dev->log->log;
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);
    /* We first check with non-atomic: much cheaper,
     * and we expect non-dirty to be the common case. */
    section_offset = page_addr - section->offset_within_address_space;
    mr_offset = section_offset + section->offset_within_region;
    memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
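
The log scanned above is an array of 64-bit chunks, each bit covering one VHOST_LOG_PAGE of guest memory. A self-contained sketch of the test-and-clear walk (the real code uses an atomic xchg on shared logs; a plain read-and-clear is enough to show the address arithmetic):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VHOST_LOG_PAGE  0x1000ULL
    typedef uint64_t vhost_log_chunk_t;
    #define VHOST_LOG_BITS  (8 * sizeof(vhost_log_chunk_t))
    #define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)

    /* Report every dirty page covered by one log chunk, then clear it. */
    static void sync_chunk(vhost_log_chunk_t *from, uint64_t chunk_index)
    {
        vhost_log_chunk_t log = *from;
        *from = 0;                          /* the real code uses atomic xchg */
        while (log) {
            int bit = __builtin_ctzll(log); /* lowest dirty bit */
            uint64_t page_addr = chunk_index * VHOST_LOG_CHUNK
                                 + (uint64_t)bit * VHOST_LOG_PAGE;
            printf("dirty page at 0x%" PRIx64 "\n", page_addr);
            log &= log - 1;                 /* clear that bit */
        }
    }

    int main(void)
    {
        vhost_log_chunk_t chunk = (1ULL << 0) | (1ULL << 5);
        sync_chunk(&chunk, 2);              /* pages at 0x80000 and 0x85000 */
        return 0;
    }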
In vhost_dev_has_iommu():
    VirtIODevice *vdev = dev->vdev;
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
     * does not have IOMMU, there's no need to enable this feature
In vhost_dev_should_log():
    assert(dev->vhost_ops);
    assert(dev->vhost_ops->backend_type > VHOST_BACKEND_TYPE_NONE);
    assert(dev->vhost_ops->backend_type < VHOST_BACKEND_TYPE_MAX);
    return dev == QLIST_FIRST(&vhost_log_devs[dev->vhost_ops->backend_type]);
In vhost_dev_elect_mem_logger():
    assert(hdev->vhost_ops);
    backend_type = hdev->vhost_ops->backend_type;
In vhost_sync_dirty_bitmap():
    if (!dev->log_enabled || !dev->started) {
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        if (!vq->used_phys && !vq->used_size) {
        hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
            iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                return -EINVAL;
            s = iotlb.addr_mask - offset;
            s = MIN(s, used_size - 1) + 1;
            used_size -= s;
                              end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
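
When an IOMMU is in use, the used ring is synced one IOTLB translation at a time, as the s = MIN(s, used_size - 1) + 1 and used_size -= s lines above show. A sketch of that chunked walk, with a hypothetical translator that covers at most one 4 KiB page per entry:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical IOTLB: each translation reaches the end of a 4 KiB page. */
    static uint64_t xlat_span(uint64_t phys)
    {
        return 0x1000 - (phys & 0xfff);
    }

    /* Process [used_phys, used_phys + used_size) one translated span at a time. */
    static void sync_used_ring(uint64_t used_phys, uint64_t used_size)
    {
        while (used_size) {
            uint64_t s = xlat_span(used_phys);
            /* same result as MIN(s, used_size - 1) + 1 when both are >= 1 */
            s = (s < used_size) ? s : used_size;
            printf("sync span 0x%llx..0x%llx\n",
                   (unsigned long long)used_phys,
                   (unsigned long long)(used_phys + s - 1));
            used_phys += s;
            used_size -= s;
        }
    }

    int main(void)
    {
        sync_used_ring(0x20f80, 0x200);     /* crosses one page boundary */
        return 0;
    }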
In vhost_log_sync_range():
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
In vhost_get_log_size():
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
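
The log size is whatever covers the highest guest physical address of any region, counted in chunks. A standalone sketch of that computation, reusing the chunk geometry from the earlier example:

    #include <stdint.h>
    #include <stdio.h>

    #define VHOST_LOG_PAGE  0x1000ULL
    #define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * 64)

    struct region { uint64_t gpa, size; };

    /* Chunks needed to cover the last byte of the highest region. */
    static uint64_t log_size(const struct region *regs, int n)
    {
        uint64_t sz = 0;
        for (int i = 0; i < n; i++) {
            uint64_t last = regs[i].gpa + regs[i].size - 1;
            uint64_t need = last / VHOST_LOG_CHUNK + 1;
            if (need > sz) {
                sz = need;
            }
        }
        return sz;
    }

    int main(void)
    {
        struct region regs[] = {
            { 0x0,            0x40000000ULL },  /* 1 GiB at 0 */
            { 0x100000000ULL, 0x40000000ULL },  /* 1 GiB at 4 GiB */
        };
        printf("%llu chunks\n", (unsigned long long)log_size(regs, 2));
        return 0;
    }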
In vhost_set_backend_type():
        dev->vhost_ops = &kernel_ops;
        dev->vhost_ops = &user_ops;
        dev->vhost_ops = &vdpa_ops;
        error_report("Unknown vhost backend type");
        r = -1;
    assert(dev->vhost_ops->backend_type == backend_type);
In vhost_log_alloc():
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;
        log->log = qemu_memfd_alloc("vhost-log", logsize,
        memset(log->log, 0, logsize);
        log->log = g_malloc0(logsize);
    log->size = size;
    log->refcnt = 1;
    log->fd = fd;
In vhost_log_get():
    if (!log || log->size != size) {
    ++log->refcnt;
In vhost_log_put():
    struct vhost_log *log = dev->log;
    assert(dev->vhost_ops);
    backend_type = dev->vhost_ops->backend_type;
    --log->refcnt;
    if (log->refcnt == 0) {
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
            g_free(log->log);
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
    dev->log = NULL;
    dev->log_size = 0;

In vhost_dev_log_is_shared():
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
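
vhost_log_get()/vhost_log_put() share one log per backend by reference count: a cached log is reused if the size matches, a mismatch allocates a fresh one, and the last put frees it. A minimal sketch of that pattern with a single cached slot (the real code keys the cache by backend type and may back the log with a memfd for shared logs):

    #include <stdint.h>
    #include <stdlib.h>

    struct log {
        uint64_t size;
        int refcnt;
        uint64_t *bits;
    };

    static struct log *cached;  /* one slot; QEMU keys this per backend type */

    static struct log *log_get(uint64_t size)
    {
        if (!cached || cached->size != size) {
            /* Mismatched size gets a fresh log; old holders keep theirs. */
            cached = calloc(1, sizeof(*cached));
            cached->size = size;
            cached->refcnt = 1;
            cached->bits = calloc(size, sizeof(uint64_t));
        } else {
            ++cached->refcnt;
        }
        return cached;
    }

    static void log_put(struct log *log)
    {
        if (--log->refcnt == 0) {
            free(log->bits);
            if (cached == log) {
                cached = NULL;
            }
            free(log);
        }
    }

    int main(void)
    {
        struct log *a = log_get(16);
        struct log *b = log_get(16);    /* shared: a == b, refcnt == 2 */
        log_put(a);
        log_put(b);                     /* last reference frees the log */
        return 0;
    }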
In vhost_dev_log_resize():
    struct vhost_log *log = vhost_log_get(dev->vhost_ops->backend_type,
    uint64_t log_base = (uintptr_t)log->log;
    /* inform backend of log switching, this must be done before
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    dev->log = log;
    dev->log_size = size;
In vhost_verify_ring_part_mapping():
        return -ENOMEM;
    hva_ring_offset = ring_gpa - reg_gpa;
        return -EBUSY;
In vhost_verify_ring_mappings():
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        if (vq->desc_phys == 0) {
                                         vq->desc, vq->desc_phys, vq->desc_size,
                                         vq->avail, vq->avail_phys, vq->avail_size,
                                         vq->used, vq->used_phys, vq->used_size,
    if (r == -ENOMEM) {
    } else if (r == -EBUSY) {
In vhost_section():
    MemoryRegion *mr = section->mr;
         * dirty-tracking other than migration for which it has
         * self-modifying code detection flags. However a vhost-user
         * client could still confuse a TCG guest if it re-writes
        trace_vhost_reject_section(mr->name, 1);
         * Some backends (like vhost-user) can only handle memory regions
        if (memory_region_get_fd(section->mr) < 0 &&
            dev->vhost_ops->vhost_backend_no_private_memslots &&
            dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
            trace_vhost_reject_section(mr->name, 2);
        trace_vhost_section(mr->name);
        trace_vhost_reject_section(mr->name, 3);
In vhost_begin():
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
In vhost_commit():
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;
    if (dev->n_mem_sections != n_old_sections) {
                                    &dev->mem_sections[i])) {
    trace_vhost_commit(dev->started, changed);
                   dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    if (dev->vhost_ops->vhost_backend_no_private_memslots &&
        dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
        used_shared_memslots = dev->mem->nregions;
    } else {
        used_memslots = dev->mem->nregions;
    }
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;
        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size = int128_get64(mrs->size);
        cur_vmr->userspace_addr =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding = 0;
    if (!dev->started) {
    for (i = 0; i < dev->mem->nregions; i++) {
                (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                dev->mem->regions[i].guest_phys_addr,
                dev->mem->regions[i].memory_size)) {
    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    if (dev->log_size < log_size) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
    while (n_old_sections--) {
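
The per-section loop above copies each flattened MemoryRegionSection into one entry of the backend-visible table. A sketch of that conversion with simplified stand-in types (the real code derives userspace_addr through memory_region_get_ram_ptr()):

    #include <stdint.h>
    #include <stdio.h>

    struct section { uint64_t gpa; uint64_t size; void *host; };
    struct vmr     { uint64_t guest_phys_addr, memory_size,
                              userspace_addr, flags_padding; };

    /* One backend-visible region per flattened section. */
    static void fill_table(const struct section *secs, struct vmr *regs, int n)
    {
        for (int i = 0; i < n; i++) {
            regs[i].guest_phys_addr = secs[i].gpa;
            regs[i].memory_size     = secs[i].size;
            regs[i].userspace_addr  = (uintptr_t)secs[i].host;
            regs[i].flags_padding   = 0;
        }
    }

    int main(void)
    {
        static char backing[0x2000];
        struct section secs[] = {
            { 0x0000, 0x1000, backing },
            { 0x1000, 0x1000, backing + 0x1000 },
        };
        struct vmr regs[2];
        fill_table(secs, regs, 2);
        printf("region 0: gpa 0x%llx -> uaddr %p\n",
               (unsigned long long)regs[0].guest_phys_addr,
               (void *)(uintptr_t)regs[0].userspace_addr);
        return 0;
    }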
In vhost_region_add_section():
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;
    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
    if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
        uint64_t alignage = mrs_host & (mrs_page - 1);
            mrs_host -= alignage;
            mrs_gpa  -= alignage;
        alignage = mrs_size & (mrs_page - 1);
            mrs_size += mrs_page - alignage;
        trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
    if (dev->n_tmp_sections && !section->unmergeable) {
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                        (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
            prev_sec->offset_within_region;
        /* OK, looks like overlapping/intersecting - it's possible that
                     __func__, section->mr->name, mrs_gpa,
                     prev_sec->mr->name, prev_gpa_start);
            size_t offset = mrs_gpa - prev_gpa_start;
            section->mr == prev_sec->mr && !prev_sec->unmergeable) {
            prev_sec->offset_within_address_space =
            prev_sec->offset_within_region =
                MIN(prev_host_start, mrs_host) -
                (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
            prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
            trace_vhost_region_add_section_merge(section->mr->name,
                                                 int128_get64(prev_sec->size),
                                                 prev_sec->offset_within_address_space,
                                                 prev_sec->offset_within_region);
    ++dev->n_tmp_sections;
    dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                dev->n_tmp_sections);
    dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
    dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
    memory_region_ref(section->mr);
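
For vhost-user the section is expanded to the RAM block's page size so the backend can mmap() whole pages: host and guest addresses are rounded down in lockstep and the size rounded up. A standalone sketch of that arithmetic, assuming a fixed 4 KiB page (the listing elides the step that grows the size by the amount the start moved down; it is included here so the end address stays fixed):

    #include <stdint.h>
    #include <stdio.h>

    /* Round (host, gpa, size) out to page boundaries, keeping host and gpa
     * offsets in sync, as done for vhost-user sections. */
    static void align_section(uint64_t page, uint64_t *host,
                              uint64_t *gpa, uint64_t *size)
    {
        uint64_t align = *host & (page - 1);
        *host -= align;                 /* round the start down */
        *gpa  -= align;
        *size += align;                 /* keep the end where it was */
        align = *size & (page - 1);
        if (align) {
            *size += page - align;      /* round the size up */
        }
    }

    int main(void)
    {
        uint64_t host = 0x7f0000001200ULL, gpa = 0x100001200ULL, size = 0x600;
        align_section(0x1000, &host, &gpa, &size);
        printf("host 0x%llx gpa 0x%llx size 0x%llx\n",
               (unsigned long long)host, (unsigned long long)gpa,
               (unsigned long long)size);          /* size becomes 0x1000 */
        return 0;
    }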
In vhost_iommu_unmap_notify():
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;
                                              iotlb->addr_mask + 1)) {
In vhost_iommu_region_add():
    if (!memory_region_is_iommu(section->mr)) {
    iommu_mr = IOMMU_MEMORY_REGION(section->mr);
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        dev->vdev->device_iotlb_enabled ?
                        section->offset_within_region,
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n,
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
In vhost_iommu_region_del():
    if (!memory_region_is_iommu(section->mr)) {
    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
In vhost_toggle_device_iotlb():
    if (vdev->vhost_started) {
        dev = vdc->get_vhost(vdev);
    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n);
        iommu->n.notifier_flags = vdev->device_iotlb_enabled ?
        memory_region_register_iommu_notifier(iommu->mr, &iommu->n,
In vhost_virtqueue_set_addr():
    if (dev->vhost_ops->vhost_vq_get_addr) {
        r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
        addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
        addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
        addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
    addr.log_guest_addr = vq->used_phys;
    r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
In vhost_dev_set_features():
    uint64_t features = dev->acked_features;
    if (dev->vhost_ops->vhost_force_iommu) {
        if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (dev->vhost_ops->vhost_set_backend_cap) {
        r = dev->vhost_ops->vhost_set_backend_cap(dev);
In vhost_dev_set_log():
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
     * backend.
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    vhost_dev_set_features(dev, dev->log_enabled);
In vhost_migration_log():
    static int vhost_migration_log(MemoryListener *listener, bool enable)
    if (enable == dev->log_enabled) {
    if (!dev->started) {
        dev->log_enabled = enable;
    if (!enable) {
    dev->log_enabled = enable;
     * vhost-user-* devices could change their state during log
    if (!dev->started) {
        dev->log_enabled = false;
In vhost_needs_vring_endian():
     * cross-endian legacy devices and modern devices. Only legacy devices
     * exposed to a bi-endian guest may require the vhost driver to use a
        return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;

In vhost_virtqueue_set_vring_endian_legacy():
    r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
In vhost_memory_region_lookup():
    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;
        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
    return -EFAULT;
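
A sketch of the lookup above: scan the region table for the entry containing the GPA, and return the translated user address plus the bytes remaining in that region (callers use the length to clamp their access):

    #include <stdint.h>
    #include <stdio.h>

    struct vmr { uint64_t gpa, size, uaddr; };

    /* Translate gpa to a user address; 0 on success, -1 if unmapped. */
    static int lookup(const struct vmr *regs, int n, uint64_t gpa,
                      uint64_t *uaddr, uint64_t *len)
    {
        for (int i = 0; i < n; i++) {
            if (gpa >= regs[i].gpa && gpa < regs[i].gpa + regs[i].size) {
                *uaddr = regs[i].uaddr + (gpa - regs[i].gpa);
                *len   = regs[i].gpa + regs[i].size - gpa; /* bytes left */
                return 0;
            }
        }
        return -1;  /* stands in for -EFAULT */
    }

    int main(void)
    {
        struct vmr regs[] = { { 0x1000, 0x1000, 0x7f00deadb000ULL } };
        uint64_t uaddr, len;
        if (lookup(regs, 1, 0x1800, &uaddr, &len) == 0) {
            printf("uaddr 0x%llx len 0x%llx\n",
                   (unsigned long long)uaddr, (unsigned long long)len);
        }
        return 0;
    }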
In vhost_device_iotlb_miss():
    int ret = -EFAULT;
    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
In vhost_virtqueue_start():
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, false);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, false);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, true);
    if (!vq->used || l != s) {
        r = -ENOMEM;
    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    event_notifier_test_and_clear(&vq->masked_notifier);
    if (!vdev->use_guest_notifier_mask) {
    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
In vhost_virtqueue_stop():
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    /* Connection to the backend is broken, so let's sync internal
    /* In the cross-endian case, we need to reset the vring endianness to
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
In vhost_virtqueue_set_busyloop_timeout():
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
In vhost_virtqueue_error_notifier():
    struct vhost_dev *dev = vq->dev;
    int index = vq - dev->vqs;
    if (event_notifier_test_and_clear(n) && dev->vdev) {
        VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
                        dev->vq_index + index);
In vhost_virtqueue_init():
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    int r = event_notifier_init(&vq->masked_notifier, 0);
    file.fd = event_notifier_get_wfd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    vq->dev = dev;
    if (dev->vhost_ops->vhost_set_vring_err) {
        r = event_notifier_init(&vq->error_notifier, 0);
        file.fd = event_notifier_get_fd(&vq->error_notifier);
        r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
        event_notifier_set_handler(&vq->error_notifier,
    event_notifier_cleanup(&vq->error_notifier);
    event_notifier_cleanup(&vq->masked_notifier);
In vhost_virtqueue_cleanup():
    event_notifier_cleanup(&vq->masked_notifier);
    if (vq->dev->vhost_ops->vhost_set_vring_err) {
        event_notifier_set_handler(&vq->error_notifier, NULL);
        event_notifier_cleanup(&vq->error_notifier);
In vhost_dev_init():
    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;
    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp);
    r = hdev->vhost_ops->vhost_set_owner(hdev);
        error_setg_errno(errp, -r, "vhost_set_owner failed");
    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
        error_setg_errno(errp, -r, "vhost_get_features failed");
    limit = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        error_setg(errp, "some memory device (like virtio-mem)"
                   " number of memory slots; this vhost backend would further"
        error_append_hint(errp, "Try plugging this vhost backend before"
        r = -EINVAL;
    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
            error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i);
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
            error_setg_errno(errp, -r, "Failed to set busyloop timeout");
    hdev->features = features;
    hdev->memory_listener = (MemoryListener) {
    hdev->iommu_listener = (MemoryListener) {
        .name = "vhost-iommu",
    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
            error_setg(&hdev->migration_blocker,
    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker_normal(&hdev->migration_blocker, errp);
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
        hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
     * in our vhost backend. This might not be true, for example, if the
     * memslot would be ROM. If ever relevant, we can optimize for that --
        error_setg(errp, "vhost backend memory slots limit (%d) is less"
        r = -EINVAL;
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    hdev->nvqs = n_initialized_vqs;
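
Initialization unwinds in reverse on failure: the loop counter doubles as a high-water mark, so only what was actually set up gets torn down (note ++n_initialized_vqs in the loop header and the final hdev->nvqs = n_initialized_vqs above). A sketch of the idiom with hypothetical init/fini helpers:

    #include <stdio.h>

    static int  init_vq(int i) { printf("init %d\n", i); return i == 3 ? -1 : 0; }
    static void fini_vq(int i) { printf("cleanup %d\n", i); }

    static int init_all(int nvqs)
    {
        int i, n_initialized = 0;

        for (i = 0; i < nvqs; i++, n_initialized++) {
            if (init_vq(i) < 0) {
                goto fail;
            }
        }
        return 0;

    fail:
        while (--n_initialized >= 0) {
            fini_vq(n_initialized);     /* tear down only what was set up */
        }
        return -1;
    }

    int main(void)
    {
        return init_all(5) ? 1 : 0;     /* fails at vq 3, cleans up 2..0 */
    }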
In vhost_dev_cleanup():
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    if (hdev->mem) {
        memory_listener_unregister(&hdev->memory_listener);
    migrate_del_blocker(&hdev->migration_blocker);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    assert(!hdev->log);
In vhost_dev_disable_notifiers_nvqs():
    r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
        error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
    virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
In vhost_dev_enable_notifiers():
    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);

In vhost_dev_disable_notifiers():
    vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs);
In vhost_virtqueue_pending():
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
In vhost_virtqueue_mask():
    int r, index = n - hdev->vq_index;
    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);
    assert(vdev->use_guest_notifier_mask);
    file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier);
    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
        error_report("vhost_set_vring_call failed %d", -r);
In vhost_config_pending():
    assert(hdev->vhost_ops);
    if ((hdev->started == false) ||
        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;

In vhost_config_mask():
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
    EventNotifier *config_notifier = &vdev->config_notifier;
    assert(hdev->vhost_ops);
    if ((hdev->started == false) ||
        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
    assert(vdev->use_guest_notifier_mask);
    r = hdev->vhost_ops->vhost_set_config_call(hdev, fd);
        error_report("vhost_set_config_call failed %d", -r);
In vhost_stop_config_intr():
    int fd = -1;
    assert(dev->vhost_ops);
    if (dev->vhost_ops->vhost_set_config_call) {
        dev->vhost_ops->vhost_set_config_call(dev, fd);

In vhost_start_config_intr():
    assert(dev->vhost_ops);
    int fd = event_notifier_get_fd(&dev->vdev->config_notifier);
    if (dev->vhost_ops->vhost_set_config_call) {
        r = dev->vhost_ops->vhost_set_config_call(dev, fd);
            event_notifier_set(&dev->vdev->config_notifier);
In vhost_get_features():
    if (!(hdev->features & bit_mask)) {

In vhost_ack_features():
    hdev->acked_features |= bit_mask;

In vhost_dev_get_config():
    assert(hdev->vhost_ops);
    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len,
    return -ENOSYS;

In vhost_dev_set_config():
    assert(hdev->vhost_ops);
    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
    return -ENOSYS;

In vhost_dev_set_config_notifier():
    hdev->config_ops = ops;
In vhost_dev_free_inflight():
    if (inflight && inflight->addr) {
        qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
        inflight->addr = NULL;
        inflight->fd = -1;

In vhost_dev_prepare_inflight():
    if (hdev->vhost_ops->vhost_get_inflight_fd == NULL ||
        hdev->vhost_ops->vhost_set_inflight_fd == NULL) {
    hdev->vdev = vdev;
    r = vhost_dev_set_features(hdev, hdev->log_enabled);

In vhost_dev_set_inflight():
    if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
        r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);

In vhost_dev_get_inflight():
    if (dev->vhost_ops->vhost_get_inflight_fd) {
        r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
In vhost_dev_set_vring_enable():
    static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
    if (!hdev->vhost_ops->vhost_set_vring_enable) {
     * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
    if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER &&
        !virtio_has_feature(hdev->backend_features,
    return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
In vhost_dev_start():
     * If @vrings is true, this function will enable all vrings before starting the
    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);
    trace_vhost_dev_start(hdev, vdev->name, vrings);
    vdev->vhost_started = true;
    hdev->started = true;
    hdev->vdev = vdev;
    r = vhost_dev_set_features(hdev, hdev->log_enabled);
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    for (i = 0; i < hdev->nvqs; ++i) {
                                 hdev->vqs + i,
                                 hdev->vq_index + i);
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0);
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
    if (!vdev->use_guest_notifier_mask) {
    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->vhost_ops->backend_type,
                                  hdev->log_size,
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
    if (hdev->vhost_ops->vhost_dev_start) {
        r = hdev->vhost_ops->vhost_dev_start(hdev, true);
        hdev->vhost_ops->vhost_set_iotlb_callback) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
         * vhost-kernel code requires for this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            r = vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        hdev->vhost_ops->vhost_set_iotlb_callback) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
    if (hdev->vhost_ops->vhost_dev_start) {
        hdev->vhost_ops->vhost_dev_start(hdev, false);
    while (--i >= 0) {
                                hdev->vqs + i,
                                hdev->vq_index + i);
    memory_listener_unregister(&hdev->iommu_listener);
    vdev->vhost_started = false;
    hdev->started = false;
In vhost_dev_stop():
    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
    event_notifier_test_and_clear(&vdev->config_notifier);
        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
    trace_vhost_dev_stop(hdev, vdev->name, vrings);
    if (hdev->vhost_ops->vhost_dev_start) {
        hdev->vhost_ops->vhost_dev_start(hdev, false);
    for (i = 0; i < hdev->nvqs; ++i) {
                             hdev->vqs + i,
                             hdev->vq_index + i);
    if (hdev->vhost_ops->vhost_reset_status) {
        hdev->vhost_ops->vhost_reset_status(hdev);
    if (hdev->vhost_ops->vhost_set_iotlb_callback) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
    memory_listener_unregister(&hdev->iommu_listener);
    hdev->started = false;
    vdev->vhost_started = false;
    hdev->vdev = NULL;
In vhost_net_set_backend():
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    return -ENOSYS;

In vhost_reset_device():
    if (hdev->vhost_ops->vhost_reset_device) {
        return hdev->vhost_ops->vhost_reset_device(hdev);
    return -ENOSYS;

In vhost_supports_device_state():
    if (dev->vhost_ops->vhost_supports_device_state) {
        return dev->vhost_ops->vhost_supports_device_state(dev);

In vhost_set_device_state_fd():
    if (dev->vhost_ops->vhost_set_device_state_fd) {
        return dev->vhost_ops->vhost_set_device_state_fd(dev, direction, phase,
    return -ENOSYS;

In vhost_check_device_state():
    if (dev->vhost_ops->vhost_check_device_state) {
        return dev->vhost_ops->vhost_check_device_state(dev, errp);
    return -ENOSYS;
In vhost_save_backend_state():
    int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1;
    /* [0] for reading (our end), [1] for writing (back-end's end) */
                   g_err->message);
        ret = -EINVAL;
     * vhost-user, so just check that it is stopped at all.
    assert(!dev->started);
    /* Transfer ownership of write_fd to the back-end */
    /* If the back-end wishes to use a different pipe, switch over */
            ret = -errno;
            error_setg_errno(errp, -ret, "Failed to receive state");
     * Back-end will not really care, but be clean and close our end of the pipe
     * before inquiring the back-end about whether transfer was successful
    read_fd = -1;
    assert(!dev->started);
In vhost_load_backend_state():
    int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1;
    /* [0] for reading (back-end's end), [1] for writing (our end) */
                   g_err->message);
        ret = -EINVAL;
     * vhost-user, so just check that it is stopped at all.
    assert(!dev->started);
    /* Transfer ownership of read_fd to the back-end */
    /* If the back-end wishes to use a different pipe, switch over */
        ret = -EINVAL;
            ret = -errno;
            error_setg_errno(errp, -ret, "Failed to send state");
            ret = -ECONNRESET;
        this_chunk_size -= write_ret;
     * Close our end, thus ending transfer, before inquiring the back-end about
    write_fd = -1;
    assert(!dev->started);
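
The send loop above must cope with short writes on the pipe, as the this_chunk_size -= write_ret line shows. A self-contained sketch of that pattern (generic POSIX, not the QEMU helper):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Write all of buf to fd, retrying on short writes; 0 or -errno. */
    static int write_all(int fd, const char *buf, size_t len)
    {
        while (len > 0) {
            ssize_t ret = write(fd, buf, len);
            if (ret < 0) {
                if (errno == EINTR) {
                    continue;           /* interrupted, retry */
                }
                return -errno;
            }
            buf += ret;
            len -= ret;                 /* mirrors this_chunk_size -= write_ret */
        }
        return 0;
    }

    int main(void)
    {
        int fds[2];
        if (pipe(fds) != 0) {
            return 1;
        }
        const char state[] = "device state bytes";
        printf("write_all -> %d\n", write_all(fds[1], state, sizeof(state)));
        close(fds[0]);
        close(fds[1]);
        return 0;
    }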