2 * vhost-vdpa
4 * Copyright(c) 2017-2018 Intel Corporation.
8 * See the COPYING file in the top-level directory.
13 #include <linux/vhost.h>
18 #include "hw/virtio/vhost.h"
19 #include "hw/virtio/vhost-backend.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/virtio/vhost-shadow-virtqueue.h"
22 #include "hw/virtio/vhost-vdpa.h"
23 #include "system/address-spaces.h"
26 #include "qemu/main-loop.h"
37 Int128 llend = int128_make64(section->offset_within_address_space); in vhost_vdpa_section_end()
38 llend = int128_add(llend, section->size); in vhost_vdpa_section_end()
50 bool is_ram = memory_region_is_ram(section->mr); in vhost_vdpa_listener_skipped_section()
51 bool is_iommu = memory_region_is_iommu(section->mr); in vhost_vdpa_listener_skipped_section()
52 bool is_protected = memory_region_is_protected(section->mr); in vhost_vdpa_listener_skipped_section()
54 /* vhost-vDPA doesn't allow MMIO to be mapped */ in vhost_vdpa_listener_skipped_section()
55 bool is_ram_device = memory_region_is_ram_device(section->mr); in vhost_vdpa_listener_skipped_section()
64 if (section->offset_within_address_space < iova_min) { in vhost_vdpa_listener_skipped_section()
67 iova_min, section->offset_within_address_space); in vhost_vdpa_listener_skipped_section()
98 int fd = s->device_fd; in vhost_vdpa_dma_map()
116 return -EIO; in vhost_vdpa_dma_map()
130 int fd = s->device_fd; in vhost_vdpa_dma_unmap()
145 return -EIO; in vhost_vdpa_dma_unmap()
153 int fd = s->device_fd; in vhost_vdpa_listener_begin_batch()
168 if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) && in vhost_vdpa_iotlb_batch_begin_once()
169 !s->iotlb_batch_begin_sent) { in vhost_vdpa_iotlb_batch_begin_once()
173 s->iotlb_batch_begin_sent = true; in vhost_vdpa_iotlb_batch_begin_once()
180 int fd = s->device_fd; in vhost_vdpa_listener_commit()
182 if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) { in vhost_vdpa_listener_commit()
186 if (!s->iotlb_batch_begin_sent) { in vhost_vdpa_listener_commit()
199 s->iotlb_batch_begin_sent = false; in vhost_vdpa_listener_commit()
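/*
 * Editor's sketch (not from the original file): how a batch is framed on
 * the vhost-vdpa device fd once VHOST_BACKEND_F_IOTLB_BATCH is negotiated.
 * Types and constants come from <linux/vhost.h> / <linux/vhost_types.h>;
 * the helper name is illustrative.
 */
static int example_iotlb_batch_msg(int device_fd, uint32_t batch_type)
{
    struct vhost_msg_v2 msg = {
        .type = VHOST_IOTLB_MSG_V2,
        .iotlb.type = batch_type,   /* VHOST_IOTLB_BATCH_BEGIN or _END */
    };

    /* Batch control, like map/unmap updates, is a write() of one message */
    if (write(device_fd, &msg, sizeof(msg)) != sizeof(msg)) {
        return -errno;
    }
    return 0;
}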
206 hwaddr iova = iotlb->iova + iommu->iommu_offset; in vhost_vdpa_iommu_map_notify()
207 VhostVDPAShared *s = iommu->dev_shared; in vhost_vdpa_iommu_map_notify()
215 if (iotlb->target_as != &address_space_memory) { in vhost_vdpa_iommu_map_notify()
217 iotlb->target_as->name ? iotlb->target_as->name : "none"); in vhost_vdpa_iommu_map_notify()
222 llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova)); in vhost_vdpa_iommu_map_notify()
223 if (int128_gt(llend, int128_make64(s->iova_range.last))) { in vhost_vdpa_iommu_map_notify()
226 s->iova_range.last, int128_get64(llend)); in vhost_vdpa_iommu_map_notify()
230 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { in vhost_vdpa_iommu_map_notify()
239 read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly; in vhost_vdpa_iommu_map_notify()
242 iotlb->addr_mask + 1, vaddr, read_only); in vhost_vdpa_iommu_map_notify()
246 s, iova, iotlb->addr_mask + 1, vaddr, ret); in vhost_vdpa_iommu_map_notify()
250 iotlb->addr_mask + 1); in vhost_vdpa_iommu_map_notify()
254 s, iova, iotlb->addr_mask + 1, ret); in vhost_vdpa_iommu_map_notify()
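/*
 * Editor's sketch: the overflow-safe end-of-range check performed above,
 * isolated.  Int128 helpers are those from qemu/int128.h; the function
 * name is illustrative only.
 */
static bool example_mapping_fits_iova_range(hwaddr iova, hwaddr addr_mask,
                                            hwaddr range_last)
{
    /* end = iova + addr_mask, computed in 128 bits so it cannot wrap */
    Int128 end = int128_add(int128_make64(iova), int128_make64(addr_mask));

    return int128_le(end, int128_make64(range_last));
}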
270 iommu_mr = IOMMU_MEMORY_REGION(section->mr); in vhost_vdpa_iommu_region_add()
273 end = int128_add(int128_make64(section->offset_within_region), in vhost_vdpa_iommu_region_add()
274 section->size); in vhost_vdpa_iommu_region_add()
278 iommu->iommu_mr = iommu_mr; in vhost_vdpa_iommu_region_add()
279 iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify, in vhost_vdpa_iommu_region_add()
281 section->offset_within_region, in vhost_vdpa_iommu_region_add()
284 iommu->iommu_offset = section->offset_within_address_space - in vhost_vdpa_iommu_region_add()
285 section->offset_within_region; in vhost_vdpa_iommu_region_add()
286 iommu->dev_shared = s; in vhost_vdpa_iommu_region_add()
288 ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL); in vhost_vdpa_iommu_region_add()
294 QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next); in vhost_vdpa_iommu_region_add()
295 memory_region_iommu_replay(iommu->iommu_mr, &iommu->n); in vhost_vdpa_iommu_region_add()
305 QLIST_FOREACH(iommu, &s->iommu_list, iommu_next) in vhost_vdpa_iommu_region_del()
307 if (MEMORY_REGION(iommu->iommu_mr) == section->mr && in vhost_vdpa_iommu_region_del()
308 iommu->n.start == section->offset_within_region) { in vhost_vdpa_iommu_region_del()
309 memory_region_unregister_iommu_notifier(section->mr, &iommu->n); in vhost_vdpa_iommu_region_del()
327 int page_mask = -page_size; in vhost_vdpa_listener_region_add()
329 if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, in vhost_vdpa_listener_region_add()
330 s->iova_range.last, page_mask)) { in vhost_vdpa_listener_region_add()
333 if (memory_region_is_iommu(section->mr)) { in vhost_vdpa_listener_region_add()
338 if (unlikely((section->offset_within_address_space & ~page_mask) != in vhost_vdpa_listener_region_add()
339 (section->offset_within_region & ~page_mask))) { in vhost_vdpa_listener_region_add()
340 trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name, in vhost_vdpa_listener_region_add()
341 section->offset_within_address_space & ~page_mask, in vhost_vdpa_listener_region_add()
342 section->offset_within_region & ~page_mask); in vhost_vdpa_listener_region_add()
346 iova = ROUND_UP(section->offset_within_address_space, page_size); in vhost_vdpa_listener_region_add()
352 memory_region_ref(section->mr); in vhost_vdpa_listener_region_add()
354 /* Here we assume that memory_region_is_ram(section->mr)==true */ in vhost_vdpa_listener_region_add()
356 vaddr = memory_region_get_ram_ptr(section->mr) + in vhost_vdpa_listener_region_add()
357 section->offset_within_region + in vhost_vdpa_listener_region_add()
358 (iova - section->offset_within_address_space); in vhost_vdpa_listener_region_add()
361 vaddr, section->readonly); in vhost_vdpa_listener_region_add()
364 if (s->shadow_data) { in vhost_vdpa_listener_region_add()
366 hwaddr gpa = section->offset_within_address_space; in vhost_vdpa_listener_region_add()
368 mem_region.size = int128_get64(llsize) - 1, in vhost_vdpa_listener_region_add()
369 mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly), in vhost_vdpa_listener_region_add()
371 r = vhost_iova_tree_map_alloc_gpa(s->iova_tree, &mem_region, gpa); in vhost_vdpa_listener_region_add()
376 error_report("Insertion to GPA->IOVA tree failed"); in vhost_vdpa_listener_region_add()
377 /* Remove the mapping from the IOVA-only tree */ in vhost_vdpa_listener_region_add()
388 int128_get64(llsize), vaddr, section->readonly); in vhost_vdpa_listener_region_add()
390 error_report("vhost vdpa map fail!"); in vhost_vdpa_listener_region_add()
397 if (s->shadow_data) { in vhost_vdpa_listener_region_add()
398 vhost_iova_tree_remove_gpa(s->iova_tree, mem_region); in vhost_vdpa_listener_region_add()
407 error_report("vhost-vdpa: DMA mapping failed, unable to continue"); in vhost_vdpa_listener_region_add()
420 int page_mask = -page_size; in vhost_vdpa_listener_region_del()
422 if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, in vhost_vdpa_listener_region_del()
423 s->iova_range.last, page_mask)) { in vhost_vdpa_listener_region_del()
426 if (memory_region_is_iommu(section->mr)) { in vhost_vdpa_listener_region_del()
430 if (unlikely((section->offset_within_address_space & ~page_mask) != in vhost_vdpa_listener_region_del()
431 (section->offset_within_region & ~page_mask))) { in vhost_vdpa_listener_region_del()
432 trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name, in vhost_vdpa_listener_region_del()
433 section->offset_within_address_space & ~page_mask, in vhost_vdpa_listener_region_del()
434 section->offset_within_region & ~page_mask); in vhost_vdpa_listener_region_del()
438 iova = ROUND_UP(section->offset_within_address_space, page_size); in vhost_vdpa_listener_region_del()
450 if (s->shadow_data) { in vhost_vdpa_listener_region_del()
453 .translated_addr = section->offset_within_address_space, in vhost_vdpa_listener_region_del()
454 .size = int128_get64(llsize) - 1, in vhost_vdpa_listener_region_del()
457 result = vhost_iova_tree_find_gpa(s->iova_tree, &mem_region); in vhost_vdpa_listener_region_del()
462 iova = result->iova; in vhost_vdpa_listener_region_del()
463 vhost_iova_tree_remove_gpa(s->iova_tree, *result); in vhost_vdpa_listener_region_del()
467 * The unmap ioctl doesn't accept a full 64-bit span. Need to check it in vhost_vdpa_listener_region_del()
490 memory_region_unref(section->mr); in vhost_vdpa_listener_region_del()
493 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
494 * of the mapping. So we cannot use the generic vhost memory listener, which
498 .name = "vhost-vdpa",
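/*
 * Editor's sketch: the likely shape of the dedicated listener.  Only the
 * .name field is visible in this listing; the callback assignments below
 * mirror the functions above but are reconstructed, not copied.
 */
static const MemoryListener example_vdpa_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};
/*
 * It is registered against the device's DMA address space, e.g.
 * memory_listener_register(&v->shared->listener, dev->vdev->dma_as),
 * as seen later in vhost_vdpa_dev_start().
 */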
507 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_call()
508 int fd = v->shared->device_fd; in vhost_vdpa_call()
511 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_call()
514 return ret < 0 ? -errno : ret; in vhost_vdpa_call()
545 return -EIO; in vhost_vdpa_add_status()
555 return ret < 0 ? -errno : 0; in vhost_vdpa_get_iova_range()
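/*
 * Editor's sketch: the underlying ioctl that reports the usable IOVA
 * window.  struct vhost_vdpa_iova_range and VHOST_VDPA_GET_IOVA_RANGE are
 * kernel UAPI; error handling is simplified.
 */
static int example_get_iova_range(int device_fd,
                                  struct vhost_vdpa_iova_range *range)
{
    /* range->first..range->last is the only window DMA maps may use */
    return ioctl(device_fd, VHOST_VDPA_GET_IOVA_RANGE, range) < 0 ? -errno : 0;
}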
563 * set, which would need to check dev->vq_index_end instead.
567 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_first_dev()
569 return v->index == 0; in vhost_vdpa_first_dev()
574 return dev->vq_index + dev->nvqs == dev->vq_index_end; in vhost_vdpa_last_dev()
591 shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); in vhost_vdpa_init_svq()
592 for (unsigned n = 0; n < hdev->nvqs; ++n) { in vhost_vdpa_init_svq()
595 svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque); in vhost_vdpa_init_svq()
599 v->shadow_vqs = g_steal_pointer(&shadow_vqs); in vhost_vdpa_init_svq()
604 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_backend_cap()
614 return -EFAULT; in vhost_vdpa_set_backend_cap()
622 return -EFAULT; in vhost_vdpa_set_backend_cap()
626 dev->backend_cap = features; in vhost_vdpa_set_backend_cap()
627 v->shared->backend_cap = features; in vhost_vdpa_set_backend_cap()
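/*
 * Editor's sketch: the backend-feature handshake that fills backend_cap.
 * The two ioctls are kernel UAPI; the mask argument is illustrative, e.g.
 * BIT_ULL(VHOST_BACKEND_F_IOTLB_MSG_V2) | BIT_ULL(VHOST_BACKEND_F_IOTLB_BATCH).
 */
static int example_negotiate_backend_features(int device_fd, uint64_t wanted,
                                              uint64_t *negotiated)
{
    uint64_t features;

    if (ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0) {
        return -errno;
    }
    features &= wanted;   /* only ack what both sides support */
    if (ioctl(device_fd, VHOST_SET_BACKEND_FEATURES, &features) < 0) {
        return -errno;
    }
    *negotiated = features;
    return 0;
}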
635 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_init()
636 trace_vhost_vdpa_init(dev, v->shared, opaque); in vhost_vdpa_init()
639 v->dev = dev; in vhost_vdpa_init()
640 dev->opaque = opaque; in vhost_vdpa_init()
649 error_propagate(&dev->migration_blocker, v->migration_blocker); in vhost_vdpa_init()
655 * If dev->shadow_vqs_enabled is set at initialization, the device has in vhost_vdpa_init()
656 * been started with x-svq=on, so don't block migration in vhost_vdpa_init()
658 if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) { in vhost_vdpa_init()
659 /* We don't have dev->features yet */ in vhost_vdpa_init()
663 error_setg_errno(errp, -ret, "Could not get device features"); in vhost_vdpa_init()
666 vhost_svq_valid_features(features, &dev->migration_blocker); in vhost_vdpa_init()
671 * disable discarding of RAM. in vhost_vdpa_init()
682 v->shared->listener = vhost_vdpa_memory_listener; in vhost_vdpa_init()
690 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_host_notifier_uninit()
691 VirtIODevice *vdev = dev->vdev; in vhost_vdpa_host_notifier_uninit()
694 n = &v->notifier[queue_index]; in vhost_vdpa_host_notifier_uninit()
696 if (n->addr) { in vhost_vdpa_host_notifier_uninit()
697 virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false); in vhost_vdpa_host_notifier_uninit()
698 object_unparent(OBJECT(&n->mr)); in vhost_vdpa_host_notifier_uninit()
699 munmap(n->addr, page_size); in vhost_vdpa_host_notifier_uninit()
700 n->addr = NULL; in vhost_vdpa_host_notifier_uninit()
707 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_host_notifier_init()
708 VirtIODevice *vdev = dev->vdev; in vhost_vdpa_host_notifier_init()
710 int fd = v->shared->device_fd; in vhost_vdpa_host_notifier_init()
716 n = &v->notifier[queue_index]; in vhost_vdpa_host_notifier_init()
724 name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]", in vhost_vdpa_host_notifier_init()
726 memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, in vhost_vdpa_host_notifier_init()
730 if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) { in vhost_vdpa_host_notifier_init()
731 object_unparent(OBJECT(&n->mr)); in vhost_vdpa_host_notifier_init()
735 n->addr = addr; in vhost_vdpa_host_notifier_init()
740 return -1; in vhost_vdpa_host_notifier_init()
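/*
 * Editor's sketch: the doorbell mapping behind each host notifier.  The
 * one-host-page-per-queue offset convention follows the code above;
 * confirm it against the vhost-vdpa UAPI before relying on it.
 */
static void *example_map_doorbell(int device_fd, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    void *addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
                      device_fd, queue_index * page_size);

    return addr == MAP_FAILED ? NULL : addr;
}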
754 for (i = dev->vq_index; i < dev->vq_index + n; i++) { in vhost_vdpa_host_notifiers_uninit()
763 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_host_notifiers_init()
766 if (v->shadow_vqs_enabled) { in vhost_vdpa_host_notifiers_init()
778 for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) { in vhost_vdpa_host_notifiers_init()
780 vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index); in vhost_vdpa_host_notifiers_init()
790 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svq_cleanup()
793 for (idx = 0; idx < v->shadow_vqs->len; ++idx) { in vhost_vdpa_svq_cleanup()
794 vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx)); in vhost_vdpa_svq_cleanup()
796 g_ptr_array_free(v->shadow_vqs, true); in vhost_vdpa_svq_cleanup()
802 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_cleanup()
803 v = dev->opaque; in vhost_vdpa_cleanup()
807 memory_listener_unregister(&v->shared->listener); in vhost_vdpa_cleanup()
810 vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); in vhost_vdpa_cleanup()
813 dev->opaque = NULL; in vhost_vdpa_cleanup()
831 trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding); in vhost_vdpa_set_mem_table()
835 for (i = 0; i < mem->nregions; i++) { in vhost_vdpa_set_mem_table()
837 mem->regions[i].guest_phys_addr, in vhost_vdpa_set_mem_table()
838 mem->regions[i].memory_size, in vhost_vdpa_set_mem_table()
839 mem->regions[i].userspace_addr, in vhost_vdpa_set_mem_table()
840 mem->regions[i].flags_padding); in vhost_vdpa_set_mem_table()
843 if (mem->padding) { in vhost_vdpa_set_mem_table()
844 return -EINVAL; in vhost_vdpa_set_mem_table()
853 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_features()
860 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_features()
861 if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) { in vhost_vdpa_set_features()
863 * QEMU is just trying to enable or disable logging. SVQ handles in vhost_vdpa_set_features()
866 v->acked_features = features; in vhost_vdpa_set_features()
870 v->acked_features = features; in vhost_vdpa_set_features()
896 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_reset_device()
906 memory_listener_unregister(&v->shared->listener); in vhost_vdpa_reset_device()
907 v->shared->listener_registered = false; in vhost_vdpa_reset_device()
908 v->suspended = false; in vhost_vdpa_reset_device()
914 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); in vhost_vdpa_get_vq_index()
923 struct vhost_dev *dev = v->dev; in vhost_vdpa_set_vring_enable_one()
936 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_enable()
940 for (i = 0; i < dev->nvqs; ++i) { in vhost_vdpa_set_vring_enable()
969 len = MIN(config_len - b, 16); in vhost_vdpa_dump_config()
973 trace_vhost_vdpa_dump_config(dev, b, str->str); in vhost_vdpa_dump_config()
987 config->off = offset; in vhost_vdpa_set_config()
988 config->len = size; in vhost_vdpa_set_config()
989 memcpy(config->buf, data, size); in vhost_vdpa_set_config()
1008 v_config->len = config_len; in vhost_vdpa_get_config()
1009 v_config->off = 0; in vhost_vdpa_get_config()
1011 memcpy(config, v_config->buf, config_len); in vhost_vdpa_get_config()
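/*
 * Editor's sketch: why the config helpers above allocate the header plus
 * config_len bytes.  struct vhost_vdpa_config carries a variable-length
 * buf[]; the helper name is illustrative.
 */
static int example_read_device_config(int device_fd, void *dst, uint32_t len)
{
    struct vhost_vdpa_config *cfg = g_malloc0(sizeof(*cfg) + len);
    int ret = 0;

    cfg->off = 0;       /* start of the device config space */
    cfg->len = len;
    if (ioctl(device_fd, VHOST_VDPA_GET_CONFIG, cfg) < 0) {
        ret = -errno;
    } else {
        memcpy(dst, cfg->buf, len);
    }
    g_free(cfg);
    return ret;
}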
1023 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_dev_vring_base()
1025 trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num, in vhost_vdpa_set_dev_vring_base()
1026 v->shadow_vqs_enabled); in vhost_vdpa_set_dev_vring_base()
1033 trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd); in vhost_vdpa_set_vring_dev_kick()
1040 trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd); in vhost_vdpa_set_vring_dev_call()
1047 trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags, in vhost_vdpa_set_vring_dev_addr()
1048 addr->desc_user_addr, addr->used_user_addr, in vhost_vdpa_set_vring_dev_addr()
1049 addr->avail_user_addr, in vhost_vdpa_set_vring_dev_addr()
1050 addr->log_guest_addr); in vhost_vdpa_set_vring_dev_addr()
1059 * @dev: The vhost device model
1061 * @idx: The index of the virtqueue in the vhost device
1072 .index = dev->vq_index + idx, in vhost_vdpa_svq_set_fds()
1074 const EventNotifier *event_notifier = &svq->hdev_kick; in vhost_vdpa_svq_set_fds()
1077 r = event_notifier_init(&svq->hdev_kick, 0); in vhost_vdpa_svq_set_fds()
1079 error_setg_errno(errp, -r, "Couldn't create kick event notifier"); in vhost_vdpa_svq_set_fds()
1083 r = event_notifier_init(&svq->hdev_call, 0); in vhost_vdpa_svq_set_fds()
1085 error_setg_errno(errp, -r, "Couldn't create call event notifier"); in vhost_vdpa_svq_set_fds()
1092 error_setg_errno(errp, -r, "Can't set device kick fd"); in vhost_vdpa_svq_set_fds()
1096 event_notifier = &svq->hdev_call; in vhost_vdpa_svq_set_fds()
1100 error_setg_errno(errp, -r, "Can't set device call fd"); in vhost_vdpa_svq_set_fds()
1107 event_notifier_set_handler(&svq->hdev_call, NULL); in vhost_vdpa_svq_set_fds()
1110 event_notifier_cleanup(&svq->hdev_kick); in vhost_vdpa_svq_set_fds()
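/*
 * Editor's sketch: how the shadow virtqueue's own notifiers replace the
 * guest's on the device side.  struct vhost_vring_file and the two ioctls
 * are kernel UAPI; vq_index is the device-side ring index.
 */
static int example_set_svq_fds(int device_fd, unsigned vq_index,
                               int kick_fd, int call_fd)
{
    struct vhost_vring_file file = { .index = vq_index, .fd = kick_fd };

    if (ioctl(device_fd, VHOST_SET_VRING_KICK, &file) < 0) {
        return -errno;
    }
    file.fd = call_fd;
    if (ioctl(device_fd, VHOST_SET_VRING_CALL, &file) < 0) {
        return -errno;
    }
    return 0;
}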
1124 const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree, in vhost_vdpa_svq_unmap_ring()
1134 size = ROUND_UP(result->size, qemu_real_host_page_size()); in vhost_vdpa_svq_unmap_ring()
1135 r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova, in vhost_vdpa_svq_unmap_ring()
1138 error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r); in vhost_vdpa_svq_unmap_ring()
1142 vhost_iova_tree_remove(v->shared->iova_tree, *result); in vhost_vdpa_svq_unmap_ring()
1148 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svq_unmap_rings()
1161 * @v: Vhost-vdpa device
1171 r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle, taddr); in vhost_vdpa_svq_map_ring()
1175 if (needle->translated_addr == taddr) { in vhost_vdpa_svq_map_ring()
1176 error_append_hint(errp, "Insertion to IOVA->HVA tree failed"); in vhost_vdpa_svq_map_ring()
1177 /* Remove the mapping from the IOVA-only tree */ in vhost_vdpa_svq_map_ring()
1178 vhost_iova_tree_remove(v->shared->iova_tree, *needle); in vhost_vdpa_svq_map_ring()
1183 r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova, in vhost_vdpa_svq_map_ring()
1184 needle->size + 1, in vhost_vdpa_svq_map_ring()
1185 (void *)(uintptr_t)needle->translated_addr, in vhost_vdpa_svq_map_ring()
1186 needle->perm == IOMMU_RO); in vhost_vdpa_svq_map_ring()
1188 error_setg_errno(errp, -r, "Cannot map region to device"); in vhost_vdpa_svq_map_ring()
1189 vhost_iova_tree_remove(v->shared->iova_tree, *needle); in vhost_vdpa_svq_map_ring()
1198 * @dev: The vhost device
1211 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svq_map_rings()
1220 .size = driver_size - 1, in vhost_vdpa_svq_map_rings()
1229 addr->desc_user_addr = driver_region.iova; in vhost_vdpa_svq_map_rings()
1230 avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr; in vhost_vdpa_svq_map_rings()
1231 addr->avail_user_addr = driver_region.iova + avail_offset; in vhost_vdpa_svq_map_rings()
1234 .size = device_size - 1, in vhost_vdpa_svq_map_rings()
1243 addr->used_user_addr = device_region.iova; in vhost_vdpa_svq_map_rings()
1252 uint16_t vq_index = dev->vq_index + idx; in vhost_vdpa_svq_setup()
1260 error_setg_errno(errp, -r, "Cannot set vring base"); in vhost_vdpa_svq_setup()
1270 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svqs_start()
1274 if (!v->shadow_vqs_enabled) { in vhost_vdpa_svqs_start()
1278 for (i = 0; i < v->shadow_vqs->len; ++i) { in vhost_vdpa_svqs_start()
1279 VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i); in vhost_vdpa_svqs_start()
1280 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); in vhost_vdpa_svqs_start()
1282 .index = dev->vq_index + i, in vhost_vdpa_svqs_start()
1290 vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree); in vhost_vdpa_svqs_start()
1296 /* Override vring GPA set by vhost subsystem */ in vhost_vdpa_svqs_start()
1299 error_setg_errno(&err, -r, "Cannot set device address"); in vhost_vdpa_svqs_start()
1307 vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i)); in vhost_vdpa_svqs_start()
1310 vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i)); in vhost_vdpa_svqs_start()
1315 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j); in vhost_vdpa_svqs_start()
1325 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svqs_stop()
1327 if (!v->shadow_vqs_enabled) { in vhost_vdpa_svqs_stop()
1331 for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { in vhost_vdpa_svqs_stop()
1332 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); in vhost_vdpa_svqs_stop()
1337 event_notifier_cleanup(&svq->hdev_kick); in vhost_vdpa_svqs_stop()
1338 event_notifier_cleanup(&svq->hdev_call); in vhost_vdpa_svqs_stop()
1344 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_suspend()
1351 if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) { in vhost_vdpa_suspend()
1353 r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND); in vhost_vdpa_suspend()
1357 v->suspended = true; in vhost_vdpa_suspend()
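/*
 * Editor's sketch: the suspend path reduced to its essentials.  If the
 * device does not offer VHOST_BACKEND_F_SUSPEND, the caller falls back to
 * a full reset, as the surrounding code does.
 */
static bool example_try_suspend(int device_fd, uint64_t backend_features)
{
    if (!(backend_features & (1ULL << VHOST_BACKEND_F_SUSPEND))) {
        return false;   /* not supported: caller resets the device instead */
    }
    return ioctl(device_fd, VHOST_VDPA_SUSPEND) == 0;
}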
1367 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_dev_start()
1375 return -1; in vhost_vdpa_dev_start()
1380 vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); in vhost_vdpa_dev_start()
1388 if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) { in vhost_vdpa_dev_start()
1389 error_report("SVQ can not work while IOMMU enable, please disable" in vhost_vdpa_dev_start()
1391 return -1; in vhost_vdpa_dev_start()
1393 if (v->shared->listener_registered && in vhost_vdpa_dev_start()
1394 dev->vdev->dma_as != v->shared->listener.address_space) { in vhost_vdpa_dev_start()
1395 memory_listener_unregister(&v->shared->listener); in vhost_vdpa_dev_start()
1396 v->shared->listener_registered = false; in vhost_vdpa_dev_start()
1398 if (!v->shared->listener_registered) { in vhost_vdpa_dev_start()
1399 memory_listener_register(&v->shared->listener, dev->vdev->dma_as); in vhost_vdpa_dev_start()
1400 v->shared->listener_registered = true; in vhost_vdpa_dev_start()
1423 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_log_base()
1424 if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) { in vhost_vdpa_set_log_base()
1428 trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd, in vhost_vdpa_set_log_base()
1429 log->log); in vhost_vdpa_set_log_base()
1436 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_addr()
1438 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_vring_addr()
1452 trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num); in vhost_vdpa_set_vring_num()
1459 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_base()
1461 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_vring_base()
1475 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_get_vring_base()
1478 if (v->shadow_vqs_enabled) { in vhost_vdpa_get_vring_base()
1479 ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index); in vhost_vdpa_get_vring_base()
1480 trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true); in vhost_vdpa_get_vring_base()
1484 if (!v->suspended) { in vhost_vdpa_get_vring_base()
1486 * Cannot trust the value returned by the device, let vhost recover used in vhost_vdpa_get_vring_base()
1489 return -1; in vhost_vdpa_get_vring_base()
1493 trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false); in vhost_vdpa_get_vring_base()
1500 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_kick()
1501 int vdpa_idx = file->index - dev->vq_index; in vhost_vdpa_set_vring_kick()
1503 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_vring_kick()
1504 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); in vhost_vdpa_set_vring_kick()
1505 vhost_svq_set_svq_kick_fd(svq, file->fd); in vhost_vdpa_set_vring_kick()
1515 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_call()
1516 int vdpa_idx = file->index - dev->vq_index; in vhost_vdpa_set_vring_call()
1517 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); in vhost_vdpa_set_vring_call()
1520 vhost_svq_set_svq_call_fd(svq, file->fd); in vhost_vdpa_set_vring_call()
1528 if (v->shadow_vqs_enabled && in vhost_vdpa_set_vring_call()
1529 v->shared->svq_switching != SVQ_TSTATE_DISABLING) { in vhost_vdpa_set_vring_call()
1568 v = dev->opaque; in vhost_vdpa_set_owner()
1569 memory_listener_register(&v->shared->listener, &address_space_memory); in vhost_vdpa_set_owner()
1570 v->shared->listener_registered = true; in vhost_vdpa_set_owner()
1577 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_vq_get_addr()
1578 addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys; in vhost_vdpa_vq_get_addr()
1579 addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys; in vhost_vdpa_vq_get_addr()
1580 addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys; in vhost_vdpa_vq_get_addr()
1581 trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr, in vhost_vdpa_vq_get_addr()
1582 addr->avail_user_addr, addr->used_user_addr); in vhost_vdpa_vq_get_addr()