Lines Matching +full:0 +full:- +full:dev
2 * vhost-vdpa
4 * Copyright(c) 2017-2018 Intel Corporation.
8 * See the COPYING file in the top-level directory.
19 #include "hw/virtio/vhost-backend.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/virtio/vhost-shadow-virtqueue.h"
22 #include "hw/virtio/vhost-vdpa.h"
23 #include "system/address-spaces.h"
26 #include "qemu/main-loop.h"
37 Int128 llend = int128_make64(section->offset_within_address_space); in vhost_vdpa_section_end()
38 llend = int128_add(llend, section->size); in vhost_vdpa_section_end()
50 bool is_ram = memory_region_is_ram(section->mr); in vhost_vdpa_listener_skipped_section()
51 bool is_iommu = memory_region_is_iommu(section->mr); in vhost_vdpa_listener_skipped_section()
52 bool is_protected = memory_region_is_protected(section->mr); in vhost_vdpa_listener_skipped_section()
54 /* vhost-vDPA doesn't allow MMIO to be mapped */ in vhost_vdpa_listener_skipped_section()
55 bool is_ram_device = memory_region_is_ram_device(section->mr); in vhost_vdpa_listener_skipped_section()
64 if (section->offset_within_address_space < iova_min) { in vhost_vdpa_listener_skipped_section()
65 error_report("RAM section out of device range (min=0x%" PRIx64 in vhost_vdpa_listener_skipped_section()
66 ", addr=0x%" HWADDR_PRIx ")", in vhost_vdpa_listener_skipped_section()
67 iova_min, section->offset_within_address_space); in vhost_vdpa_listener_skipped_section()
80 error_report("RAM section out of device range (max=0x%" PRIx64 in vhost_vdpa_listener_skipped_section()
81 ", end addr=0x%" PRIx64 ")", in vhost_vdpa_listener_skipped_section()
91 * The caller must set asid = 0 if the device does not support asid.
92 * This is not an ABI break since it is set to 0 by the initializer anyway.
98 int fd = s->device_fd; in vhost_vdpa_dma_map()
99 int ret = 0; in vhost_vdpa_dma_map()
116 return -EIO; in vhost_vdpa_dma_map()
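/* Context sketch (not a matching line): vhost_vdpa_dma_map() presumably packs its arguments
 * into a struct vhost_msg_v2 IOTLB update and writes it to the vdpa device fd. Field names
 * follow the Linux vhost UAPI and are assumptions, not a quote of the elided lines. */
    struct vhost_msg_v2 msg = {
        .type = VHOST_IOTLB_MSG_V2,
        .asid = asid,                                  /* 0 when the device lacks ASID support */
        .iotlb.iova = iova,
        .iotlb.size = size,
        .iotlb.uaddr = (uint64_t)(uintptr_t)vaddr,
        .iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW,
        .iotlb.type = VHOST_IOTLB_UPDATE,
    };
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        return -EIO;                                   /* the error return shown above */
    }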
123 * The caller must set asid = 0 if the device does not support asid.
124 * This is not an ABI break since it is set to 0 by the initializer anyway.
130 int fd = s->device_fd; in vhost_vdpa_dma_unmap()
131 int ret = 0; in vhost_vdpa_dma_unmap()
145 return -EIO; in vhost_vdpa_dma_unmap()
153 int fd = s->device_fd; in vhost_vdpa_listener_begin_batch()
168 if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) && in vhost_vdpa_iotlb_batch_begin_once()
169 !s->iotlb_batch_begin_sent) { in vhost_vdpa_iotlb_batch_begin_once()
173 s->iotlb_batch_begin_sent = true; in vhost_vdpa_iotlb_batch_begin_once()
180 int fd = s->device_fd; in vhost_vdpa_listener_commit()
182 if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) { in vhost_vdpa_listener_commit()
186 if (!s->iotlb_batch_begin_sent) { in vhost_vdpa_listener_commit()
199 s->iotlb_batch_begin_sent = false; in vhost_vdpa_listener_commit()
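/* Context sketch (not a matching line): when VHOST_BACKEND_F_IOTLB_BATCH is negotiated, the
 * listener presumably brackets a run of map/unmap messages with batch markers on the same fd.
 * The message layout below follows the vhost UAPI and is an assumption, not a quote of the file. */
    struct vhost_msg_v2 msg = {
        .type = VHOST_IOTLB_MSG_V2,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,         /* VHOST_IOTLB_BATCH_END on commit */
    };
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno));
    }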
206 hwaddr iova = iotlb->iova + iommu->iommu_offset; in vhost_vdpa_iommu_map_notify()
207 VhostVDPAShared *s = iommu->dev_shared; in vhost_vdpa_iommu_map_notify()
215 if (iotlb->target_as != &address_space_memory) { in vhost_vdpa_iommu_map_notify()
217 iotlb->target_as->name ? iotlb->target_as->name : "none"); in vhost_vdpa_iommu_map_notify()
222 llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova)); in vhost_vdpa_iommu_map_notify()
223 if (int128_gt(llend, int128_make64(s->iova_range.last))) { in vhost_vdpa_iommu_map_notify()
224 error_report("RAM section out of device range (max=0x%" PRIx64 in vhost_vdpa_iommu_map_notify()
225 ", end addr=0x%" PRIx64 ")", in vhost_vdpa_iommu_map_notify()
226 s->iova_range.last, int128_get64(llend)); in vhost_vdpa_iommu_map_notify()
230 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { in vhost_vdpa_iommu_map_notify()
239 read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly; in vhost_vdpa_iommu_map_notify()
242 iotlb->addr_mask + 1, vaddr, read_only); in vhost_vdpa_iommu_map_notify()
244 error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", " in vhost_vdpa_iommu_map_notify()
245 "0x%" HWADDR_PRIx ", %p) = %d (%m)", in vhost_vdpa_iommu_map_notify()
246 s, iova, iotlb->addr_mask + 1, vaddr, ret); in vhost_vdpa_iommu_map_notify()
250 iotlb->addr_mask + 1); in vhost_vdpa_iommu_map_notify()
252 error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " in vhost_vdpa_iommu_map_notify()
253 "0x%" HWADDR_PRIx ") = %d (%m)", in vhost_vdpa_iommu_map_notify()
254 s, iova, iotlb->addr_mask + 1, ret); in vhost_vdpa_iommu_map_notify()
270 iommu_mr = IOMMU_MEMORY_REGION(section->mr); in vhost_vdpa_iommu_region_add()
273 end = int128_add(int128_make64(section->offset_within_region), in vhost_vdpa_iommu_region_add()
274 section->size); in vhost_vdpa_iommu_region_add()
278 iommu->iommu_mr = iommu_mr; in vhost_vdpa_iommu_region_add()
279 iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify, in vhost_vdpa_iommu_region_add()
281 section->offset_within_region, in vhost_vdpa_iommu_region_add()
284 iommu->iommu_offset = section->offset_within_address_space - in vhost_vdpa_iommu_region_add()
285 section->offset_within_region; in vhost_vdpa_iommu_region_add()
286 iommu->dev_shared = s; in vhost_vdpa_iommu_region_add()
288 ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL); in vhost_vdpa_iommu_region_add()
294 QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next); in vhost_vdpa_iommu_region_add()
295 memory_region_iommu_replay(iommu->iommu_mr, &iommu->n); in vhost_vdpa_iommu_region_add()
305 QLIST_FOREACH(iommu, &s->iommu_list, iommu_next) in vhost_vdpa_iommu_region_del()
307 if (MEMORY_REGION(iommu->iommu_mr) == section->mr && in vhost_vdpa_iommu_region_del()
308 iommu->n.start == section->offset_within_region) { in vhost_vdpa_iommu_region_del()
309 memory_region_unregister_iommu_notifier(section->mr, &iommu->n); in vhost_vdpa_iommu_region_del()
327 int page_mask = -page_size; in vhost_vdpa_listener_region_add()
329 if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, in vhost_vdpa_listener_region_add()
330 s->iova_range.last, page_mask)) { in vhost_vdpa_listener_region_add()
333 if (memory_region_is_iommu(section->mr)) { in vhost_vdpa_listener_region_add()
338 if (unlikely((section->offset_within_address_space & ~page_mask) != in vhost_vdpa_listener_region_add()
339 (section->offset_within_region & ~page_mask))) { in vhost_vdpa_listener_region_add()
340 trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name, in vhost_vdpa_listener_region_add()
341 section->offset_within_address_space & ~page_mask, in vhost_vdpa_listener_region_add()
342 section->offset_within_region & ~page_mask); in vhost_vdpa_listener_region_add()
346 iova = ROUND_UP(section->offset_within_address_space, page_size); in vhost_vdpa_listener_region_add()
352 memory_region_ref(section->mr); in vhost_vdpa_listener_region_add()
354 /* Here we assume that memory_region_is_ram(section->mr)==true */ in vhost_vdpa_listener_region_add()
356 vaddr = memory_region_get_ram_ptr(section->mr) + in vhost_vdpa_listener_region_add()
357 section->offset_within_region + in vhost_vdpa_listener_region_add()
358 (iova - section->offset_within_address_space); in vhost_vdpa_listener_region_add()
361 vaddr, section->readonly); in vhost_vdpa_listener_region_add()
364 if (s->shadow_data) { in vhost_vdpa_listener_region_add()
366 hwaddr gpa = section->offset_within_address_space; in vhost_vdpa_listener_region_add()
368 mem_region.size = int128_get64(llsize) - 1, in vhost_vdpa_listener_region_add()
369 mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly), in vhost_vdpa_listener_region_add()
371 r = vhost_iova_tree_map_alloc_gpa(s->iova_tree, &mem_region, gpa); in vhost_vdpa_listener_region_add()
376 error_report("Insertion to GPA->IOVA tree failed"); in vhost_vdpa_listener_region_add()
377 /* Remove the mapping from the IOVA-only tree */ in vhost_vdpa_listener_region_add()
388 int128_get64(llsize), vaddr, section->readonly); in vhost_vdpa_listener_region_add()
397 if (s->shadow_data) { in vhost_vdpa_listener_region_add()
398 vhost_iova_tree_remove_gpa(s->iova_tree, mem_region); in vhost_vdpa_listener_region_add()
407 error_report("vhost-vdpa: DMA mapping failed, unable to continue"); in vhost_vdpa_listener_region_add()
420 int page_mask = -page_size; in vhost_vdpa_listener_region_del()
422 if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, in vhost_vdpa_listener_region_del()
423 s->iova_range.last, page_mask)) { in vhost_vdpa_listener_region_del()
426 if (memory_region_is_iommu(section->mr)) { in vhost_vdpa_listener_region_del()
430 if (unlikely((section->offset_within_address_space & ~page_mask) != in vhost_vdpa_listener_region_del()
431 (section->offset_within_region & ~page_mask))) { in vhost_vdpa_listener_region_del()
432 trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name, in vhost_vdpa_listener_region_del()
433 section->offset_within_address_space & ~page_mask, in vhost_vdpa_listener_region_del()
434 section->offset_within_region & ~page_mask); in vhost_vdpa_listener_region_del()
438 iova = ROUND_UP(section->offset_within_address_space, page_size); in vhost_vdpa_listener_region_del()
450 if (s->shadow_data) { in vhost_vdpa_listener_region_del()
453 .translated_addr = section->offset_within_address_space, in vhost_vdpa_listener_region_del()
454 .size = int128_get64(llsize) - 1, in vhost_vdpa_listener_region_del()
457 result = vhost_iova_tree_find_gpa(s->iova_tree, &mem_region); in vhost_vdpa_listener_region_del()
462 iova = result->iova; in vhost_vdpa_listener_region_del()
463 vhost_iova_tree_remove_gpa(s->iova_tree, *result); in vhost_vdpa_listener_region_del()
467 * The unmap ioctl doesn't accept a full 64-bit span; we need to check it in vhost_vdpa_listener_region_del()
475 error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " in vhost_vdpa_listener_region_del()
476 "0x%" HWADDR_PRIx ") = %d (%m)", in vhost_vdpa_listener_region_del()
485 error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " in vhost_vdpa_listener_region_del()
486 "0x%" HWADDR_PRIx ") = %d (%m)", in vhost_vdpa_listener_region_del()
490 memory_region_unref(section->mr); in vhost_vdpa_listener_region_del()
493 * The IOTLB API is used by vhost-vdpa, which requires incremental updating in vhost_vdpa_listener_region_del()
498 .name = "vhost-vdpa",
504 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request, in vhost_vdpa_call() argument
507 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_call()
508 int fd = v->shared->device_fd; in vhost_vdpa_call()
511 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_call()
514 return ret < 0 ? -errno : ret; in vhost_vdpa_call()
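/* Context sketch (not a matching line): the elided body of vhost_vdpa_call() presumably issues
 * the ioctl on the shared vdpa device fd, roughly as below; this is an assumption based on the
 * return expression shown, not a quote of the file. */
    ret = ioctl(fd, request, arg);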
517 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) in vhost_vdpa_add_status() argument
522 trace_vhost_vdpa_add_status(dev, status); in vhost_vdpa_add_status()
523 ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s); in vhost_vdpa_add_status()
524 if (ret < 0) { in vhost_vdpa_add_status()
529 return 0; in vhost_vdpa_add_status()
534 ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s); in vhost_vdpa_add_status()
535 if (ret < 0) { in vhost_vdpa_add_status()
539 ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s); in vhost_vdpa_add_status()
540 if (ret < 0) { in vhost_vdpa_add_status()
545 return -EIO; in vhost_vdpa_add_status()
548 return 0; in vhost_vdpa_add_status()
555 return ret < 0 ? -errno : 0; in vhost_vdpa_get_iova_range()
563 * set, which would need to check dev->vq_index_end instead.
565 static bool vhost_vdpa_first_dev(struct vhost_dev *dev) in vhost_vdpa_first_dev() argument
567 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_first_dev()
569 return v->index == 0; in vhost_vdpa_first_dev()
572 static bool vhost_vdpa_last_dev(struct vhost_dev *dev) in vhost_vdpa_last_dev() argument
574 return dev->vq_index + dev->nvqs == dev->vq_index_end; in vhost_vdpa_last_dev()
577 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev, in vhost_vdpa_get_dev_features() argument
582 ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features); in vhost_vdpa_get_dev_features()
583 trace_vhost_vdpa_get_features(dev, *features); in vhost_vdpa_get_dev_features()
591 shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); in vhost_vdpa_init_svq()
592 for (unsigned n = 0; n < hdev->nvqs; ++n) { in vhost_vdpa_init_svq()
595 svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque); in vhost_vdpa_init_svq()
599 v->shadow_vqs = g_steal_pointer(&shadow_vqs); in vhost_vdpa_init_svq()
602 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) in vhost_vdpa_set_backend_cap() argument
604 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_backend_cap()
607 uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | in vhost_vdpa_set_backend_cap()
608 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH | in vhost_vdpa_set_backend_cap()
609 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID | in vhost_vdpa_set_backend_cap()
610 0x1ULL << VHOST_BACKEND_F_SUSPEND; in vhost_vdpa_set_backend_cap()
613 if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { in vhost_vdpa_set_backend_cap()
614 return -EFAULT; in vhost_vdpa_set_backend_cap()
619 if (vhost_vdpa_first_dev(dev)) { in vhost_vdpa_set_backend_cap()
620 r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); in vhost_vdpa_set_backend_cap()
622 return -EFAULT; in vhost_vdpa_set_backend_cap()
626 dev->backend_cap = features; in vhost_vdpa_set_backend_cap()
627 v->shared->backend_cap = features; in vhost_vdpa_set_backend_cap()
629 return 0; in vhost_vdpa_set_backend_cap()
632 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) in vhost_vdpa_init() argument
635 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_init()
636 trace_vhost_vdpa_init(dev, v->shared, opaque); in vhost_vdpa_init()
639 v->dev = dev; in vhost_vdpa_init()
640 dev->opaque = opaque; in vhost_vdpa_init()
642 ret = vhost_vdpa_set_backend_cap(dev); in vhost_vdpa_init()
643 if (unlikely(ret != 0)) { in vhost_vdpa_init()
647 vhost_vdpa_init_svq(dev, v); in vhost_vdpa_init()
649 error_propagate(&dev->migration_blocker, v->migration_blocker); in vhost_vdpa_init()
650 if (!vhost_vdpa_first_dev(dev)) { in vhost_vdpa_init()
651 return 0; in vhost_vdpa_init()
655 * If dev->shadow_vqs_enabled is set at initialization, that means the device has in vhost_vdpa_init()
656 * been started with x-svq=on, so don't block migration in vhost_vdpa_init()
658 if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) { in vhost_vdpa_init()
659 /* We don't have dev->features yet */ in vhost_vdpa_init()
661 ret = vhost_vdpa_get_dev_features(dev, &features); in vhost_vdpa_init()
663 error_setg_errno(errp, -ret, "Could not get device features"); in vhost_vdpa_init()
666 vhost_svq_valid_features(features, &dev->migration_blocker); in vhost_vdpa_init()
679 vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | in vhost_vdpa_init()
682 v->shared->listener = vhost_vdpa_memory_listener; in vhost_vdpa_init()
683 return 0; in vhost_vdpa_init()
686 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev, in vhost_vdpa_host_notifier_uninit() argument
690 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_host_notifier_uninit()
691 VirtIODevice *vdev = dev->vdev; in vhost_vdpa_host_notifier_uninit()
694 n = &v->notifier[queue_index]; in vhost_vdpa_host_notifier_uninit()
696 if (n->addr) { in vhost_vdpa_host_notifier_uninit()
697 virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false); in vhost_vdpa_host_notifier_uninit()
698 object_unparent(OBJECT(&n->mr)); in vhost_vdpa_host_notifier_uninit()
699 munmap(n->addr, page_size); in vhost_vdpa_host_notifier_uninit()
700 n->addr = NULL; in vhost_vdpa_host_notifier_uninit()
704 static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index) in vhost_vdpa_host_notifier_init() argument
707 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_host_notifier_init()
708 VirtIODevice *vdev = dev->vdev; in vhost_vdpa_host_notifier_init()
710 int fd = v->shared->device_fd; in vhost_vdpa_host_notifier_init()
714 vhost_vdpa_host_notifier_uninit(dev, queue_index); in vhost_vdpa_host_notifier_init()
716 n = &v->notifier[queue_index]; in vhost_vdpa_host_notifier_init()
724 name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]", in vhost_vdpa_host_notifier_init()
726 memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, in vhost_vdpa_host_notifier_init()
730 if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) { in vhost_vdpa_host_notifier_init()
731 object_unparent(OBJECT(&n->mr)); in vhost_vdpa_host_notifier_init()
735 n->addr = addr; in vhost_vdpa_host_notifier_init()
737 return 0; in vhost_vdpa_host_notifier_init()
740 return -1; in vhost_vdpa_host_notifier_init()
743 static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n) in vhost_vdpa_host_notifiers_uninit() argument
754 for (i = dev->vq_index; i < dev->vq_index + n; i++) { in vhost_vdpa_host_notifiers_uninit()
755 vhost_vdpa_host_notifier_uninit(dev, i); in vhost_vdpa_host_notifiers_uninit()
761 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev) in vhost_vdpa_host_notifiers_init() argument
763 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_host_notifiers_init()
766 if (v->shadow_vqs_enabled) { in vhost_vdpa_host_notifiers_init()
778 for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) { in vhost_vdpa_host_notifiers_init()
779 if (vhost_vdpa_host_notifier_init(dev, i)) { in vhost_vdpa_host_notifiers_init()
780 vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index); in vhost_vdpa_host_notifiers_init()
788 static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev) in vhost_vdpa_svq_cleanup() argument
790 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svq_cleanup()
793 for (idx = 0; idx < v->shadow_vqs->len; ++idx) { in vhost_vdpa_svq_cleanup()
794 vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx)); in vhost_vdpa_svq_cleanup()
796 g_ptr_array_free(v->shadow_vqs, true); in vhost_vdpa_svq_cleanup()
799 static int vhost_vdpa_cleanup(struct vhost_dev *dev) in vhost_vdpa_cleanup() argument
802 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_cleanup()
803 v = dev->opaque; in vhost_vdpa_cleanup()
804 trace_vhost_vdpa_cleanup(dev, v); in vhost_vdpa_cleanup()
805 if (vhost_vdpa_first_dev(dev)) { in vhost_vdpa_cleanup()
807 memory_listener_unregister(&v->shared->listener); in vhost_vdpa_cleanup()
810 vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); in vhost_vdpa_cleanup()
811 vhost_vdpa_svq_cleanup(dev); in vhost_vdpa_cleanup()
813 dev->opaque = NULL; in vhost_vdpa_cleanup()
815 return 0; in vhost_vdpa_cleanup()
818 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev) in vhost_vdpa_memslots_limit() argument
820 trace_vhost_vdpa_memslots_limit(dev, INT_MAX); in vhost_vdpa_memslots_limit()
824 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, in vhost_vdpa_set_mem_table() argument
827 if (!vhost_vdpa_first_dev(dev)) { in vhost_vdpa_set_mem_table()
828 return 0; in vhost_vdpa_set_mem_table()
831 trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding); in vhost_vdpa_set_mem_table()
835 for (i = 0; i < mem->nregions; i++) { in vhost_vdpa_set_mem_table()
836 trace_vhost_vdpa_dump_regions(dev, i, in vhost_vdpa_set_mem_table()
837 mem->regions[i].guest_phys_addr, in vhost_vdpa_set_mem_table()
838 mem->regions[i].memory_size, in vhost_vdpa_set_mem_table()
839 mem->regions[i].userspace_addr, in vhost_vdpa_set_mem_table()
840 mem->regions[i].flags_padding); in vhost_vdpa_set_mem_table()
843 if (mem->padding) { in vhost_vdpa_set_mem_table()
844 return -EINVAL; in vhost_vdpa_set_mem_table()
847 return 0; in vhost_vdpa_set_mem_table()
850 static int vhost_vdpa_set_features(struct vhost_dev *dev, in vhost_vdpa_set_features() argument
853 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_features()
856 if (!vhost_vdpa_first_dev(dev)) { in vhost_vdpa_set_features()
857 return 0; in vhost_vdpa_set_features()
860 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_features()
861 if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) { in vhost_vdpa_set_features()
866 v->acked_features = features; in vhost_vdpa_set_features()
867 return 0; in vhost_vdpa_set_features()
870 v->acked_features = features; in vhost_vdpa_set_features()
876 trace_vhost_vdpa_set_features(dev, features); in vhost_vdpa_set_features()
877 ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features); in vhost_vdpa_set_features()
882 return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); in vhost_vdpa_set_features()
885 static int vhost_vdpa_get_device_id(struct vhost_dev *dev, in vhost_vdpa_get_device_id() argument
889 ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id); in vhost_vdpa_get_device_id()
890 trace_vhost_vdpa_get_device_id(dev, *device_id); in vhost_vdpa_get_device_id()
894 static int vhost_vdpa_reset_device(struct vhost_dev *dev) in vhost_vdpa_reset_device() argument
896 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_reset_device()
898 uint8_t status = 0; in vhost_vdpa_reset_device()
900 ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status); in vhost_vdpa_reset_device()
901 trace_vhost_vdpa_reset_device(dev); in vhost_vdpa_reset_device()
906 memory_listener_unregister(&v->shared->listener); in vhost_vdpa_reset_device()
907 v->shared->listener_registered = false; in vhost_vdpa_reset_device()
908 v->suspended = false; in vhost_vdpa_reset_device()
909 return 0; in vhost_vdpa_reset_device()
912 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) in vhost_vdpa_get_vq_index() argument
914 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); in vhost_vdpa_get_vq_index()
916 trace_vhost_vdpa_get_vq_index(dev, idx, idx); in vhost_vdpa_get_vq_index()
923 struct vhost_dev *dev = v->dev; in vhost_vdpa_set_vring_enable_one() local
928 int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state); in vhost_vdpa_set_vring_enable_one()
930 trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r); in vhost_vdpa_set_vring_enable_one()
934 static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable) in vhost_vdpa_set_vring_enable() argument
936 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_enable()
940 for (i = 0; i < dev->nvqs; ++i) { in vhost_vdpa_set_vring_enable()
942 if (ret < 0) { in vhost_vdpa_set_vring_enable()
947 return 0; in vhost_vdpa_set_vring_enable()
955 static int vhost_vdpa_set_config_call(struct vhost_dev *dev, in vhost_vdpa_set_config_call() argument
958 trace_vhost_vdpa_set_config_call(dev, fd); in vhost_vdpa_set_config_call()
959 return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd); in vhost_vdpa_set_config_call()
962 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config, in vhost_vdpa_dump_config() argument
968 for (b = 0; b < config_len; b += len) { in vhost_vdpa_dump_config()
969 len = MIN(config_len - b, 16); in vhost_vdpa_dump_config()
971 g_string_truncate(str, 0); in vhost_vdpa_dump_config()
973 trace_vhost_vdpa_dump_config(dev, b, str->str); in vhost_vdpa_dump_config()
977 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data, in vhost_vdpa_set_config() argument
985 trace_vhost_vdpa_set_config(dev, offset, size, flags); in vhost_vdpa_set_config()
987 config->off = offset; in vhost_vdpa_set_config()
988 config->len = size; in vhost_vdpa_set_config()
989 memcpy(config->buf, data, size); in vhost_vdpa_set_config()
992 vhost_vdpa_dump_config(dev, data, size); in vhost_vdpa_set_config()
994 ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config); in vhost_vdpa_set_config()
999 static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config, in vhost_vdpa_get_config() argument
1006 trace_vhost_vdpa_get_config(dev, config, config_len); in vhost_vdpa_get_config()
1008 v_config->len = config_len; in vhost_vdpa_get_config()
1009 v_config->off = 0; in vhost_vdpa_get_config()
1010 ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config); in vhost_vdpa_get_config()
1011 memcpy(config, v_config->buf, config_len); in vhost_vdpa_get_config()
1015 vhost_vdpa_dump_config(dev, config, config_len); in vhost_vdpa_get_config()
1020 static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev, in vhost_vdpa_set_dev_vring_base() argument
1023 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_dev_vring_base()
1025 trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num, in vhost_vdpa_set_dev_vring_base()
1026 v->shadow_vqs_enabled); in vhost_vdpa_set_dev_vring_base()
1027 return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); in vhost_vdpa_set_dev_vring_base()
1030 static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev, in vhost_vdpa_set_vring_dev_kick() argument
1033 trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd); in vhost_vdpa_set_vring_dev_kick()
1034 return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file); in vhost_vdpa_set_vring_dev_kick()
1037 static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev, in vhost_vdpa_set_vring_dev_call() argument
1040 trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd); in vhost_vdpa_set_vring_dev_call()
1041 return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file); in vhost_vdpa_set_vring_dev_call()
1044 static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev, in vhost_vdpa_set_vring_dev_addr() argument
1047 trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags, in vhost_vdpa_set_vring_dev_addr()
1048 addr->desc_user_addr, addr->used_user_addr, in vhost_vdpa_set_vring_dev_addr()
1049 addr->avail_user_addr, in vhost_vdpa_set_vring_dev_addr()
1050 addr->log_guest_addr); in vhost_vdpa_set_vring_dev_addr()
1052 return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr); in vhost_vdpa_set_vring_dev_addr()
1059 * @dev: The vhost device model
1067 static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev, in vhost_vdpa_svq_set_fds() argument
1072 .index = dev->vq_index + idx, in vhost_vdpa_svq_set_fds()
1074 const EventNotifier *event_notifier = &svq->hdev_kick; in vhost_vdpa_svq_set_fds()
1077 r = event_notifier_init(&svq->hdev_kick, 0); in vhost_vdpa_svq_set_fds()
1078 if (r != 0) { in vhost_vdpa_svq_set_fds()
1079 error_setg_errno(errp, -r, "Couldn't create kick event notifier"); in vhost_vdpa_svq_set_fds()
1083 r = event_notifier_init(&svq->hdev_call, 0); in vhost_vdpa_svq_set_fds()
1084 if (r != 0) { in vhost_vdpa_svq_set_fds()
1085 error_setg_errno(errp, -r, "Couldn't create call event notifier"); in vhost_vdpa_svq_set_fds()
1090 r = vhost_vdpa_set_vring_dev_kick(dev, &file); in vhost_vdpa_svq_set_fds()
1091 if (unlikely(r != 0)) { in vhost_vdpa_svq_set_fds()
1092 error_setg_errno(errp, -r, "Can't set device kick fd"); in vhost_vdpa_svq_set_fds()
1096 event_notifier = &svq->hdev_call; in vhost_vdpa_svq_set_fds()
1098 r = vhost_vdpa_set_vring_dev_call(dev, &file); in vhost_vdpa_svq_set_fds()
1099 if (unlikely(r != 0)) { in vhost_vdpa_svq_set_fds()
1100 error_setg_errno(errp, -r, "Can't set device call fd"); in vhost_vdpa_svq_set_fds()
1104 return 0; in vhost_vdpa_svq_set_fds()
1107 event_notifier_set_handler(&svq->hdev_call, NULL); in vhost_vdpa_svq_set_fds()
1110 event_notifier_cleanup(&svq->hdev_kick); in vhost_vdpa_svq_set_fds()
1124 const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree, in vhost_vdpa_svq_unmap_ring()
1134 size = ROUND_UP(result->size, qemu_real_host_page_size()); in vhost_vdpa_svq_unmap_ring()
1135 r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova, in vhost_vdpa_svq_unmap_ring()
1137 if (unlikely(r < 0)) { in vhost_vdpa_svq_unmap_ring()
1138 error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r); in vhost_vdpa_svq_unmap_ring()
1142 vhost_iova_tree_remove(v->shared->iova_tree, *result); in vhost_vdpa_svq_unmap_ring()
1145 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev, in vhost_vdpa_svq_unmap_rings() argument
1148 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svq_unmap_rings()
1161 * @v: Vhost-vdpa device
1171 r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle, taddr); in vhost_vdpa_svq_map_ring()
1175 if (needle->translated_addr == taddr) { in vhost_vdpa_svq_map_ring()
1176 error_append_hint(errp, "Insertion to IOVA->HVA tree failed"); in vhost_vdpa_svq_map_ring()
1177 /* Remove the mapping from the IOVA-only tree */ in vhost_vdpa_svq_map_ring()
1178 vhost_iova_tree_remove(v->shared->iova_tree, *needle); in vhost_vdpa_svq_map_ring()
1183 r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova, in vhost_vdpa_svq_map_ring()
1184 needle->size + 1, in vhost_vdpa_svq_map_ring()
1185 (void *)(uintptr_t)needle->translated_addr, in vhost_vdpa_svq_map_ring()
1186 needle->perm == IOMMU_RO); in vhost_vdpa_svq_map_ring()
1187 if (unlikely(r != 0)) { in vhost_vdpa_svq_map_ring()
1188 error_setg_errno(errp, -r, "Cannot map region to device"); in vhost_vdpa_svq_map_ring()
1189 vhost_iova_tree_remove(v->shared->iova_tree, *needle); in vhost_vdpa_svq_map_ring()
1192 return r == 0; in vhost_vdpa_svq_map_ring()
1198 * @dev: The vhost device
1203 static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, in vhost_vdpa_svq_map_rings() argument
1211 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svq_map_rings()
1220 .size = driver_size - 1, in vhost_vdpa_svq_map_rings()
1229 addr->desc_user_addr = driver_region.iova; in vhost_vdpa_svq_map_rings()
1230 avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr; in vhost_vdpa_svq_map_rings()
1231 addr->avail_user_addr = driver_region.iova + avail_offset; in vhost_vdpa_svq_map_rings()
1234 .size = device_size - 1, in vhost_vdpa_svq_map_rings()
1243 addr->used_user_addr = device_region.iova; in vhost_vdpa_svq_map_rings()
1248 static bool vhost_vdpa_svq_setup(struct vhost_dev *dev, in vhost_vdpa_svq_setup() argument
1252 uint16_t vq_index = dev->vq_index + idx; in vhost_vdpa_svq_setup()
1258 r = vhost_vdpa_set_dev_vring_base(dev, &s); in vhost_vdpa_svq_setup()
1260 error_setg_errno(errp, -r, "Cannot set vring base"); in vhost_vdpa_svq_setup()
1264 r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp); in vhost_vdpa_svq_setup()
1265 return r == 0; in vhost_vdpa_svq_setup()
1268 static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) in vhost_vdpa_svqs_start() argument
1270 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svqs_start()
1274 if (!v->shadow_vqs_enabled) { in vhost_vdpa_svqs_start()
1278 for (i = 0; i < v->shadow_vqs->len; ++i) { in vhost_vdpa_svqs_start()
1279 VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i); in vhost_vdpa_svqs_start()
1280 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); in vhost_vdpa_svqs_start()
1282 .index = dev->vq_index + i, in vhost_vdpa_svqs_start()
1285 bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err); in vhost_vdpa_svqs_start()
1290 vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree); in vhost_vdpa_svqs_start()
1291 ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err); in vhost_vdpa_svqs_start()
1297 r = vhost_vdpa_set_vring_dev_addr(dev, &addr); in vhost_vdpa_svqs_start()
1298 if (unlikely(r != 0)) { in vhost_vdpa_svqs_start()
1299 error_setg_errno(&err, -r, "Cannot set device address"); in vhost_vdpa_svqs_start()
1307 vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i)); in vhost_vdpa_svqs_start()
1310 vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i)); in vhost_vdpa_svqs_start()
1314 for (unsigned j = 0; j < i; ++j) { in vhost_vdpa_svqs_start()
1315 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j); in vhost_vdpa_svqs_start()
1316 vhost_vdpa_svq_unmap_rings(dev, svq); in vhost_vdpa_svqs_start()
1323 static void vhost_vdpa_svqs_stop(struct vhost_dev *dev) in vhost_vdpa_svqs_stop() argument
1325 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_svqs_stop()
1327 if (!v->shadow_vqs_enabled) { in vhost_vdpa_svqs_stop()
1331 for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { in vhost_vdpa_svqs_stop()
1332 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); in vhost_vdpa_svqs_stop()
1335 vhost_vdpa_svq_unmap_rings(dev, svq); in vhost_vdpa_svqs_stop()
1337 event_notifier_cleanup(&svq->hdev_kick); in vhost_vdpa_svqs_stop()
1338 event_notifier_cleanup(&svq->hdev_call); in vhost_vdpa_svqs_stop()
1342 static void vhost_vdpa_suspend(struct vhost_dev *dev) in vhost_vdpa_suspend() argument
1344 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_suspend()
1347 if (!vhost_vdpa_first_dev(dev)) { in vhost_vdpa_suspend()
1351 if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) { in vhost_vdpa_suspend()
1352 trace_vhost_vdpa_suspend(dev); in vhost_vdpa_suspend()
1353 r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND); in vhost_vdpa_suspend()
1357 v->suspended = true; in vhost_vdpa_suspend()
1362 vhost_vdpa_reset_device(dev); in vhost_vdpa_suspend()
1365 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) in vhost_vdpa_dev_start() argument
1367 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_dev_start()
1369 trace_vhost_vdpa_dev_start(dev, started); in vhost_vdpa_dev_start()
1372 vhost_vdpa_host_notifiers_init(dev); in vhost_vdpa_dev_start()
1373 ok = vhost_vdpa_svqs_start(dev); in vhost_vdpa_dev_start()
1375 return -1; in vhost_vdpa_dev_start()
1378 vhost_vdpa_suspend(dev); in vhost_vdpa_dev_start()
1379 vhost_vdpa_svqs_stop(dev); in vhost_vdpa_dev_start()
1380 vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); in vhost_vdpa_dev_start()
1383 if (!vhost_vdpa_last_dev(dev)) { in vhost_vdpa_dev_start()
1384 return 0; in vhost_vdpa_dev_start()
1388 if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) { in vhost_vdpa_dev_start()
1391 return -1; in vhost_vdpa_dev_start()
1393 if (v->shared->listener_registered && in vhost_vdpa_dev_start()
1394 dev->vdev->dma_as != v->shared->listener.address_space) { in vhost_vdpa_dev_start()
1395 memory_listener_unregister(&v->shared->listener); in vhost_vdpa_dev_start()
1396 v->shared->listener_registered = false; in vhost_vdpa_dev_start()
1398 if (!v->shared->listener_registered) { in vhost_vdpa_dev_start()
1399 memory_listener_register(&v->shared->listener, dev->vdev->dma_as); in vhost_vdpa_dev_start()
1400 v->shared->listener_registered = true; in vhost_vdpa_dev_start()
1403 return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); in vhost_vdpa_dev_start()
1406 return 0; in vhost_vdpa_dev_start()
1409 static void vhost_vdpa_reset_status(struct vhost_dev *dev) in vhost_vdpa_reset_status() argument
1411 if (!vhost_vdpa_last_dev(dev)) { in vhost_vdpa_reset_status()
1415 vhost_vdpa_reset_device(dev); in vhost_vdpa_reset_status()
1416 vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | in vhost_vdpa_reset_status()
1420 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, in vhost_vdpa_set_log_base() argument
1423 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_log_base()
1424 if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) { in vhost_vdpa_set_log_base()
1425 return 0; in vhost_vdpa_set_log_base()
1428 trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd, in vhost_vdpa_set_log_base()
1429 log->log); in vhost_vdpa_set_log_base()
1430 return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base); in vhost_vdpa_set_log_base()
1433 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev, in vhost_vdpa_set_vring_addr() argument
1436 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_addr()
1438 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_vring_addr()
1443 return 0; in vhost_vdpa_set_vring_addr()
1446 return vhost_vdpa_set_vring_dev_addr(dev, addr); in vhost_vdpa_set_vring_addr()
1449 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev, in vhost_vdpa_set_vring_num() argument
1452 trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num); in vhost_vdpa_set_vring_num()
1453 return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring); in vhost_vdpa_set_vring_num()
1456 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev, in vhost_vdpa_set_vring_base() argument
1459 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_base()
1461 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_vring_base()
1466 return 0; in vhost_vdpa_set_vring_base()
1469 return vhost_vdpa_set_dev_vring_base(dev, ring); in vhost_vdpa_set_vring_base()
1472 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, in vhost_vdpa_get_vring_base() argument
1475 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_get_vring_base()
1478 if (v->shadow_vqs_enabled) { in vhost_vdpa_get_vring_base()
1479 ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index); in vhost_vdpa_get_vring_base()
1480 trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true); in vhost_vdpa_get_vring_base()
1481 return 0; in vhost_vdpa_get_vring_base()
1484 if (!v->suspended) { in vhost_vdpa_get_vring_base()
1489 return -1; in vhost_vdpa_get_vring_base()
1492 ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring); in vhost_vdpa_get_vring_base()
1493 trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false); in vhost_vdpa_get_vring_base()
1497 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev, in vhost_vdpa_set_vring_kick() argument
1500 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_kick()
1501 int vdpa_idx = file->index - dev->vq_index; in vhost_vdpa_set_vring_kick()
1503 if (v->shadow_vqs_enabled) { in vhost_vdpa_set_vring_kick()
1504 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); in vhost_vdpa_set_vring_kick()
1505 vhost_svq_set_svq_kick_fd(svq, file->fd); in vhost_vdpa_set_vring_kick()
1506 return 0; in vhost_vdpa_set_vring_kick()
1508 return vhost_vdpa_set_vring_dev_kick(dev, file); in vhost_vdpa_set_vring_kick()
1512 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev, in vhost_vdpa_set_vring_call() argument
1515 struct vhost_vdpa *v = dev->opaque; in vhost_vdpa_set_vring_call()
1516 int vdpa_idx = file->index - dev->vq_index; in vhost_vdpa_set_vring_call()
1517 VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); in vhost_vdpa_set_vring_call()
1520 vhost_svq_set_svq_call_fd(svq, file->fd); in vhost_vdpa_set_vring_call()
1528 if (v->shadow_vqs_enabled && in vhost_vdpa_set_vring_call()
1529 v->shared->svq_switching != SVQ_TSTATE_DISABLING) { in vhost_vdpa_set_vring_call()
1530 return 0; in vhost_vdpa_set_vring_call()
1533 return vhost_vdpa_set_vring_dev_call(dev, file); in vhost_vdpa_set_vring_call()
1536 static int vhost_vdpa_get_features(struct vhost_dev *dev, in vhost_vdpa_get_features() argument
1539 int ret = vhost_vdpa_get_dev_features(dev, features); in vhost_vdpa_get_features()
1541 if (ret == 0) { in vhost_vdpa_get_features()
1549 static int vhost_vdpa_set_owner(struct vhost_dev *dev) in vhost_vdpa_set_owner() argument
1554 if (!vhost_vdpa_first_dev(dev)) { in vhost_vdpa_set_owner()
1555 return 0; in vhost_vdpa_set_owner()
1558 trace_vhost_vdpa_set_owner(dev); in vhost_vdpa_set_owner()
1559 r = vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL); in vhost_vdpa_set_owner()
1560 if (unlikely(r < 0)) { in vhost_vdpa_set_owner()
1568 v = dev->opaque; in vhost_vdpa_set_owner()
1569 memory_listener_register(&v->shared->listener, &address_space_memory); in vhost_vdpa_set_owner()
1570 v->shared->listener_registered = true; in vhost_vdpa_set_owner()
1571 return 0; in vhost_vdpa_set_owner()
1574 static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev, in vhost_vdpa_vq_get_addr() argument
1577 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); in vhost_vdpa_vq_get_addr()
1578 addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys; in vhost_vdpa_vq_get_addr()
1579 addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys; in vhost_vdpa_vq_get_addr()
1580 addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys; in vhost_vdpa_vq_get_addr()
1581 trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr, in vhost_vdpa_vq_get_addr()
1582 addr->avail_user_addr, addr->used_user_addr); in vhost_vdpa_vq_get_addr()
1583 return 0; in vhost_vdpa_vq_get_addr()
1586 static bool vhost_vdpa_force_iommu(struct vhost_dev *dev) in vhost_vdpa_force_iommu() argument