Lines Matching +full:uuid +full:- +full:dev
9 * Marc-André Lureau <mlureau@redhat.com>
13 * later. See the COPYING file in the top-level directory.
39 #include "standard-headers/linux/virtio_config.h"
57 #include "libvhost-user.h"
83 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
114 bool vu_has_feature(VuDev *dev, in vu_has_feature() argument
117 return has_feature(dev->features, fbit); in vu_has_feature()
120 static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit) in vu_has_protocol_feature() argument
122 return has_feature(dev->protocol_features, fbit); in vu_has_protocol_feature()
179 vu_panic(VuDev *dev, const char *msg, ...) in vu_panic() argument
190 dev->broken = true; in vu_panic()
191 dev->panic(dev, buf); in vu_panic()
202 vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr) in vu_gpa_to_mem_region() argument
205 int high = dev->nregions - 1; in vu_gpa_to_mem_region()
216 unsigned int mid = low + (high - low) / 2; in vu_gpa_to_mem_region()
217 VuDevRegion *cur = &dev->regions[mid]; in vu_gpa_to_mem_region()
219 if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) { in vu_gpa_to_mem_region()
222 if (guest_addr >= cur->gpa + cur->size) { in vu_gpa_to_mem_region()
225 if (guest_addr < cur->gpa) { in vu_gpa_to_mem_region()
226 high = mid - 1; in vu_gpa_to_mem_region()
234 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr) in vu_gpa_to_va() argument
242 r = vu_gpa_to_mem_region(dev, guest_addr); in vu_gpa_to_va()
247 if ((guest_addr + *plen) > (r->gpa + r->size)) { in vu_gpa_to_va()
248 *plen = r->gpa + r->size - guest_addr; in vu_gpa_to_va()
250 return (void *)(uintptr_t)guest_addr - r->gpa + r->mmap_addr + in vu_gpa_to_va()
251 r->mmap_offset; in vu_gpa_to_va()
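
For context: as the clamp at lines 247-248 shows, vu_gpa_to_va() limits *plen to the end of the containing region, so callers are expected to loop across region boundaries. A minimal caller sketch, assuming only the API shown above (the helper name copy_from_guest is illustrative, not part of libvhost-user):

#include <stdint.h>
#include <string.h>
#include "libvhost-user.h"

/* Copy "size" bytes starting at guest physical address "gpa" into "dst",
 * walking across memory-region boundaries; returns bytes actually copied. */
static size_t copy_from_guest(VuDev *dev, void *dst, uint64_t gpa, size_t size)
{
    size_t done = 0;

    while (done < size) {
        uint64_t len = size - done;
        void *src = vu_gpa_to_va(dev, &len, gpa + done);

        if (!src) {
            break;                  /* address not backed by any region */
        }
        memcpy((char *)dst + done, src, len);
        done += len;
    }
    return done;
}
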
256 qva_to_va(VuDev *dev, uint64_t qemu_addr) in qva_to_va() argument
261 for (i = 0; i < dev->nregions; i++) { in qva_to_va()
262 VuDevRegion *r = &dev->regions[i]; in qva_to_va()
264 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) { in qva_to_va()
266 qemu_addr - r->qva + r->mmap_addr + r->mmap_offset; in qva_to_va()
274 vu_remove_all_mem_regs(VuDev *dev) in vu_remove_all_mem_regs() argument
278 for (i = 0; i < dev->nregions; i++) { in vu_remove_all_mem_regs()
279 VuDevRegion *r = &dev->regions[i]; in vu_remove_all_mem_regs()
281 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); in vu_remove_all_mem_regs()
283 dev->nregions = 0; in vu_remove_all_mem_regs()
287 map_ring(VuDev *dev, VuVirtq *vq) in map_ring() argument
289 vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); in map_ring()
290 vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); in map_ring()
291 vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); in map_ring()
294 DPRINT(" vring_desc at %p\n", vq->vring.desc); in map_ring()
295 DPRINT(" vring_used at %p\n", vq->vring.used); in map_ring()
296 DPRINT(" vring_avail at %p\n", vq->vring.avail); in map_ring()
298 return !(vq->vring.desc && vq->vring.used && vq->vring.avail); in map_ring()
302 vu_is_vq_usable(VuDev *dev, VuVirtq *vq) in vu_is_vq_usable() argument
304 if (unlikely(dev->broken)) { in vu_is_vq_usable()
308 if (likely(vq->vring.avail)) { in vu_is_vq_usable()
318 if (!vq->vra.desc_user_addr || !vq->vra.used_user_addr || in vu_is_vq_usable()
319 !vq->vra.avail_user_addr) { in vu_is_vq_usable()
322 if (map_ring(dev, vq)) { in vu_is_vq_usable()
323 vu_panic(dev, "remapping queue on access"); in vu_is_vq_usable()
330 unmap_rings(VuDev *dev, VuDevRegion *r) in unmap_rings() argument
334 for (i = 0; i < dev->max_queues; i++) { in unmap_rings()
335 VuVirtq *vq = &dev->vq[i]; in unmap_rings()
336 const uintptr_t desc = (uintptr_t)vq->vring.desc; in unmap_rings()
337 const uintptr_t used = (uintptr_t)vq->vring.used; in unmap_rings()
338 const uintptr_t avail = (uintptr_t)vq->vring.avail; in unmap_rings()
340 if (desc < r->mmap_addr || desc >= r->mmap_addr + r->size) { in unmap_rings()
343 if (used < r->mmap_addr || used >= r->mmap_addr + r->size) { in unmap_rings()
346 if (avail < r->mmap_addr || avail >= r->mmap_addr + r->size) { in unmap_rings()
351 vq->vring.desc = NULL; in unmap_rings()
352 vq->vring.used = NULL; in unmap_rings()
353 vq->vring.avail = NULL; in unmap_rings()
376 _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd) in _vu_add_mem_reg() argument
378 const uint64_t start_gpa = msg_region->guest_phys_addr; in _vu_add_mem_reg()
379 const uint64_t end_gpa = start_gpa + msg_region->memory_size; in _vu_add_mem_reg()
386 int high = dev->nregions - 1; in _vu_add_mem_reg()
389 DPRINT("Adding region %d\n", dev->nregions); in _vu_add_mem_reg()
391 msg_region->guest_phys_addr); in _vu_add_mem_reg()
393 msg_region->memory_size); in _vu_add_mem_reg()
395 msg_region->userspace_addr); in _vu_add_mem_reg()
397 msg_region->mmap_offset); in _vu_add_mem_reg()
399 if (dev->postcopy_listening) { in _vu_add_mem_reg()
413 unsigned int mid = low + (high - low) / 2; in _vu_add_mem_reg()
414 VuDevRegion *cur = &dev->regions[mid]; in _vu_add_mem_reg()
417 if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) { in _vu_add_mem_reg()
418 vu_panic(dev, "regions with overlapping guest physical addresses"); in _vu_add_mem_reg()
421 if (start_gpa >= cur->gpa + cur->size) { in _vu_add_mem_reg()
424 if (start_gpa < cur->gpa) { in _vu_add_mem_reg()
425 high = mid - 1; in _vu_add_mem_reg()
431 * Convert most of msg_region->mmap_offset to fd_offset. In almost all in _vu_add_mem_reg()
442 fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize); in _vu_add_mem_reg()
443 mmap_offset = msg_region->mmap_offset - fd_offset; in _vu_add_mem_reg()
445 fd_offset = msg_region->mmap_offset; in _vu_add_mem_reg()
454 mmap_addr = mmap(0, msg_region->memory_size + mmap_offset, in _vu_add_mem_reg()
457 vu_panic(dev, "region mmap error: %s", strerror(errno)); in _vu_add_mem_reg()
465 madvise(mmap_addr, msg_region->memory_size + mmap_offset, in _vu_add_mem_reg()
470 r = &dev->regions[idx]; in _vu_add_mem_reg()
471 memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx)); in _vu_add_mem_reg()
472 r->gpa = msg_region->guest_phys_addr; in _vu_add_mem_reg()
473 r->size = msg_region->memory_size; in _vu_add_mem_reg()
474 r->qva = msg_region->userspace_addr; in _vu_add_mem_reg()
475 r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; in _vu_add_mem_reg()
476 r->mmap_offset = mmap_offset; in _vu_add_mem_reg()
477 dev->nregions++; in _vu_add_mem_reg()
479 if (dev->postcopy_listening) { in _vu_add_mem_reg()
484 msg_region->userspace_addr = r->mmap_addr + r->mmap_offset; in _vu_add_mem_reg()
493 for (i = 0; i < vmsg->fd_num; i++) { in vmsg_close_fds()
494 close(vmsg->fds[i]); in vmsg_close_fds()
501 vmsg->flags = 0; /* defaults will be set by vu_send_reply() */ in vmsg_set_reply_u64()
502 vmsg->size = sizeof(vmsg->payload.u64); in vmsg_set_reply_u64()
503 vmsg->payload.u64 = val; in vmsg_set_reply_u64()
504 vmsg->fd_num = 0; in vmsg_set_reply_u64()
537 vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) in vu_message_read_default() argument
559 vu_panic(dev, "Error while recvmsg: %s", strerror(errno)); in vu_message_read_default()
563 vmsg->fd_num = 0; in vu_message_read_default()
568 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { in vu_message_read_default()
569 fd_size = cmsg->cmsg_len - CMSG_LEN(0); in vu_message_read_default()
570 vmsg->fd_num = fd_size / sizeof(int); in vu_message_read_default()
571 assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS); in vu_message_read_default()
572 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size); in vu_message_read_default()
577 if (vmsg->size > sizeof(vmsg->payload)) { in vu_message_read_default()
578 vu_panic(dev, in vu_message_read_default()
579 "Error: too big message request: %d, size: vmsg->size: %u, " in vu_message_read_default()
580 "while sizeof(vmsg->payload) = %zu\n", in vu_message_read_default()
581 vmsg->request, vmsg->size, sizeof(vmsg->payload)); in vu_message_read_default()
585 if (vmsg->size) { in vu_message_read_default()
587 rc = read(conn_fd, &vmsg->payload, vmsg->size); in vu_message_read_default()
591 vu_panic(dev, "Error while reading: %s", strerror(errno)); in vu_message_read_default()
595 assert((uint32_t)rc == vmsg->size); in vu_message_read_default()
607 vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) in vu_message_write() argument
624 assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS); in vu_message_write()
625 if (vmsg->fd_num > 0) { in vu_message_write()
626 size_t fdsize = vmsg->fd_num * sizeof(int); in vu_message_write()
629 cmsg->cmsg_len = CMSG_LEN(fdsize); in vu_message_write()
630 cmsg->cmsg_level = SOL_SOCKET; in vu_message_write()
631 cmsg->cmsg_type = SCM_RIGHTS; in vu_message_write()
632 memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize); in vu_message_write()
643 vu_panic(dev, "Error while writing: %s", strerror(errno)); in vu_message_write()
647 if (vmsg->size) { in vu_message_write()
649 if (vmsg->data) { in vu_message_write()
650 rc = write(conn_fd, vmsg->data, vmsg->size); in vu_message_write()
652 rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size); in vu_message_write()
658 vu_panic(dev, "Error while writing: %s", strerror(errno)); in vu_message_write()
666 vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) in vu_send_reply() argument
669 vmsg->flags &= ~VHOST_USER_VERSION_MASK; in vu_send_reply()
670 vmsg->flags |= VHOST_USER_VERSION; in vu_send_reply()
671 vmsg->flags |= VHOST_USER_REPLY_MASK; in vu_send_reply()
673 return vu_message_write(dev, conn_fd, vmsg); in vu_send_reply()
682 vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg) in vu_process_message_reply() argument
687 if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) { in vu_process_message_reply()
692 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { in vu_process_message_reply()
696 if (msg_reply.request != vmsg->request) { in vu_process_message_reply()
698 vmsg->request, msg_reply.request); in vu_process_message_reply()
705 pthread_mutex_unlock(&dev->backend_mutex); in vu_process_message_reply()
711 vu_log_kick(VuDev *dev) in vu_log_kick() argument
713 if (dev->log_call_fd != -1) { in vu_log_kick()
715 if (eventfd_write(dev->log_call_fd, 1) < 0) { in vu_log_kick()
716 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); in vu_log_kick()
729 vu_log_write(VuDev *dev, uint64_t address, uint64_t length) in vu_log_write() argument
733 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) || in vu_log_write()
734 !dev->log_table || !length) { in vu_log_write()
738 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8)); in vu_log_write()
742 vu_log_page(dev->log_table, page); in vu_log_write()
746 vu_log_kick(dev); in vu_log_write()
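
For reference, the log written here is a shared bitmap with one bit per VHOST_LOG_PAGE-sized guest page. An illustrative sketch of the per-page marking step only (not the exact upstream helper, which performs the update atomically):

/* Illustrative: set the dirty bit for "page" in the shared log bitmap. */
static void mark_page_dirty(uint8_t *log_table, uint64_t page)
{
    log_table[page / 8] |= 1u << (page % 8);
}
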
750 vu_kick_cb(VuDev *dev, int condition, void *data) in vu_kick_cb() argument
753 VuVirtq *vq = &dev->vq[index]; in vu_kick_cb()
754 int sock = vq->kick_fd; in vu_kick_cb()
759 if (rc == -1) { in vu_kick_cb()
760 vu_panic(dev, "kick eventfd_read(): %s", strerror(errno)); in vu_kick_cb()
761 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_kick_cb()
764 kick_data, vq->handler, index); in vu_kick_cb()
765 if (vq->handler) { in vu_kick_cb()
766 vq->handler(dev, index); in vu_kick_cb()
772 vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_features_exec() argument
774 vmsg->payload.u64 = in vu_get_features_exec()
784 /* vhost-user feature bits */ in vu_get_features_exec()
788 if (dev->iface->get_features) { in vu_get_features_exec()
789 vmsg->payload.u64 |= dev->iface->get_features(dev); in vu_get_features_exec()
792 vmsg->size = sizeof(vmsg->payload.u64); in vu_get_features_exec()
793 vmsg->fd_num = 0; in vu_get_features_exec()
795 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_get_features_exec()
801 vu_set_enable_all_rings(VuDev *dev, bool enabled) in vu_set_enable_all_rings() argument
805 for (i = 0; i < dev->max_queues; i++) { in vu_set_enable_all_rings()
806 dev->vq[i].enable = enabled; in vu_set_enable_all_rings()
811 vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_features_exec() argument
813 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_features_exec()
815 dev->features = vmsg->payload.u64; in vu_set_features_exec()
816 if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) { in vu_set_features_exec()
821 vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user"); in vu_set_features_exec()
825 if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) { in vu_set_features_exec()
826 vu_set_enable_all_rings(dev, true); in vu_set_features_exec()
829 if (dev->iface->set_features) { in vu_set_features_exec()
830 dev->iface->set_features(dev, dev->features); in vu_set_features_exec()
837 vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_owner_exec() argument
843 vu_close_log(VuDev *dev) in vu_close_log() argument
845 if (dev->log_table) { in vu_close_log()
846 if (munmap(dev->log_table, dev->log_size) != 0) { in vu_close_log()
850 dev->log_table = NULL; in vu_close_log()
852 if (dev->log_call_fd != -1) { in vu_close_log()
853 close(dev->log_call_fd); in vu_close_log()
854 dev->log_call_fd = -1; in vu_close_log()
859 vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_reset_device_exec() argument
861 vu_set_enable_all_rings(dev, false); in vu_reset_device_exec()
867 generate_faults(VuDev *dev) { in generate_faults() argument
869 for (i = 0; i < dev->nregions; i++) { in generate_faults()
871 VuDevRegion *dev_region = &dev->regions[i]; in generate_faults()
883 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, in generate_faults()
884 dev_region->size + dev_region->mmap_offset, in generate_faults()
896 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, in generate_faults()
897 dev_region->size + dev_region->mmap_offset, in generate_faults()
909 reg_struct.range.start = (uintptr_t)dev_region->mmap_addr; in generate_faults()
910 reg_struct.range.len = dev_region->size + dev_region->mmap_offset; in generate_faults()
913 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) { in generate_faults()
914 vu_panic(dev, "%s: Failed to userfault region %d " in generate_faults()
918 dev_region->mmap_addr, in generate_faults()
919 dev_region->size, dev_region->mmap_offset, in generate_faults()
920 dev->postcopy_ufd, strerror(errno)); in generate_faults()
924 vu_panic(dev, "%s Region (%d) doesn't support COPY", in generate_faults()
933 if (mprotect((void *)(uintptr_t)dev_region->mmap_addr, in generate_faults()
934 dev_region->size + dev_region->mmap_offset, in generate_faults()
936 vu_panic(dev, "failed to mprotect region %d for postcopy (%s)", in generate_faults()
948 vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { in vu_add_mem_reg() argument
949 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; in vu_add_mem_reg()
951 if (vmsg->fd_num != 1) { in vu_add_mem_reg()
953 vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd " in vu_add_mem_reg()
954 "should be sent for this message type", vmsg->fd_num); in vu_add_mem_reg()
958 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { in vu_add_mem_reg()
959 close(vmsg->fds[0]); in vu_add_mem_reg()
960 vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at " in vu_add_mem_reg()
962 VHOST_USER_MEM_REG_SIZE, vmsg->size); in vu_add_mem_reg()
966 if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) { in vu_add_mem_reg()
967 close(vmsg->fds[0]); in vu_add_mem_reg()
968 vu_panic(dev, "failing attempt to hot add memory via " in vu_add_mem_reg()
979 if (dev->postcopy_listening && in vu_add_mem_reg()
980 vmsg->size == sizeof(vmsg->payload.u64) && in vu_add_mem_reg()
981 vmsg->payload.u64 == 0) { in vu_add_mem_reg()
982 (void)generate_faults(dev); in vu_add_mem_reg()
986 _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]); in vu_add_mem_reg()
987 close(vmsg->fds[0]); in vu_add_mem_reg()
989 if (dev->postcopy_listening) { in vu_add_mem_reg()
991 vmsg->fd_num = 0; in vu_add_mem_reg()
1002 if (vudev_reg->gpa == msg_reg->guest_phys_addr && in reg_equal()
1003 vudev_reg->qva == msg_reg->userspace_addr && in reg_equal()
1004 vudev_reg->size == msg_reg->memory_size) { in reg_equal()
1012 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { in vu_rem_mem_reg() argument
1013 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; in vu_rem_mem_reg()
1017 if (vmsg->fd_num > 1) { in vu_rem_mem_reg()
1019 vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd " in vu_rem_mem_reg()
1020 "should be sent for this message type", vmsg->fd_num); in vu_rem_mem_reg()
1024 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { in vu_rem_mem_reg()
1026 vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at " in vu_rem_mem_reg()
1028 VHOST_USER_MEM_REG_SIZE, vmsg->size); in vu_rem_mem_reg()
1034 msg_region->guest_phys_addr); in vu_rem_mem_reg()
1036 msg_region->memory_size); in vu_rem_mem_reg()
1038 msg_region->userspace_addr); in vu_rem_mem_reg()
1040 msg_region->mmap_offset); in vu_rem_mem_reg()
1042 r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr); in vu_rem_mem_reg()
1045 vu_panic(dev, "Specified region not found\n"); in vu_rem_mem_reg()
1057 unmap_rings(dev, r); in vu_rem_mem_reg()
1059 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); in vu_rem_mem_reg()
1061 idx = r - dev->regions; in vu_rem_mem_reg()
1062 assert(idx < dev->nregions); in vu_rem_mem_reg()
1064 memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1)); in vu_rem_mem_reg()
1066 dev->nregions--; in vu_rem_mem_reg()
1074 vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg) in vu_get_shared_object() argument
1077 int dmabuf_fd = -1; in vu_get_shared_object()
1078 if (dev->iface->get_shared_object) { in vu_get_shared_object()
1079 dmabuf_fd = dev->iface->get_shared_object( in vu_get_shared_object()
1080 dev, &vmsg->payload.object.uuid[0]); in vu_get_shared_object()
1082 if (dmabuf_fd != -1) { in vu_get_shared_object()
1083 DPRINT("dmabuf_fd found for requested UUID\n"); in vu_get_shared_object()
1084 vmsg->fds[fd_num++] = dmabuf_fd; in vu_get_shared_object()
1086 vmsg->fd_num = fd_num; in vu_get_shared_object()
1092 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_mem_table_exec() argument
1094 VhostUserMemory m = vmsg->payload.memory, *memory = &m; in vu_set_mem_table_exec()
1097 vu_remove_all_mem_regs(dev); in vu_set_mem_table_exec()
1099 DPRINT("Nregions: %u\n", memory->nregions); in vu_set_mem_table_exec()
1100 for (i = 0; i < memory->nregions; i++) { in vu_set_mem_table_exec()
1101 _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]); in vu_set_mem_table_exec()
1102 close(vmsg->fds[i]); in vu_set_mem_table_exec()
1105 if (dev->postcopy_listening) { in vu_set_mem_table_exec()
1107 vmsg->fd_num = 0; in vu_set_mem_table_exec()
1108 if (!vu_send_reply(dev, dev->sock, vmsg)) { in vu_set_mem_table_exec()
1109 vu_panic(dev, "failed to respond to set-mem-table for postcopy"); in vu_set_mem_table_exec()
1117 if (!dev->read_msg(dev, dev->sock, vmsg) || in vu_set_mem_table_exec()
1118 vmsg->size != sizeof(vmsg->payload.u64) || in vu_set_mem_table_exec()
1119 vmsg->payload.u64 != 0) { in vu_set_mem_table_exec()
1120 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table"); in vu_set_mem_table_exec()
1125 (void)generate_faults(dev); in vu_set_mem_table_exec()
1129 for (i = 0; i < dev->max_queues; i++) { in vu_set_mem_table_exec()
1130 if (dev->vq[i].vring.desc) { in vu_set_mem_table_exec()
1131 if (map_ring(dev, &dev->vq[i])) { in vu_set_mem_table_exec()
1132 vu_panic(dev, "remapping queue %d during setmemtable", i); in vu_set_mem_table_exec()
1141 vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_log_base_exec() argument
1147 if (vmsg->fd_num != 1 || in vu_set_log_base_exec()
1148 vmsg->size != sizeof(vmsg->payload.log)) { in vu_set_log_base_exec()
1149 vu_panic(dev, "Invalid log_base message"); in vu_set_log_base_exec()
1153 fd = vmsg->fds[0]; in vu_set_log_base_exec()
1154 log_mmap_offset = vmsg->payload.log.mmap_offset; in vu_set_log_base_exec()
1155 log_mmap_size = vmsg->payload.log.mmap_size; in vu_set_log_base_exec()
1166 if (dev->log_table) { in vu_set_log_base_exec()
1167 munmap(dev->log_table, dev->log_size); in vu_set_log_base_exec()
1169 dev->log_table = rc; in vu_set_log_base_exec()
1170 dev->log_size = log_mmap_size; in vu_set_log_base_exec()
1172 vmsg->size = sizeof(vmsg->payload.u64); in vu_set_log_base_exec()
1173 vmsg->fd_num = 0; in vu_set_log_base_exec()
1179 vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_log_fd_exec() argument
1181 if (vmsg->fd_num != 1) { in vu_set_log_fd_exec()
1182 vu_panic(dev, "Invalid log_fd message"); in vu_set_log_fd_exec()
1186 if (dev->log_call_fd != -1) { in vu_set_log_fd_exec()
1187 close(dev->log_call_fd); in vu_set_log_fd_exec()
1189 dev->log_call_fd = vmsg->fds[0]; in vu_set_log_fd_exec()
1190 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]); in vu_set_log_fd_exec()
1196 vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_num_exec() argument
1198 unsigned int index = vmsg->payload.state.index; in vu_set_vring_num_exec()
1199 unsigned int num = vmsg->payload.state.num; in vu_set_vring_num_exec()
1203 dev->vq[index].vring.num = num; in vu_set_vring_num_exec()
1209 vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_addr_exec() argument
1211 struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr; in vu_set_vring_addr_exec()
1212 unsigned int index = vra->index; in vu_set_vring_addr_exec()
1213 VuVirtq *vq = &dev->vq[index]; in vu_set_vring_addr_exec()
1216 DPRINT(" index: %d\n", vra->index); in vu_set_vring_addr_exec()
1217 DPRINT(" flags: %d\n", vra->flags); in vu_set_vring_addr_exec()
1218 DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr); in vu_set_vring_addr_exec()
1219 DPRINT(" used_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr); in vu_set_vring_addr_exec()
1220 DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr); in vu_set_vring_addr_exec()
1221 DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr); in vu_set_vring_addr_exec()
1223 vq->vra = *vra; in vu_set_vring_addr_exec()
1224 vq->vring.flags = vra->flags; in vu_set_vring_addr_exec()
1225 vq->vring.log_guest_addr = vra->log_guest_addr; in vu_set_vring_addr_exec()
1228 if (map_ring(dev, vq)) { in vu_set_vring_addr_exec()
1229 vu_panic(dev, "Invalid vring_addr message"); in vu_set_vring_addr_exec()
1233 vq->used_idx = le16toh(vq->vring.used->idx); in vu_set_vring_addr_exec()
1235 if (vq->last_avail_idx != vq->used_idx) { in vu_set_vring_addr_exec()
1236 bool resume = dev->iface->queue_is_processed_in_order && in vu_set_vring_addr_exec()
1237 dev->iface->queue_is_processed_in_order(dev, index); in vu_set_vring_addr_exec()
1240 vq->last_avail_idx, vq->used_idx, in vu_set_vring_addr_exec()
1244 vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx; in vu_set_vring_addr_exec()
1252 vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_base_exec() argument
1254 unsigned int index = vmsg->payload.state.index; in vu_set_vring_base_exec()
1255 unsigned int num = vmsg->payload.state.num; in vu_set_vring_base_exec()
1259 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num; in vu_set_vring_base_exec()
1265 vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_vring_base_exec() argument
1267 unsigned int index = vmsg->payload.state.index; in vu_get_vring_base_exec()
1270 vmsg->payload.state.num = dev->vq[index].last_avail_idx; in vu_get_vring_base_exec()
1271 vmsg->size = sizeof(vmsg->payload.state); in vu_get_vring_base_exec()
1273 dev->vq[index].started = false; in vu_get_vring_base_exec()
1274 if (dev->iface->queue_set_started) { in vu_get_vring_base_exec()
1275 dev->iface->queue_set_started(dev, index, false); in vu_get_vring_base_exec()
1278 if (dev->vq[index].call_fd != -1) { in vu_get_vring_base_exec()
1279 close(dev->vq[index].call_fd); in vu_get_vring_base_exec()
1280 dev->vq[index].call_fd = -1; in vu_get_vring_base_exec()
1282 if (dev->vq[index].kick_fd != -1) { in vu_get_vring_base_exec()
1283 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_get_vring_base_exec()
1284 close(dev->vq[index].kick_fd); in vu_get_vring_base_exec()
1285 dev->vq[index].kick_fd = -1; in vu_get_vring_base_exec()
1292 vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg) in vu_check_queue_msg_file() argument
1294 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_check_queue_msg_file()
1295 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_check_queue_msg_file()
1297 if (index >= dev->max_queues) { in vu_check_queue_msg_file()
1299 vu_panic(dev, "Invalid queue index: %u", index); in vu_check_queue_msg_file()
1308 if (vmsg->fd_num != 1) { in vu_check_queue_msg_file()
1310 vu_panic(dev, "Invalid fds in request: %d", vmsg->request); in vu_check_queue_msg_file()
1323 if (desc1->counter > desc0->counter && in inflight_desc_compare()
1324 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) { in inflight_desc_compare()
1328 return -1; in inflight_desc_compare()
1332 vu_check_queue_inflights(VuDev *dev, VuVirtq *vq) in vu_check_queue_inflights() argument
1336 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_check_queue_inflights()
1340 if (unlikely(!vq->inflight)) { in vu_check_queue_inflights()
1341 return -1; in vu_check_queue_inflights()
1344 if (unlikely(!vq->inflight->version)) { in vu_check_queue_inflights()
1346 vq->inflight->version = INFLIGHT_VERSION; in vu_check_queue_inflights()
1350 vq->used_idx = le16toh(vq->vring.used->idx); in vu_check_queue_inflights()
1351 vq->resubmit_num = 0; in vu_check_queue_inflights()
1352 vq->resubmit_list = NULL; in vu_check_queue_inflights()
1353 vq->counter = 0; in vu_check_queue_inflights()
1355 if (unlikely(vq->inflight->used_idx != vq->used_idx)) { in vu_check_queue_inflights()
1356 vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0; in vu_check_queue_inflights()
1360 vq->inflight->used_idx = vq->used_idx; in vu_check_queue_inflights()
1363 for (i = 0; i < vq->inflight->desc_num; i++) { in vu_check_queue_inflights()
1364 if (vq->inflight->desc[i].inflight == 1) { in vu_check_queue_inflights()
1365 vq->inuse++; in vu_check_queue_inflights()
1369 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx; in vu_check_queue_inflights()
1371 if (vq->inuse) { in vu_check_queue_inflights()
1372 vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc)); in vu_check_queue_inflights()
1373 if (!vq->resubmit_list) { in vu_check_queue_inflights()
1374 return -1; in vu_check_queue_inflights()
1377 for (i = 0; i < vq->inflight->desc_num; i++) { in vu_check_queue_inflights()
1378 if (vq->inflight->desc[i].inflight) { in vu_check_queue_inflights()
1379 vq->resubmit_list[vq->resubmit_num].index = i; in vu_check_queue_inflights()
1380 vq->resubmit_list[vq->resubmit_num].counter = in vu_check_queue_inflights()
1381 vq->inflight->desc[i].counter; in vu_check_queue_inflights()
1382 vq->resubmit_num++; in vu_check_queue_inflights()
1386 if (vq->resubmit_num > 1) { in vu_check_queue_inflights()
1387 qsort(vq->resubmit_list, vq->resubmit_num, in vu_check_queue_inflights()
1390 vq->counter = vq->resubmit_list[0].counter + 1; in vu_check_queue_inflights()
1394 if (eventfd_write(vq->kick_fd, 1)) { in vu_check_queue_inflights()
1395 return -1; in vu_check_queue_inflights()
1402 vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_kick_exec() argument
1404 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_set_vring_kick_exec()
1405 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_set_vring_kick_exec()
1407 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_vring_kick_exec()
1409 if (!vu_check_queue_msg_file(dev, vmsg)) { in vu_set_vring_kick_exec()
1413 if (dev->vq[index].kick_fd != -1) { in vu_set_vring_kick_exec()
1414 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_set_vring_kick_exec()
1415 close(dev->vq[index].kick_fd); in vu_set_vring_kick_exec()
1416 dev->vq[index].kick_fd = -1; in vu_set_vring_kick_exec()
1419 dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_kick_exec()
1420 DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index); in vu_set_vring_kick_exec()
1422 dev->vq[index].started = true; in vu_set_vring_kick_exec()
1423 if (dev->iface->queue_set_started) { in vu_set_vring_kick_exec()
1424 dev->iface->queue_set_started(dev, index, true); in vu_set_vring_kick_exec()
1427 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) { in vu_set_vring_kick_exec()
1428 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN, in vu_set_vring_kick_exec()
1432 dev->vq[index].kick_fd, index); in vu_set_vring_kick_exec()
1435 if (vu_check_queue_inflights(dev, &dev->vq[index])) { in vu_set_vring_kick_exec()
1436 vu_panic(dev, "Failed to check inflights for vq: %d\n", index); in vu_set_vring_kick_exec()
1442 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq, in vu_set_queue_handler() argument
1445 int qidx = vq - dev->vq; in vu_set_queue_handler()
1447 vq->handler = handler; in vu_set_queue_handler()
1448 if (vq->kick_fd >= 0) { in vu_set_queue_handler()
1450 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN, in vu_set_queue_handler()
1453 dev->remove_watch(dev, vq->kick_fd); in vu_set_queue_handler()
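
A short sketch of how a backend typically wires this up from its queue_set_started callback; the names my_queue_handler and my_queue_set_started are illustrative, and the prototypes follow the callback usage visible in this file:

/* Illustrative queue handler: called whenever the queue's kick_fd fires. */
static void my_queue_handler(VuDev *dev, int qidx)
{
    /* pop and process requests for queue "qidx" here */
}

/* Illustrative VuDevIface.queue_set_started callback. */
static void my_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    /* Passing NULL as the handler removes the kick_fd watch again. */
    vu_set_queue_handler(dev, vq, started ? my_queue_handler : NULL);
}
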
1458 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, in vu_set_queue_host_notifier() argument
1461 int qidx = vq - dev->vq; in vu_set_queue_host_notifier()
1474 if (fd == -1) { in vu_set_queue_host_notifier()
1482 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) { in vu_set_queue_host_notifier()
1486 pthread_mutex_lock(&dev->backend_mutex); in vu_set_queue_host_notifier()
1487 if (!vu_message_write(dev, dev->backend_fd, &vmsg)) { in vu_set_queue_host_notifier()
1488 pthread_mutex_unlock(&dev->backend_mutex); in vu_set_queue_host_notifier()
1493 return vu_process_message_reply(dev, &vmsg); in vu_set_queue_host_notifier()
1497 vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN], in vu_lookup_shared_object() argument
1508 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); in vu_lookup_shared_object()
1510 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { in vu_lookup_shared_object()
1514 pthread_mutex_lock(&dev->backend_mutex); in vu_lookup_shared_object()
1515 if (!vu_message_write(dev, dev->backend_fd, &msg)) { in vu_lookup_shared_object()
1519 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { in vu_lookup_shared_object()
1538 pthread_mutex_unlock(&dev->backend_mutex); in vu_lookup_shared_object()
1544 vu_send_message(VuDev *dev, VhostUserMsg *vmsg) in vu_send_message() argument
1547 pthread_mutex_lock(&dev->backend_mutex); in vu_send_message()
1548 if (!vu_message_write(dev, dev->backend_fd, vmsg)) { in vu_send_message()
1554 pthread_mutex_unlock(&dev->backend_mutex); in vu_send_message()
1560 vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) in vu_add_shared_object() argument
1568 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); in vu_add_shared_object()
1570 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { in vu_add_shared_object()
1574 return vu_send_message(dev, &msg); in vu_add_shared_object()
1578 vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) in vu_rm_shared_object() argument
1586 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); in vu_rm_shared_object()
1588 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { in vu_rm_shared_object()
1592 return vu_send_message(dev, &msg); in vu_rm_shared_object()
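
For context, a backend that exports a dma-buf advertises its 16-byte UUID with vu_add_shared_object() and retires it with vu_rm_shared_object(); both paths, as shown above, require VHOST_USER_PROTOCOL_F_SHARED_OBJECT to be negotiated. A minimal, hedged sketch (the exported_buf bookkeeping is hypothetical):

#include <string.h>
#include "libvhost-user.h"

/* Hypothetical bookkeeping for one exported dma-buf. */
struct exported_buf {
    unsigned char uuid[UUID_LEN];
    int dmabuf_fd;
};

static bool announce_export(VuDev *dev, struct exported_buf *buf,
                            const unsigned char uuid[UUID_LEN], int fd)
{
    memcpy(buf->uuid, uuid, UUID_LEN);
    buf->dmabuf_fd = fd;
    return vu_add_shared_object(dev, buf->uuid);
}

static bool retire_export(VuDev *dev, struct exported_buf *buf)
{
    return vu_rm_shared_object(dev, buf->uuid);
}
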
1596 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_call_exec() argument
1598 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_set_vring_call_exec()
1599 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_set_vring_call_exec()
1601 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_vring_call_exec()
1603 if (!vu_check_queue_msg_file(dev, vmsg)) { in vu_set_vring_call_exec()
1607 if (dev->vq[index].call_fd != -1) { in vu_set_vring_call_exec()
1608 close(dev->vq[index].call_fd); in vu_set_vring_call_exec()
1609 dev->vq[index].call_fd = -1; in vu_set_vring_call_exec()
1612 dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_call_exec()
1615 if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) { in vu_set_vring_call_exec()
1616 return -1; in vu_set_vring_call_exec()
1619 DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index); in vu_set_vring_call_exec()
1625 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_err_exec() argument
1627 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_set_vring_err_exec()
1628 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_set_vring_err_exec()
1630 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_vring_err_exec()
1632 if (!vu_check_queue_msg_file(dev, vmsg)) { in vu_set_vring_err_exec()
1636 if (dev->vq[index].err_fd != -1) { in vu_set_vring_err_exec()
1637 close(dev->vq[index].err_fd); in vu_set_vring_err_exec()
1638 dev->vq[index].err_fd = -1; in vu_set_vring_err_exec()
1641 dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_err_exec()
1647 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_protocol_features_exec() argument
1669 if (dev->iface->get_config && dev->iface->set_config) { in vu_get_protocol_features_exec()
1673 if (dev->iface->get_protocol_features) { in vu_get_protocol_features_exec()
1674 features |= dev->iface->get_protocol_features(dev); in vu_get_protocol_features_exec()
1693 vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_protocol_features_exec() argument
1695 uint64_t features = vmsg->payload.u64; in vu_set_protocol_features_exec()
1699 dev->protocol_features = vmsg->payload.u64; in vu_set_protocol_features_exec()
1701 if (vu_has_protocol_feature(dev, in vu_set_protocol_features_exec()
1703 (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) || in vu_set_protocol_features_exec()
1704 !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) { in vu_set_protocol_features_exec()
1715 vu_panic(dev, in vu_set_protocol_features_exec()
1720 if (dev->iface->set_protocol_features) { in vu_set_protocol_features_exec()
1721 dev->iface->set_protocol_features(dev, features); in vu_set_protocol_features_exec()
1728 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_queue_num_exec() argument
1730 vmsg_set_reply_u64(vmsg, dev->max_queues); in vu_get_queue_num_exec()
1735 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_enable_exec() argument
1737 unsigned int index = vmsg->payload.state.index; in vu_set_vring_enable_exec()
1738 unsigned int enable = vmsg->payload.state.num; in vu_set_vring_enable_exec()
1743 if (index >= dev->max_queues) { in vu_set_vring_enable_exec()
1744 vu_panic(dev, "Invalid vring_enable index: %u", index); in vu_set_vring_enable_exec()
1748 dev->vq[index].enable = enable; in vu_set_vring_enable_exec()
1753 vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg) in vu_set_backend_req_fd() argument
1755 if (vmsg->fd_num != 1) { in vu_set_backend_req_fd()
1756 vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num); in vu_set_backend_req_fd()
1760 if (dev->backend_fd != -1) { in vu_set_backend_req_fd()
1761 close(dev->backend_fd); in vu_set_backend_req_fd()
1763 dev->backend_fd = vmsg->fds[0]; in vu_set_backend_req_fd()
1764 DPRINT("Got backend_fd: %d\n", vmsg->fds[0]); in vu_set_backend_req_fd()
1770 vu_get_config(VuDev *dev, VhostUserMsg *vmsg) in vu_get_config() argument
1772 int ret = -1; in vu_get_config()
1774 if (dev->iface->get_config) { in vu_get_config()
1775 ret = dev->iface->get_config(dev, vmsg->payload.config.region, in vu_get_config()
1776 vmsg->payload.config.size); in vu_get_config()
1781 vmsg->size = 0; in vu_get_config()
1788 vu_set_config(VuDev *dev, VhostUserMsg *vmsg) in vu_set_config() argument
1790 int ret = -1; in vu_set_config()
1792 if (dev->iface->set_config) { in vu_set_config()
1793 ret = dev->iface->set_config(dev, vmsg->payload.config.region, in vu_set_config()
1794 vmsg->payload.config.offset, in vu_set_config()
1795 vmsg->payload.config.size, in vu_set_config()
1796 vmsg->payload.config.flags); in vu_set_config()
1798 vu_panic(dev, "Set virtio configuration space failed"); in vu_set_config()
1806 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg) in vu_set_postcopy_advise() argument
1811 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); in vu_set_postcopy_advise()
1812 vmsg->size = 0; in vu_set_postcopy_advise()
1814 dev->postcopy_ufd = -1; in vu_set_postcopy_advise()
1817 if (dev->postcopy_ufd == -1) { in vu_set_postcopy_advise()
1818 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno)); in vu_set_postcopy_advise()
1825 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { in vu_set_postcopy_advise()
1826 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno)); in vu_set_postcopy_advise()
1827 close(dev->postcopy_ufd); in vu_set_postcopy_advise()
1828 dev->postcopy_ufd = -1; in vu_set_postcopy_advise()
1836 vmsg->fd_num = 1; in vu_set_postcopy_advise()
1837 vmsg->fds[0] = dev->postcopy_ufd; in vu_set_postcopy_advise()
1842 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg) in vu_set_postcopy_listen() argument
1844 if (dev->nregions) { in vu_set_postcopy_listen()
1845 vu_panic(dev, "Regions already registered at postcopy-listen"); in vu_set_postcopy_listen()
1846 vmsg_set_reply_u64(vmsg, -1); in vu_set_postcopy_listen()
1849 dev->postcopy_listening = true; in vu_set_postcopy_listen()
1856 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg) in vu_set_postcopy_end() argument
1859 dev->postcopy_listening = false; in vu_set_postcopy_end()
1860 if (dev->postcopy_ufd > 0) { in vu_set_postcopy_end()
1861 close(dev->postcopy_ufd); in vu_set_postcopy_end()
1862 dev->postcopy_ufd = -1; in vu_set_postcopy_end()
1913 vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) in vu_get_inflight_fd() argument
1915 int fd = -1; in vu_get_inflight_fd()
1920 if (vmsg->size != sizeof(vmsg->payload.inflight)) { in vu_get_inflight_fd()
1921 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size); in vu_get_inflight_fd()
1922 vmsg->payload.inflight.mmap_size = 0; in vu_get_inflight_fd()
1926 num_queues = vmsg->payload.inflight.num_queues; in vu_get_inflight_fd()
1927 queue_size = vmsg->payload.inflight.queue_size; in vu_get_inflight_fd()
1935 addr = memfd_alloc("vhost-inflight", mmap_size, in vu_get_inflight_fd()
1939 vu_panic(dev, "Not implemented: memfd support is missing"); in vu_get_inflight_fd()
1943 vu_panic(dev, "Failed to alloc vhost inflight area"); in vu_get_inflight_fd()
1944 vmsg->payload.inflight.mmap_size = 0; in vu_get_inflight_fd()
1950 dev->inflight_info.addr = addr; in vu_get_inflight_fd()
1951 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size; in vu_get_inflight_fd()
1952 dev->inflight_info.fd = vmsg->fds[0] = fd; in vu_get_inflight_fd()
1953 vmsg->fd_num = 1; in vu_get_inflight_fd()
1954 vmsg->payload.inflight.mmap_offset = 0; in vu_get_inflight_fd()
1957 vmsg->payload.inflight.mmap_size); in vu_get_inflight_fd()
1959 vmsg->payload.inflight.mmap_offset); in vu_get_inflight_fd()
1965 vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) in vu_set_inflight_fd() argument
1972 if (vmsg->fd_num != 1 || in vu_set_inflight_fd()
1973 vmsg->size != sizeof(vmsg->payload.inflight)) { in vu_set_inflight_fd()
1974 vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d", in vu_set_inflight_fd()
1975 vmsg->size, vmsg->fd_num); in vu_set_inflight_fd()
1979 fd = vmsg->fds[0]; in vu_set_inflight_fd()
1980 mmap_size = vmsg->payload.inflight.mmap_size; in vu_set_inflight_fd()
1981 mmap_offset = vmsg->payload.inflight.mmap_offset; in vu_set_inflight_fd()
1982 num_queues = vmsg->payload.inflight.num_queues; in vu_set_inflight_fd()
1983 queue_size = vmsg->payload.inflight.queue_size; in vu_set_inflight_fd()
1994 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno)); in vu_set_inflight_fd()
1998 if (dev->inflight_info.fd) { in vu_set_inflight_fd()
1999 close(dev->inflight_info.fd); in vu_set_inflight_fd()
2002 if (dev->inflight_info.addr) { in vu_set_inflight_fd()
2003 munmap(dev->inflight_info.addr, dev->inflight_info.size); in vu_set_inflight_fd()
2006 dev->inflight_info.fd = fd; in vu_set_inflight_fd()
2007 dev->inflight_info.addr = rc; in vu_set_inflight_fd()
2008 dev->inflight_info.size = mmap_size; in vu_set_inflight_fd()
2011 dev->vq[i].inflight = (VuVirtqInflight *)rc; in vu_set_inflight_fd()
2012 dev->vq[i].inflight->desc_num = queue_size; in vu_set_inflight_fd()
2020 vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg) in vu_handle_vring_kick() argument
2022 unsigned int index = vmsg->payload.state.index; in vu_handle_vring_kick()
2024 if (index >= dev->max_queues) { in vu_handle_vring_kick()
2025 vu_panic(dev, "Invalid queue index: %u", index); in vu_handle_vring_kick()
2030 dev->vq[index].handler, index); in vu_handle_vring_kick()
2032 if (!dev->vq[index].started) { in vu_handle_vring_kick()
2033 dev->vq[index].started = true; in vu_handle_vring_kick()
2035 if (dev->iface->queue_set_started) { in vu_handle_vring_kick()
2036 dev->iface->queue_set_started(dev, index, true); in vu_handle_vring_kick()
2040 if (dev->vq[index].handler) { in vu_handle_vring_kick()
2041 dev->vq[index].handler(dev, index); in vu_handle_vring_kick()
2047 static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg) in vu_handle_get_max_memslots() argument
2057 vu_process_message(VuDev *dev, VhostUserMsg *vmsg) in vu_process_message() argument
2063 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request), in vu_process_message()
2064 vmsg->request); in vu_process_message()
2065 DPRINT("Flags: 0x%x\n", vmsg->flags); in vu_process_message()
2066 DPRINT("Size: %u\n", vmsg->size); in vu_process_message()
2068 if (vmsg->fd_num) { in vu_process_message()
2071 for (i = 0; i < vmsg->fd_num; i++) { in vu_process_message()
2072 DPRINT(" %d", vmsg->fds[i]); in vu_process_message()
2077 if (dev->iface->process_msg && in vu_process_message()
2078 dev->iface->process_msg(dev, vmsg, &do_reply)) { in vu_process_message()
2082 switch (vmsg->request) { in vu_process_message()
2084 return vu_get_features_exec(dev, vmsg); in vu_process_message()
2086 return vu_set_features_exec(dev, vmsg); in vu_process_message()
2088 return vu_get_protocol_features_exec(dev, vmsg); in vu_process_message()
2090 return vu_set_protocol_features_exec(dev, vmsg); in vu_process_message()
2092 return vu_set_owner_exec(dev, vmsg); in vu_process_message()
2094 return vu_reset_device_exec(dev, vmsg); in vu_process_message()
2096 return vu_set_mem_table_exec(dev, vmsg); in vu_process_message()
2098 return vu_set_log_base_exec(dev, vmsg); in vu_process_message()
2100 return vu_set_log_fd_exec(dev, vmsg); in vu_process_message()
2102 return vu_set_vring_num_exec(dev, vmsg); in vu_process_message()
2104 return vu_set_vring_addr_exec(dev, vmsg); in vu_process_message()
2106 return vu_set_vring_base_exec(dev, vmsg); in vu_process_message()
2108 return vu_get_vring_base_exec(dev, vmsg); in vu_process_message()
2110 return vu_set_vring_kick_exec(dev, vmsg); in vu_process_message()
2112 return vu_set_vring_call_exec(dev, vmsg); in vu_process_message()
2114 return vu_set_vring_err_exec(dev, vmsg); in vu_process_message()
2116 return vu_get_queue_num_exec(dev, vmsg); in vu_process_message()
2118 return vu_set_vring_enable_exec(dev, vmsg); in vu_process_message()
2120 return vu_set_backend_req_fd(dev, vmsg); in vu_process_message()
2122 return vu_get_config(dev, vmsg); in vu_process_message()
2124 return vu_set_config(dev, vmsg); in vu_process_message()
2126 /* if you need processing before exit, override iface->process_msg */ in vu_process_message()
2129 return vu_set_postcopy_advise(dev, vmsg); in vu_process_message()
2131 return vu_set_postcopy_listen(dev, vmsg); in vu_process_message()
2133 return vu_set_postcopy_end(dev, vmsg); in vu_process_message()
2135 return vu_get_inflight_fd(dev, vmsg); in vu_process_message()
2137 return vu_set_inflight_fd(dev, vmsg); in vu_process_message()
2139 return vu_handle_vring_kick(dev, vmsg); in vu_process_message()
2141 return vu_handle_get_max_memslots(dev, vmsg); in vu_process_message()
2143 return vu_add_mem_reg(dev, vmsg); in vu_process_message()
2145 return vu_rem_mem_reg(dev, vmsg); in vu_process_message()
2147 return vu_get_shared_object(dev, vmsg); in vu_process_message()
2150 vu_panic(dev, "Unhandled request: %d", vmsg->request); in vu_process_message()
2157 vu_dispatch(VuDev *dev) in vu_dispatch() argument
2163 if (!dev->read_msg(dev, dev->sock, &vmsg)) { in vu_dispatch()
2169 reply_requested = vu_process_message(dev, &vmsg); in vu_dispatch()
2180 if (!vu_send_reply(dev, dev->sock, &vmsg)) { in vu_dispatch()
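
A sketch of the usual driving loop around vu_dispatch(): with the default (blocking) message reader, a single-threaded backend can simply loop until the front-end disconnects or a protocol error occurs. The function name is hypothetical and error handling is omitted:

/* vu_dispatch() reads and processes exactly one vhost-user message. */
static void serve_vhost_user(VuDev *dev)
{
    while (vu_dispatch(dev)) {
        /* keep handling requests */
    }
    vu_deinit(dev);     /* unmaps regions, closes fds, frees vq/regions */
}
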
2192 vu_deinit(VuDev *dev) in vu_deinit() argument
2196 vu_remove_all_mem_regs(dev); in vu_deinit()
2198 for (i = 0; i < dev->max_queues; i++) { in vu_deinit()
2199 VuVirtq *vq = &dev->vq[i]; in vu_deinit()
2201 if (vq->call_fd != -1) { in vu_deinit()
2202 close(vq->call_fd); in vu_deinit()
2203 vq->call_fd = -1; in vu_deinit()
2206 if (vq->kick_fd != -1) { in vu_deinit()
2207 dev->remove_watch(dev, vq->kick_fd); in vu_deinit()
2208 close(vq->kick_fd); in vu_deinit()
2209 vq->kick_fd = -1; in vu_deinit()
2212 if (vq->err_fd != -1) { in vu_deinit()
2213 close(vq->err_fd); in vu_deinit()
2214 vq->err_fd = -1; in vu_deinit()
2217 if (vq->resubmit_list) { in vu_deinit()
2218 free(vq->resubmit_list); in vu_deinit()
2219 vq->resubmit_list = NULL; in vu_deinit()
2222 vq->inflight = NULL; in vu_deinit()
2225 if (dev->inflight_info.addr) { in vu_deinit()
2226 munmap(dev->inflight_info.addr, dev->inflight_info.size); in vu_deinit()
2227 dev->inflight_info.addr = NULL; in vu_deinit()
2230 if (dev->inflight_info.fd > 0) { in vu_deinit()
2231 close(dev->inflight_info.fd); in vu_deinit()
2232 dev->inflight_info.fd = -1; in vu_deinit()
2235 vu_close_log(dev); in vu_deinit()
2236 if (dev->backend_fd != -1) { in vu_deinit()
2237 close(dev->backend_fd); in vu_deinit()
2238 dev->backend_fd = -1; in vu_deinit()
2240 pthread_mutex_destroy(&dev->backend_mutex); in vu_deinit()
2242 if (dev->sock != -1) { in vu_deinit()
2243 close(dev->sock); in vu_deinit()
2246 free(dev->vq); in vu_deinit()
2247 dev->vq = NULL; in vu_deinit()
2248 free(dev->regions); in vu_deinit()
2249 dev->regions = NULL; in vu_deinit()
2253 vu_init(VuDev *dev, in vu_init() argument
2271 memset(dev, 0, sizeof(*dev)); in vu_init()
2273 dev->sock = socket; in vu_init()
2274 dev->panic = panic; in vu_init()
2275 dev->read_msg = read_msg ? read_msg : vu_message_read_default; in vu_init()
2276 dev->set_watch = set_watch; in vu_init()
2277 dev->remove_watch = remove_watch; in vu_init()
2278 dev->iface = iface; in vu_init()
2279 dev->log_call_fd = -1; in vu_init()
2280 pthread_mutex_init(&dev->backend_mutex, NULL); in vu_init()
2281 dev->backend_fd = -1; in vu_init()
2282 dev->max_queues = max_queues; in vu_init()
2284 dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0])); in vu_init()
2285 if (!dev->regions) { in vu_init()
2290 dev->vq = malloc(max_queues * sizeof(dev->vq[0])); in vu_init()
2291 if (!dev->vq) { in vu_init()
2293 free(dev->regions); in vu_init()
2294 dev->regions = NULL; in vu_init()
2299 dev->vq[i] = (VuVirtq) { in vu_init()
2300 .call_fd = -1, .kick_fd = -1, .err_fd = -1, in vu_init()
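
For orientation, a hedged sketch of a vu_init() caller. The parameter order (dev, max_queues, socket, panic, read_msg, set_watch, remove_watch, iface) and the callback prototypes are assumed to match the vu_*_cb typedefs in libvhost-user.h; the event-loop glue and the iface argument are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include "libvhost-user.h"

static void panic_cb(VuDev *dev, const char *msg)
{
    fprintf(stderr, "libvhost-user panic: %s\n", msg);
    exit(EXIT_FAILURE);
}

static void set_watch_cb(VuDev *dev, int fd, int condition,
                         vu_watch_cb cb, void *data)
{
    /* arm the surrounding event loop so cb(dev, condition, data) runs
     * whenever fd becomes readable */
}

static void remove_watch_cb(VuDev *dev, int fd)
{
    /* disarm the watch installed above */
}

static bool backend_start(VuDev *dev, int conn_fd, const VuDevIface *iface)
{
    /* NULL read_msg selects vu_message_read_default() (line 2275 above). */
    return vu_init(dev, 1 /* max_queues */, conn_fd, panic_cb, NULL,
                   set_watch_cb, remove_watch_cb, iface);
}
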
2309 vu_get_queue(VuDev *dev, int qidx) in vu_get_queue() argument
2311 assert(qidx < dev->max_queues); in vu_get_queue()
2312 return &dev->vq[qidx]; in vu_get_queue()
2316 vu_queue_enabled(VuDev *dev, VuVirtq *vq) in vu_queue_enabled() argument
2318 return vq->enable; in vu_queue_enabled()
2322 vu_queue_started(const VuDev *dev, const VuVirtq *vq) in vu_queue_started() argument
2324 return vq->started; in vu_queue_started()
2330 return le16toh(vq->vring.avail->flags); in vring_avail_flags()
2336 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx); in vring_avail_idx()
2338 return vq->shadow_avail_idx; in vring_avail_idx()
2344 return le16toh(vq->vring.avail->ring[i]); in vring_avail_ring()
2350 return vring_avail_ring(vq, vq->vring.num); in vring_get_used_event()
2354 virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx) in virtqueue_num_heads() argument
2356 uint16_t num_heads = vring_avail_idx(vq) - idx; in virtqueue_num_heads()
2359 if (num_heads > vq->vring.num) { in virtqueue_num_heads()
2360 vu_panic(dev, "Guest moved used index from %u to %u", in virtqueue_num_heads()
2361 idx, vq->shadow_avail_idx); in virtqueue_num_heads()
2362 return -1; in virtqueue_num_heads()
2365 /* On success, callers read a descriptor at vq->last_avail_idx. in virtqueue_num_heads()
2374 virtqueue_get_head(VuDev *dev, VuVirtq *vq, in virtqueue_get_head() argument
2379 *head = vring_avail_ring(vq, idx % vq->vring.num); in virtqueue_get_head()
2382 if (*head >= vq->vring.num) { in virtqueue_get_head()
2383 vu_panic(dev, "Guest says index %u is available", *head); in virtqueue_get_head()
2391 virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc, in virtqueue_read_indirect_desc() argument
2398 return -1; in virtqueue_read_indirect_desc()
2402 return -1; in virtqueue_read_indirect_desc()
2407 ori_desc = vu_gpa_to_va(dev, &read_len, addr); in virtqueue_read_indirect_desc()
2409 return -1; in virtqueue_read_indirect_desc()
2413 len -= read_len; in virtqueue_read_indirect_desc()
2422 VIRTQUEUE_READ_DESC_ERROR = -1,
2428 virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc, in virtqueue_read_next_desc() argument
2442 vu_panic(dev, "Desc next is %u", *next); in virtqueue_read_next_desc()
2450 vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, in vu_queue_get_avail_bytes() argument
2458 idx = vq->last_avail_idx; in vu_queue_get_avail_bytes()
2461 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_get_avail_bytes()
2465 while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) { in vu_queue_get_avail_bytes()
2472 max = vq->vring.num; in vu_queue_get_avail_bytes()
2474 if (!virtqueue_get_head(dev, vq, idx++, &i)) { in vu_queue_get_avail_bytes()
2477 desc = vq->vring.desc; in vu_queue_get_avail_bytes()
2481 vu_panic(dev, "Invalid size for indirect buffer table"); in vu_queue_get_avail_bytes()
2487 vu_panic(dev, "Looped descriptor"); in vu_queue_get_avail_bytes()
2497 desc = vu_gpa_to_va(dev, &read_len, desc_addr); in vu_queue_get_avail_bytes()
2501 if (!virtqueue_read_indirect_desc(dev, desc_buf, in vu_queue_get_avail_bytes()
2508 vu_panic(dev, "Invalid indirect buffer table"); in vu_queue_get_avail_bytes()
2517 vu_panic(dev, "Looped descriptor"); in vu_queue_get_avail_bytes()
2529 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); in vu_queue_get_avail_bytes()
2560 vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes, in vu_queue_avail_bytes() argument
2565 vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total, in vu_queue_avail_bytes()
2574 vu_queue_empty(VuDev *dev, VuVirtq *vq) in vu_queue_empty() argument
2576 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_empty()
2580 if (vq->shadow_avail_idx != vq->last_avail_idx) { in vu_queue_empty()
2584 return vring_avail_idx(vq) == vq->last_avail_idx; in vu_queue_empty()
2588 vring_notify(VuDev *dev, VuVirtq *vq) in vring_notify() argument
2597 if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && in vring_notify()
2598 !vq->inuse && vu_queue_empty(dev, vq)) { in vring_notify()
2602 if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { in vring_notify()
2606 v = vq->signalled_used_valid; in vring_notify()
2607 vq->signalled_used_valid = true; in vring_notify()
2608 old = vq->signalled_used; in vring_notify()
2609 new = vq->signalled_used = vq->used_idx; in vring_notify()
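
For context: when VIRTIO_RING_F_EVENT_IDX is negotiated, the decision whether to signal usually comes down to the virtio specification's event-index test over (old, new, used_event); shown here for reference, not as the verbatim code elided from this listing:

/* virtio event-index rule: signal only if used_idx crossed the event index
 * since the last notification ("old"/"new" are the previous/current used_idx). */
static inline bool need_event(uint16_t event, uint16_t new, uint16_t old)
{
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}
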
2613 static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) in _vu_queue_notify() argument
2615 if (!vu_is_vq_usable(dev, vq)) { in _vu_queue_notify()
2619 if (!vring_notify(dev, vq)) { in _vu_queue_notify()
2624 if (vq->call_fd < 0 && in _vu_queue_notify()
2625 vu_has_protocol_feature(dev, in _vu_queue_notify()
2627 vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) { in _vu_queue_notify()
2633 .index = vq - dev->vq, in _vu_queue_notify()
2637 vu_has_protocol_feature(dev, in _vu_queue_notify()
2644 vu_message_write(dev, dev->backend_fd, &vmsg); in _vu_queue_notify()
2646 vu_message_read_default(dev, dev->backend_fd, &vmsg); in _vu_queue_notify()
2651 if (eventfd_write(vq->call_fd, 1) < 0) { in _vu_queue_notify()
2652 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); in _vu_queue_notify()
2656 void vu_queue_notify(VuDev *dev, VuVirtq *vq) in vu_queue_notify() argument
2658 _vu_queue_notify(dev, vq, false); in vu_queue_notify()
2661 void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq) in vu_queue_notify_sync() argument
2663 _vu_queue_notify(dev, vq, true); in vu_queue_notify_sync()
2666 void vu_config_change_msg(VuDev *dev) in vu_config_change_msg() argument
2673 vu_message_write(dev, dev->backend_fd, &vmsg); in vu_config_change_msg()
2681 flags = (uint16_t *)((char*)vq->vring.used + in vring_used_flags_set_bit()
2691 flags = (uint16_t *)((char*)vq->vring.used + in vring_used_flags_unset_bit()
2701 if (!vq->notification) { in vring_set_avail_event()
2705 memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t)); in vring_set_avail_event()
2709 vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable) in vu_queue_set_notification() argument
2711 vq->notification = enable; in vu_queue_set_notification()
2712 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { in vu_queue_set_notification()
2726 virtqueue_map_desc(VuDev *dev, in virtqueue_map_desc() argument
2736 vu_panic(dev, "virtio: zero sized buffers are not allowed"); in virtqueue_map_desc()
2744 vu_panic(dev, "virtio: too many descriptors in indirect table"); in virtqueue_map_desc()
2748 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa); in virtqueue_map_desc()
2750 vu_panic(dev, "virtio: invalid address for buffers"); in virtqueue_map_desc()
2755 sz -= len; in virtqueue_map_desc()
2768 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0])); in virtqueue_alloc_element()
2769 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); in virtqueue_alloc_element()
2770 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); in virtqueue_alloc_element()
2778 elem->out_num = out_num; in virtqueue_alloc_element()
2779 elem->in_num = in_num; in virtqueue_alloc_element()
2780 elem->in_sg = (void *)elem + in_sg_ofs; in virtqueue_alloc_element()
2781 elem->out_sg = (void *)elem + out_sg_ofs; in virtqueue_alloc_element()
2786 vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) in vu_queue_map_desc() argument
2788 struct vring_desc *desc = vq->vring.desc; in vu_queue_map_desc()
2791 unsigned int max = vq->vring.num; in vu_queue_map_desc()
2801 vu_panic(dev, "Invalid size for indirect buffer table"); in vu_queue_map_desc()
2810 desc = vu_gpa_to_va(dev, &read_len, desc_addr); in vu_queue_map_desc()
2814 if (!virtqueue_read_indirect_desc(dev, desc_buf, in vu_queue_map_desc()
2821 vu_panic(dev, "Invalid indirect buffer table"); in vu_queue_map_desc()
2830 if (!virtqueue_map_desc(dev, &in_num, iov + out_num, in vu_queue_map_desc()
2831 VIRTQUEUE_MAX_SIZE - out_num, true, in vu_queue_map_desc()
2838 vu_panic(dev, "Incorrect order for descriptors"); in vu_queue_map_desc()
2841 if (!virtqueue_map_desc(dev, &out_num, iov, in vu_queue_map_desc()
2851 vu_panic(dev, "Looped descriptor"); in vu_queue_map_desc()
2854 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); in vu_queue_map_desc()
2858 vu_panic(dev, "read descriptor error"); in vu_queue_map_desc()
2867 elem->index = idx; in vu_queue_map_desc()
2869 elem->out_sg[i] = iov[i]; in vu_queue_map_desc()
2872 elem->in_sg[i] = iov[out_num + i]; in vu_queue_map_desc()
2879 vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx) in vu_queue_inflight_get() argument
2881 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_queue_inflight_get()
2885 if (unlikely(!vq->inflight)) { in vu_queue_inflight_get()
2886 return -1; in vu_queue_inflight_get()
2889 vq->inflight->desc[desc_idx].counter = vq->counter++; in vu_queue_inflight_get()
2890 vq->inflight->desc[desc_idx].inflight = 1; in vu_queue_inflight_get()
2896 vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx) in vu_queue_inflight_pre_put() argument
2898 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_queue_inflight_pre_put()
2902 if (unlikely(!vq->inflight)) { in vu_queue_inflight_pre_put()
2903 return -1; in vu_queue_inflight_pre_put()
2906 vq->inflight->last_batch_head = desc_idx; in vu_queue_inflight_pre_put()
2912 vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx) in vu_queue_inflight_post_put() argument
2914 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_queue_inflight_post_put()
2918 if (unlikely(!vq->inflight)) { in vu_queue_inflight_post_put()
2919 return -1; in vu_queue_inflight_post_put()
2924 vq->inflight->desc[desc_idx].inflight = 0; in vu_queue_inflight_post_put()
2928 vq->inflight->used_idx = vq->used_idx; in vu_queue_inflight_post_put()
2934 vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz) in vu_queue_pop() argument
2940 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_pop()
2944 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) { in vu_queue_pop()
2945 i = (--vq->resubmit_num); in vu_queue_pop()
2946 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz); in vu_queue_pop()
2948 if (!vq->resubmit_num) { in vu_queue_pop()
2949 free(vq->resubmit_list); in vu_queue_pop()
2950 vq->resubmit_list = NULL; in vu_queue_pop()
2956 if (vu_queue_empty(dev, vq)) { in vu_queue_pop()
2965 if (vq->inuse >= vq->vring.num) { in vu_queue_pop()
2966 vu_panic(dev, "Virtqueue size exceeded"); in vu_queue_pop()
2970 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) { in vu_queue_pop()
2974 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { in vu_queue_pop()
2975 vring_set_avail_event(vq, vq->last_avail_idx); in vu_queue_pop()
2978 elem = vu_queue_map_desc(dev, vq, head, sz); in vu_queue_pop()
2984 vq->inuse++; in vu_queue_pop()
2986 vu_queue_inflight_get(dev, vq, head); in vu_queue_pop()
2992 vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, in vu_queue_detach_element() argument
2995 vq->inuse--; in vu_queue_detach_element()
3000 vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, in vu_queue_unpop() argument
3003 vq->last_avail_idx--; in vu_queue_unpop()
3004 vu_queue_detach_element(dev, vq, elem, len); in vu_queue_unpop()
3008 vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num) in vu_queue_rewind() argument
3010 if (num > vq->inuse) { in vu_queue_rewind()
3013 vq->last_avail_idx -= num; in vu_queue_rewind()
3014 vq->inuse -= num; in vu_queue_rewind()
3019 void vring_used_write(VuDev *dev, VuVirtq *vq, in vring_used_write() argument
3022 struct vring_used *used = vq->vring.used; in vring_used_write()
3024 used->ring[i] = *uelem; in vring_used_write()
3025 vu_log_write(dev, vq->vring.log_guest_addr + in vring_used_write()
3027 sizeof(used->ring[i])); in vring_used_write()
3032 vu_log_queue_fill(VuDev *dev, VuVirtq *vq, in vu_log_queue_fill() argument
3036 struct vring_desc *desc = vq->vring.desc; in vu_log_queue_fill()
3042 max = vq->vring.num; in vu_log_queue_fill()
3043 i = elem->index; in vu_log_queue_fill()
3047 vu_panic(dev, "Invalid size for indirect buffer table"); in vu_log_queue_fill()
3056 desc = vu_gpa_to_va(dev, &read_len, desc_addr); in vu_log_queue_fill()
3060 if (!virtqueue_read_indirect_desc(dev, desc_buf, in vu_log_queue_fill()
3067 vu_panic(dev, "Invalid indirect buffer table"); in vu_log_queue_fill()
3075 vu_panic(dev, "Looped descriptor"); in vu_log_queue_fill()
3081 vu_log_write(dev, le64toh(desc[i].addr), min); in vu_log_queue_fill()
3082 len -= min; in vu_log_queue_fill()
3086 (virtqueue_read_next_desc(dev, desc, i, max, &i) in vu_log_queue_fill()
3091 vu_queue_fill(VuDev *dev, VuVirtq *vq, in vu_queue_fill() argument
3097 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_fill()
3101 vu_log_queue_fill(dev, vq, elem, len); in vu_queue_fill()
3103 idx = (idx + vq->used_idx) % vq->vring.num; in vu_queue_fill()
3105 uelem.id = htole32(elem->index); in vu_queue_fill()
3107 vring_used_write(dev, vq, &uelem, idx); in vu_queue_fill()
3111 void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val) in vring_used_idx_set() argument
3113 vq->vring.used->idx = htole16(val); in vring_used_idx_set()
3114 vu_log_write(dev, in vring_used_idx_set()
3115 vq->vring.log_guest_addr + offsetof(struct vring_used, idx), in vring_used_idx_set()
3116 sizeof(vq->vring.used->idx)); in vring_used_idx_set()
3118 vq->used_idx = val; in vring_used_idx_set()
3122 vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count) in vu_queue_flush() argument
3126 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_flush()
3133 old = vq->used_idx; in vu_queue_flush()
3135 vring_used_idx_set(dev, vq, new); in vu_queue_flush()
3136 vq->inuse -= count; in vu_queue_flush()
3137 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { in vu_queue_flush()
3138 vq->signalled_used_valid = false; in vu_queue_flush()
3143 vu_queue_push(VuDev *dev, VuVirtq *vq, in vu_queue_push() argument
3146 vu_queue_fill(dev, vq, elem, len, 0); in vu_queue_push()
3147 vu_queue_inflight_pre_put(dev, vq, elem->index); in vu_queue_push()
3148 vu_queue_flush(dev, vq, 1); in vu_queue_push()
3149 vu_queue_inflight_post_put(dev, vq, elem->index); in vu_queue_push()
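
Finally, a hedged sketch of how the pop/push/notify pieces above fit together inside a queue handler, expanding the handler stub sketched earlier; the processing step is left abstract:

#include <stdlib.h>
#include "libvhost-user.h"

static void my_queue_handler(VuDev *dev, int qidx)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    for (;;) {
        /* vu_queue_pop() malloc()s the element; the caller must free it. */
        VuVirtqElement *elem = vu_queue_pop(dev, vq, sizeof(*elem));
        if (!elem) {
            break;
        }

        /* ... read the request from elem->out_sg[0..out_num-1], write the
         * response into elem->in_sg[0..in_num-1] ... */
        unsigned int written = 0;   /* bytes written into the in_sg buffers */

        /* vu_queue_push() fills the used ring, records the inflight state
         * and flushes one entry (see the code just above). */
        vu_queue_push(dev, vq, elem, written);
        free(elem);
    }

    vu_queue_notify(dev, vq);
}
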