Lines Matching +full:max +full:- +full:len
9 * Marc-André Lureau <mlureau@redhat.com>
13 * later. See the COPYING file in the top-level directory.
39 #include "standard-headers/linux/virtio_config.h"
57 #include "libvhost-user.h"
83 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
117 return has_feature(dev->features, fbit); in vu_has_feature()
122 return has_feature(dev->protocol_features, fbit); in vu_has_protocol_feature()
190 dev->broken = true; in vu_panic()
191 dev->panic(dev, buf); in vu_panic()
205 int high = dev->nregions - 1; in vu_gpa_to_mem_region()
216 unsigned int mid = low + (high - low) / 2; in vu_gpa_to_mem_region()
217 VuDevRegion *cur = &dev->regions[mid]; in vu_gpa_to_mem_region()
219 if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) { in vu_gpa_to_mem_region()
222 if (guest_addr >= cur->gpa + cur->size) { in vu_gpa_to_mem_region()
225 if (guest_addr < cur->gpa) { in vu_gpa_to_mem_region()
226 high = mid - 1; in vu_gpa_to_mem_region()
247 if ((guest_addr + *plen) > (r->gpa + r->size)) { in vu_gpa_to_va()
248 *plen = r->gpa + r->size - guest_addr; in vu_gpa_to_va()
250 return (void *)(uintptr_t)guest_addr - r->gpa + r->mmap_addr + in vu_gpa_to_va()
251 r->mmap_offset; in vu_gpa_to_va()
261 for (i = 0; i < dev->nregions; i++) { in qva_to_va()
262 VuDevRegion *r = &dev->regions[i]; in qva_to_va()
264 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) { in qva_to_va()
266 qemu_addr - r->qva + r->mmap_addr + r->mmap_offset; in qva_to_va()
278 for (i = 0; i < dev->nregions; i++) { in vu_remove_all_mem_regs()
279 VuDevRegion *r = &dev->regions[i]; in vu_remove_all_mem_regs()
281 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); in vu_remove_all_mem_regs()
283 dev->nregions = 0; in vu_remove_all_mem_regs()
289 vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); in map_ring()
290 vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); in map_ring()
291 vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); in map_ring()
294 DPRINT(" vring_desc at %p\n", vq->vring.desc); in map_ring()
295 DPRINT(" vring_used at %p\n", vq->vring.used); in map_ring()
296 DPRINT(" vring_avail at %p\n", vq->vring.avail); in map_ring()
298 return !(vq->vring.desc && vq->vring.used && vq->vring.avail); in map_ring()
304 if (unlikely(dev->broken)) { in vu_is_vq_usable()
308 if (likely(vq->vring.avail)) { in vu_is_vq_usable()
318 if (!vq->vra.desc_user_addr || !vq->vra.used_user_addr || in vu_is_vq_usable()
319 !vq->vra.avail_user_addr) { in vu_is_vq_usable()
334 for (i = 0; i < dev->max_queues; i++) { in unmap_rings()
335 VuVirtq *vq = &dev->vq[i]; in unmap_rings()
336 const uintptr_t desc = (uintptr_t)vq->vring.desc; in unmap_rings()
337 const uintptr_t used = (uintptr_t)vq->vring.used; in unmap_rings()
338 const uintptr_t avail = (uintptr_t)vq->vring.avail; in unmap_rings()
340 if (desc < r->mmap_addr || desc >= r->mmap_addr + r->size) { in unmap_rings()
343 if (used < r->mmap_addr || used >= r->mmap_addr + r->size) { in unmap_rings()
346 if (avail < r->mmap_addr || avail >= r->mmap_addr + r->size) { in unmap_rings()
351 vq->vring.desc = NULL; in unmap_rings()
352 vq->vring.used = NULL; in unmap_rings()
353 vq->vring.avail = NULL; in unmap_rings()
378 const uint64_t start_gpa = msg_region->guest_phys_addr; in _vu_add_mem_reg()
379 const uint64_t end_gpa = start_gpa + msg_region->memory_size; in _vu_add_mem_reg()
386 int high = dev->nregions - 1; in _vu_add_mem_reg()
389 DPRINT("Adding region %d\n", dev->nregions); in _vu_add_mem_reg()
391 msg_region->guest_phys_addr); in _vu_add_mem_reg()
393 msg_region->memory_size); in _vu_add_mem_reg()
395 msg_region->userspace_addr); in _vu_add_mem_reg()
397 msg_region->mmap_offset); in _vu_add_mem_reg()
399 if (dev->postcopy_listening) { in _vu_add_mem_reg()
413 unsigned int mid = low + (high - low) / 2; in _vu_add_mem_reg()
414 VuDevRegion *cur = &dev->regions[mid]; in _vu_add_mem_reg()
417 if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) { in _vu_add_mem_reg()
421 if (start_gpa >= cur->gpa + cur->size) { in _vu_add_mem_reg()
424 if (start_gpa < cur->gpa) { in _vu_add_mem_reg()
425 high = mid - 1; in _vu_add_mem_reg()
431 * Convert most of msg_region->mmap_offset to fd_offset. In almost all in _vu_add_mem_reg()
442 fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize); in _vu_add_mem_reg()
443 mmap_offset = msg_region->mmap_offset - fd_offset; in _vu_add_mem_reg()
445 fd_offset = msg_region->mmap_offset; in _vu_add_mem_reg()
454 mmap_addr = mmap(0, msg_region->memory_size + mmap_offset, in _vu_add_mem_reg()
465 madvise(mmap_addr, msg_region->memory_size + mmap_offset, in _vu_add_mem_reg()
470 r = &dev->regions[idx]; in _vu_add_mem_reg()
471 memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx)); in _vu_add_mem_reg()
472 r->gpa = msg_region->guest_phys_addr; in _vu_add_mem_reg()
473 r->size = msg_region->memory_size; in _vu_add_mem_reg()
474 r->qva = msg_region->userspace_addr; in _vu_add_mem_reg()
475 r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; in _vu_add_mem_reg()
476 r->mmap_offset = mmap_offset; in _vu_add_mem_reg()
477 dev->nregions++; in _vu_add_mem_reg()
479 if (dev->postcopy_listening) { in _vu_add_mem_reg()
484 msg_region->userspace_addr = r->mmap_addr + r->mmap_offset; in _vu_add_mem_reg()
493 for (i = 0; i < vmsg->fd_num; i++) { in vmsg_close_fds()
494 close(vmsg->fds[i]); in vmsg_close_fds()
501 vmsg->flags = 0; /* defaults will be set by vu_send_reply() */ in vmsg_set_reply_u64()
502 vmsg->size = sizeof(vmsg->payload.u64); in vmsg_set_reply_u64()
503 vmsg->payload.u64 = val; in vmsg_set_reply_u64()
504 vmsg->fd_num = 0; in vmsg_set_reply_u64()
563 vmsg->fd_num = 0; in vu_message_read_default()
568 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { in vu_message_read_default()
569 fd_size = cmsg->cmsg_len - CMSG_LEN(0); in vu_message_read_default()
570 vmsg->fd_num = fd_size / sizeof(int); in vu_message_read_default()
571 assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS); in vu_message_read_default()
572 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size); in vu_message_read_default()
577 if (vmsg->size > sizeof(vmsg->payload)) { in vu_message_read_default()
579 "Error: too big message request: %d, size: vmsg->size: %u, " in vu_message_read_default()
580 "while sizeof(vmsg->payload) = %zu\n", in vu_message_read_default()
581 vmsg->request, vmsg->size, sizeof(vmsg->payload)); in vu_message_read_default()
585 if (vmsg->size) { in vu_message_read_default()
587 rc = read(conn_fd, &vmsg->payload, vmsg->size); in vu_message_read_default()
595 assert((uint32_t)rc == vmsg->size); in vu_message_read_default()
624 assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS); in vu_message_write()
625 if (vmsg->fd_num > 0) { in vu_message_write()
626 size_t fdsize = vmsg->fd_num * sizeof(int); in vu_message_write()
629 cmsg->cmsg_len = CMSG_LEN(fdsize); in vu_message_write()
630 cmsg->cmsg_level = SOL_SOCKET; in vu_message_write()
631 cmsg->cmsg_type = SCM_RIGHTS; in vu_message_write()
632 memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize); in vu_message_write()
647 if (vmsg->size) { in vu_message_write()
649 if (vmsg->data) { in vu_message_write()
650 rc = write(conn_fd, vmsg->data, vmsg->size); in vu_message_write()
652 rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size); in vu_message_write()
669 vmsg->flags &= ~VHOST_USER_VERSION_MASK; in vu_send_reply()
670 vmsg->flags |= VHOST_USER_VERSION; in vu_send_reply()
671 vmsg->flags |= VHOST_USER_REPLY_MASK; in vu_send_reply()
687 if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) { in vu_process_message_reply()
692 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { in vu_process_message_reply()
696 if (msg_reply.request != vmsg->request) { in vu_process_message_reply()
698 vmsg->request, msg_reply.request); in vu_process_message_reply()
705 pthread_mutex_unlock(&dev->backend_mutex); in vu_process_message_reply()
713 if (dev->log_call_fd != -1) { in vu_log_kick()
715 if (eventfd_write(dev->log_call_fd, 1) < 0) { in vu_log_kick()
733 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) || in vu_log_write()
734 !dev->log_table || !length) { in vu_log_write()
738 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8)); in vu_log_write()
742 vu_log_page(dev->log_table, page); in vu_log_write()
753 VuVirtq *vq = &dev->vq[index]; in vu_kick_cb()
754 int sock = vq->kick_fd; in vu_kick_cb()
759 if (rc == -1) { in vu_kick_cb()
761 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_kick_cb()
764 kick_data, vq->handler, index); in vu_kick_cb()
765 if (vq->handler) { in vu_kick_cb()
766 vq->handler(dev, index); in vu_kick_cb()
774 vmsg->payload.u64 = in vu_get_features_exec()
784 /* vhost-user feature bits */ in vu_get_features_exec()
788 if (dev->iface->get_features) { in vu_get_features_exec()
789 vmsg->payload.u64 |= dev->iface->get_features(dev); in vu_get_features_exec()
792 vmsg->size = sizeof(vmsg->payload.u64); in vu_get_features_exec()
793 vmsg->fd_num = 0; in vu_get_features_exec()
795 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_get_features_exec()
805 for (i = 0; i < dev->max_queues; i++) { in vu_set_enable_all_rings()
806 dev->vq[i].enable = enabled; in vu_set_enable_all_rings()
813 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_features_exec()
815 dev->features = vmsg->payload.u64; in vu_set_features_exec()
821 vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user"); in vu_set_features_exec()
825 if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) { in vu_set_features_exec()
829 if (dev->iface->set_features) { in vu_set_features_exec()
830 dev->iface->set_features(dev, dev->features); in vu_set_features_exec()
845 if (dev->log_table) { in vu_close_log()
846 if (munmap(dev->log_table, dev->log_size) != 0) { in vu_close_log()
850 dev->log_table = NULL; in vu_close_log()
852 if (dev->log_call_fd != -1) { in vu_close_log()
853 close(dev->log_call_fd); in vu_close_log()
854 dev->log_call_fd = -1; in vu_close_log()
869 for (i = 0; i < dev->nregions; i++) { in generate_faults()
871 VuDevRegion *dev_region = &dev->regions[i]; in generate_faults()
883 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, in generate_faults()
884 dev_region->size + dev_region->mmap_offset, in generate_faults()
896 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, in generate_faults()
897 dev_region->size + dev_region->mmap_offset, in generate_faults()
909 reg_struct.range.start = (uintptr_t)dev_region->mmap_addr; in generate_faults()
910 reg_struct.range.len = dev_region->size + dev_region->mmap_offset; in generate_faults()
913 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) { in generate_faults()
918 dev_region->mmap_addr, in generate_faults()
919 dev_region->size, dev_region->mmap_offset, in generate_faults()
920 dev->postcopy_ufd, strerror(errno)); in generate_faults()
931 (uint64_t)reg_struct.range.len); in generate_faults()
933 if (mprotect((void *)(uintptr_t)dev_region->mmap_addr, in generate_faults()
934 dev_region->size + dev_region->mmap_offset, in generate_faults()
949 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; in vu_add_mem_reg()
951 if (vmsg->fd_num != 1) { in vu_add_mem_reg()
953 vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd " in vu_add_mem_reg()
954 "should be sent for this message type", vmsg->fd_num); in vu_add_mem_reg()
958 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { in vu_add_mem_reg()
959 close(vmsg->fds[0]); in vu_add_mem_reg()
962 VHOST_USER_MEM_REG_SIZE, vmsg->size); in vu_add_mem_reg()
966 if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) { in vu_add_mem_reg()
967 close(vmsg->fds[0]); in vu_add_mem_reg()
979 if (dev->postcopy_listening && in vu_add_mem_reg()
980 vmsg->size == sizeof(vmsg->payload.u64) && in vu_add_mem_reg()
981 vmsg->payload.u64 == 0) { in vu_add_mem_reg()
986 _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]); in vu_add_mem_reg()
987 close(vmsg->fds[0]); in vu_add_mem_reg()
989 if (dev->postcopy_listening) { in vu_add_mem_reg()
991 vmsg->fd_num = 0; in vu_add_mem_reg()
1002 if (vudev_reg->gpa == msg_reg->guest_phys_addr && in reg_equal()
1003 vudev_reg->qva == msg_reg->userspace_addr && in reg_equal()
1004 vudev_reg->size == msg_reg->memory_size) { in reg_equal()
1013 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; in vu_rem_mem_reg()
1017 if (vmsg->fd_num > 1) { in vu_rem_mem_reg()
1019 vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd " in vu_rem_mem_reg()
1020 "should be sent for this message type", vmsg->fd_num); in vu_rem_mem_reg()
1024 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { in vu_rem_mem_reg()
1028 VHOST_USER_MEM_REG_SIZE, vmsg->size); in vu_rem_mem_reg()
1034 msg_region->guest_phys_addr); in vu_rem_mem_reg()
1036 msg_region->memory_size); in vu_rem_mem_reg()
1038 msg_region->userspace_addr); in vu_rem_mem_reg()
1040 msg_region->mmap_offset); in vu_rem_mem_reg()
1042 r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr); in vu_rem_mem_reg()
1059 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); in vu_rem_mem_reg()
1061 idx = r - dev->regions; in vu_rem_mem_reg()
1062 assert(idx < dev->nregions); in vu_rem_mem_reg()
1064 memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1)); in vu_rem_mem_reg()
1066 dev->nregions--; in vu_rem_mem_reg()
1077 int dmabuf_fd = -1; in vu_get_shared_object()
1078 if (dev->iface->get_shared_object) { in vu_get_shared_object()
1079 dmabuf_fd = dev->iface->get_shared_object( in vu_get_shared_object()
1080 dev, &vmsg->payload.object.uuid[0]); in vu_get_shared_object()
1082 if (dmabuf_fd != -1) { in vu_get_shared_object()
1084 vmsg->fds[fd_num++] = dmabuf_fd; in vu_get_shared_object()
1086 vmsg->fd_num = fd_num; in vu_get_shared_object()
1094 VhostUserMemory m = vmsg->payload.memory, *memory = &m; in vu_set_mem_table_exec()
1099 DPRINT("Nregions: %u\n", memory->nregions); in vu_set_mem_table_exec()
1100 for (i = 0; i < memory->nregions; i++) { in vu_set_mem_table_exec()
1101 _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]); in vu_set_mem_table_exec()
1102 close(vmsg->fds[i]); in vu_set_mem_table_exec()
1105 if (dev->postcopy_listening) { in vu_set_mem_table_exec()
1107 vmsg->fd_num = 0; in vu_set_mem_table_exec()
1108 if (!vu_send_reply(dev, dev->sock, vmsg)) { in vu_set_mem_table_exec()
1109 vu_panic(dev, "failed to respond to set-mem-table for postcopy"); in vu_set_mem_table_exec()
1117 if (!dev->read_msg(dev, dev->sock, vmsg) || in vu_set_mem_table_exec()
1118 vmsg->size != sizeof(vmsg->payload.u64) || in vu_set_mem_table_exec()
1119 vmsg->payload.u64 != 0) { in vu_set_mem_table_exec()
1120 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table"); in vu_set_mem_table_exec()
1129 for (i = 0; i < dev->max_queues; i++) { in vu_set_mem_table_exec()
1130 if (dev->vq[i].vring.desc) { in vu_set_mem_table_exec()
1131 if (map_ring(dev, &dev->vq[i])) { in vu_set_mem_table_exec()
1147 if (vmsg->fd_num != 1 || in vu_set_log_base_exec()
1148 vmsg->size != sizeof(vmsg->payload.log)) { in vu_set_log_base_exec()
1153 fd = vmsg->fds[0]; in vu_set_log_base_exec()
1154 log_mmap_offset = vmsg->payload.log.mmap_offset; in vu_set_log_base_exec()
1155 log_mmap_size = vmsg->payload.log.mmap_size; in vu_set_log_base_exec()
1166 if (dev->log_table) { in vu_set_log_base_exec()
1167 munmap(dev->log_table, dev->log_size); in vu_set_log_base_exec()
1169 dev->log_table = rc; in vu_set_log_base_exec()
1170 dev->log_size = log_mmap_size; in vu_set_log_base_exec()
1172 vmsg->size = sizeof(vmsg->payload.u64); in vu_set_log_base_exec()
1173 vmsg->fd_num = 0; in vu_set_log_base_exec()
1181 if (vmsg->fd_num != 1) { in vu_set_log_fd_exec()
1186 if (dev->log_call_fd != -1) { in vu_set_log_fd_exec()
1187 close(dev->log_call_fd); in vu_set_log_fd_exec()
1189 dev->log_call_fd = vmsg->fds[0]; in vu_set_log_fd_exec()
1190 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]); in vu_set_log_fd_exec()
1198 unsigned int index = vmsg->payload.state.index; in vu_set_vring_num_exec()
1199 unsigned int num = vmsg->payload.state.num; in vu_set_vring_num_exec()
1203 dev->vq[index].vring.num = num; in vu_set_vring_num_exec()
1211 struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr; in vu_set_vring_addr_exec()
1212 unsigned int index = vra->index; in vu_set_vring_addr_exec()
1213 VuVirtq *vq = &dev->vq[index]; in vu_set_vring_addr_exec()
1216 DPRINT(" index: %d\n", vra->index); in vu_set_vring_addr_exec()
1217 DPRINT(" flags: %d\n", vra->flags); in vu_set_vring_addr_exec()
1218 DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr); in vu_set_vring_addr_exec()
1219 DPRINT(" used_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr); in vu_set_vring_addr_exec()
1220 DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr); in vu_set_vring_addr_exec()
1221 DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr); in vu_set_vring_addr_exec()
1223 vq->vra = *vra; in vu_set_vring_addr_exec()
1224 vq->vring.flags = vra->flags; in vu_set_vring_addr_exec()
1225 vq->vring.log_guest_addr = vra->log_guest_addr; in vu_set_vring_addr_exec()
1233 vq->used_idx = le16toh(vq->vring.used->idx); in vu_set_vring_addr_exec()
1235 if (vq->last_avail_idx != vq->used_idx) { in vu_set_vring_addr_exec()
1236 bool resume = dev->iface->queue_is_processed_in_order && in vu_set_vring_addr_exec()
1237 dev->iface->queue_is_processed_in_order(dev, index); in vu_set_vring_addr_exec()
1240 vq->last_avail_idx, vq->used_idx, in vu_set_vring_addr_exec()
1244 vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx; in vu_set_vring_addr_exec()
1254 unsigned int index = vmsg->payload.state.index; in vu_set_vring_base_exec()
1255 unsigned int num = vmsg->payload.state.num; in vu_set_vring_base_exec()
1259 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num; in vu_set_vring_base_exec()
1267 unsigned int index = vmsg->payload.state.index; in vu_get_vring_base_exec()
1270 vmsg->payload.state.num = dev->vq[index].last_avail_idx; in vu_get_vring_base_exec()
1271 vmsg->size = sizeof(vmsg->payload.state); in vu_get_vring_base_exec()
1273 dev->vq[index].started = false; in vu_get_vring_base_exec()
1274 if (dev->iface->queue_set_started) { in vu_get_vring_base_exec()
1275 dev->iface->queue_set_started(dev, index, false); in vu_get_vring_base_exec()
1278 if (dev->vq[index].call_fd != -1) { in vu_get_vring_base_exec()
1279 close(dev->vq[index].call_fd); in vu_get_vring_base_exec()
1280 dev->vq[index].call_fd = -1; in vu_get_vring_base_exec()
1282 if (dev->vq[index].kick_fd != -1) { in vu_get_vring_base_exec()
1283 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_get_vring_base_exec()
1284 close(dev->vq[index].kick_fd); in vu_get_vring_base_exec()
1285 dev->vq[index].kick_fd = -1; in vu_get_vring_base_exec()
1294 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_check_queue_msg_file()
1295 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_check_queue_msg_file()
1297 if (index >= dev->max_queues) { in vu_check_queue_msg_file()
1308 if (vmsg->fd_num != 1) { in vu_check_queue_msg_file()
1310 vu_panic(dev, "Invalid fds in request: %d", vmsg->request); in vu_check_queue_msg_file()
1323 if (desc1->counter > desc0->counter && in inflight_desc_compare()
1324 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) { in inflight_desc_compare()
1328 return -1; in inflight_desc_compare()
1340 if (unlikely(!vq->inflight)) { in vu_check_queue_inflights()
1341 return -1; in vu_check_queue_inflights()
1344 if (unlikely(!vq->inflight->version)) { in vu_check_queue_inflights()
1346 vq->inflight->version = INFLIGHT_VERSION; in vu_check_queue_inflights()
1350 vq->used_idx = le16toh(vq->vring.used->idx); in vu_check_queue_inflights()
1351 vq->resubmit_num = 0; in vu_check_queue_inflights()
1352 vq->resubmit_list = NULL; in vu_check_queue_inflights()
1353 vq->counter = 0; in vu_check_queue_inflights()
1355 if (unlikely(vq->inflight->used_idx != vq->used_idx)) { in vu_check_queue_inflights()
1356 vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0; in vu_check_queue_inflights()
1360 vq->inflight->used_idx = vq->used_idx; in vu_check_queue_inflights()
1363 for (i = 0; i < vq->inflight->desc_num; i++) { in vu_check_queue_inflights()
1364 if (vq->inflight->desc[i].inflight == 1) { in vu_check_queue_inflights()
1365 vq->inuse++; in vu_check_queue_inflights()
1369 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx; in vu_check_queue_inflights()
1371 if (vq->inuse) { in vu_check_queue_inflights()
1372 vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc)); in vu_check_queue_inflights()
1373 if (!vq->resubmit_list) { in vu_check_queue_inflights()
1374 return -1; in vu_check_queue_inflights()
1377 for (i = 0; i < vq->inflight->desc_num; i++) { in vu_check_queue_inflights()
1378 if (vq->inflight->desc[i].inflight) { in vu_check_queue_inflights()
1379 vq->resubmit_list[vq->resubmit_num].index = i; in vu_check_queue_inflights()
1380 vq->resubmit_list[vq->resubmit_num].counter = in vu_check_queue_inflights()
1381 vq->inflight->desc[i].counter; in vu_check_queue_inflights()
1382 vq->resubmit_num++; in vu_check_queue_inflights()
1386 if (vq->resubmit_num > 1) { in vu_check_queue_inflights()
1387 qsort(vq->resubmit_list, vq->resubmit_num, in vu_check_queue_inflights()
1390 vq->counter = vq->resubmit_list[0].counter + 1; in vu_check_queue_inflights()
1394 if (eventfd_write(vq->kick_fd, 1)) { in vu_check_queue_inflights()
1395 return -1; in vu_check_queue_inflights()
1404 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_set_vring_kick_exec()
1405 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_set_vring_kick_exec()
1407 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_vring_kick_exec()
1413 if (dev->vq[index].kick_fd != -1) { in vu_set_vring_kick_exec()
1414 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_set_vring_kick_exec()
1415 close(dev->vq[index].kick_fd); in vu_set_vring_kick_exec()
1416 dev->vq[index].kick_fd = -1; in vu_set_vring_kick_exec()
1419 dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_kick_exec()
1420 DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index); in vu_set_vring_kick_exec()
1422 dev->vq[index].started = true; in vu_set_vring_kick_exec()
1423 if (dev->iface->queue_set_started) { in vu_set_vring_kick_exec()
1424 dev->iface->queue_set_started(dev, index, true); in vu_set_vring_kick_exec()
1427 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) { in vu_set_vring_kick_exec()
1428 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN, in vu_set_vring_kick_exec()
1432 dev->vq[index].kick_fd, index); in vu_set_vring_kick_exec()
1435 if (vu_check_queue_inflights(dev, &dev->vq[index])) { in vu_set_vring_kick_exec()
1445 int qidx = vq - dev->vq; in vu_set_queue_handler()
1447 vq->handler = handler; in vu_set_queue_handler()
1448 if (vq->kick_fd >= 0) { in vu_set_queue_handler()
1450 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN, in vu_set_queue_handler()
1453 dev->remove_watch(dev, vq->kick_fd); in vu_set_queue_handler()
1461 int qidx = vq - dev->vq; in vu_set_queue_host_notifier()
1474 if (fd == -1) { in vu_set_queue_host_notifier()
1486 pthread_mutex_lock(&dev->backend_mutex); in vu_set_queue_host_notifier()
1487 if (!vu_message_write(dev, dev->backend_fd, &vmsg)) { in vu_set_queue_host_notifier()
1488 pthread_mutex_unlock(&dev->backend_mutex); in vu_set_queue_host_notifier()
1514 pthread_mutex_lock(&dev->backend_mutex); in vu_lookup_shared_object()
1515 if (!vu_message_write(dev, dev->backend_fd, &msg)) { in vu_lookup_shared_object()
1519 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { in vu_lookup_shared_object()
1538 pthread_mutex_unlock(&dev->backend_mutex); in vu_lookup_shared_object()
1547 pthread_mutex_lock(&dev->backend_mutex); in vu_send_message()
1548 if (!vu_message_write(dev, dev->backend_fd, vmsg)) { in vu_send_message()
1554 pthread_mutex_unlock(&dev->backend_mutex); in vu_send_message()
1598 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_set_vring_call_exec()
1599 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_set_vring_call_exec()
1601 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_vring_call_exec()
1607 if (dev->vq[index].call_fd != -1) { in vu_set_vring_call_exec()
1608 close(dev->vq[index].call_fd); in vu_set_vring_call_exec()
1609 dev->vq[index].call_fd = -1; in vu_set_vring_call_exec()
1612 dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_call_exec()
1615 if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) { in vu_set_vring_call_exec()
1616 return -1; in vu_set_vring_call_exec()
1619 DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index); in vu_set_vring_call_exec()
1627 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; in vu_set_vring_err_exec()
1628 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; in vu_set_vring_err_exec()
1630 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); in vu_set_vring_err_exec()
1636 if (dev->vq[index].err_fd != -1) { in vu_set_vring_err_exec()
1637 close(dev->vq[index].err_fd); in vu_set_vring_err_exec()
1638 dev->vq[index].err_fd = -1; in vu_set_vring_err_exec()
1641 dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_err_exec()
1669 if (dev->iface->get_config && dev->iface->set_config) { in vu_get_protocol_features_exec()
1673 if (dev->iface->get_protocol_features) { in vu_get_protocol_features_exec()
1674 features |= dev->iface->get_protocol_features(dev); in vu_get_protocol_features_exec()
1695 uint64_t features = vmsg->payload.u64; in vu_set_protocol_features_exec()
1699 dev->protocol_features = vmsg->payload.u64; in vu_set_protocol_features_exec()
1720 if (dev->iface->set_protocol_features) { in vu_set_protocol_features_exec()
1721 dev->iface->set_protocol_features(dev, features); in vu_set_protocol_features_exec()
1730 vmsg_set_reply_u64(vmsg, dev->max_queues); in vu_get_queue_num_exec()
1737 unsigned int index = vmsg->payload.state.index; in vu_set_vring_enable_exec()
1738 unsigned int enable = vmsg->payload.state.num; in vu_set_vring_enable_exec()
1743 if (index >= dev->max_queues) { in vu_set_vring_enable_exec()
1748 dev->vq[index].enable = enable; in vu_set_vring_enable_exec()
1755 if (vmsg->fd_num != 1) { in vu_set_backend_req_fd()
1756 vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num); in vu_set_backend_req_fd()
1760 if (dev->backend_fd != -1) { in vu_set_backend_req_fd()
1761 close(dev->backend_fd); in vu_set_backend_req_fd()
1763 dev->backend_fd = vmsg->fds[0]; in vu_set_backend_req_fd()
1764 DPRINT("Got backend_fd: %d\n", vmsg->fds[0]); in vu_set_backend_req_fd()
1772 int ret = -1; in vu_get_config()
1774 if (dev->iface->get_config) { in vu_get_config()
1775 ret = dev->iface->get_config(dev, vmsg->payload.config.region, in vu_get_config()
1776 vmsg->payload.config.size); in vu_get_config()
1781 vmsg->size = 0; in vu_get_config()
1790 int ret = -1; in vu_set_config()
1792 if (dev->iface->set_config) { in vu_set_config()
1793 ret = dev->iface->set_config(dev, vmsg->payload.config.region, in vu_set_config()
1794 vmsg->payload.config.offset, in vu_set_config()
1795 vmsg->payload.config.size, in vu_set_config()
1796 vmsg->payload.config.flags); in vu_set_config()
1811 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); in vu_set_postcopy_advise()
1812 vmsg->size = 0; in vu_set_postcopy_advise()
1814 dev->postcopy_ufd = -1; in vu_set_postcopy_advise()
1817 if (dev->postcopy_ufd == -1) { in vu_set_postcopy_advise()
1825 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { in vu_set_postcopy_advise()
1827 close(dev->postcopy_ufd); in vu_set_postcopy_advise()
1828 dev->postcopy_ufd = -1; in vu_set_postcopy_advise()
1836 vmsg->fd_num = 1; in vu_set_postcopy_advise()
1837 vmsg->fds[0] = dev->postcopy_ufd; in vu_set_postcopy_advise()
1844 if (dev->nregions) { in vu_set_postcopy_listen()
1845 vu_panic(dev, "Regions already registered at postcopy-listen"); in vu_set_postcopy_listen()
1846 vmsg_set_reply_u64(vmsg, -1); in vu_set_postcopy_listen()
1849 dev->postcopy_listening = true; in vu_set_postcopy_listen()
1859 dev->postcopy_listening = false; in vu_set_postcopy_end()
1860 if (dev->postcopy_ufd > 0) { in vu_set_postcopy_end()
1861 close(dev->postcopy_ufd); in vu_set_postcopy_end()
1862 dev->postcopy_ufd = -1; in vu_set_postcopy_end()
1915 int fd = -1; in vu_get_inflight_fd()
1920 if (vmsg->size != sizeof(vmsg->payload.inflight)) { in vu_get_inflight_fd()
1921 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size); in vu_get_inflight_fd()
1922 vmsg->payload.inflight.mmap_size = 0; in vu_get_inflight_fd()
1926 num_queues = vmsg->payload.inflight.num_queues; in vu_get_inflight_fd()
1927 queue_size = vmsg->payload.inflight.queue_size; in vu_get_inflight_fd()
1935 addr = memfd_alloc("vhost-inflight", mmap_size, in vu_get_inflight_fd()
1944 vmsg->payload.inflight.mmap_size = 0; in vu_get_inflight_fd()
1950 dev->inflight_info.addr = addr; in vu_get_inflight_fd()
1951 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size; in vu_get_inflight_fd()
1952 dev->inflight_info.fd = vmsg->fds[0] = fd; in vu_get_inflight_fd()
1953 vmsg->fd_num = 1; in vu_get_inflight_fd()
1954 vmsg->payload.inflight.mmap_offset = 0; in vu_get_inflight_fd()
1957 vmsg->payload.inflight.mmap_size); in vu_get_inflight_fd()
1959 vmsg->payload.inflight.mmap_offset); in vu_get_inflight_fd()
1972 if (vmsg->fd_num != 1 || in vu_set_inflight_fd()
1973 vmsg->size != sizeof(vmsg->payload.inflight)) { in vu_set_inflight_fd()
1975 vmsg->size, vmsg->fd_num); in vu_set_inflight_fd()
1979 fd = vmsg->fds[0]; in vu_set_inflight_fd()
1980 mmap_size = vmsg->payload.inflight.mmap_size; in vu_set_inflight_fd()
1981 mmap_offset = vmsg->payload.inflight.mmap_offset; in vu_set_inflight_fd()
1982 num_queues = vmsg->payload.inflight.num_queues; in vu_set_inflight_fd()
1983 queue_size = vmsg->payload.inflight.queue_size; in vu_set_inflight_fd()
1998 if (dev->inflight_info.fd) { in vu_set_inflight_fd()
1999 close(dev->inflight_info.fd); in vu_set_inflight_fd()
2002 if (dev->inflight_info.addr) { in vu_set_inflight_fd()
2003 munmap(dev->inflight_info.addr, dev->inflight_info.size); in vu_set_inflight_fd()
2006 dev->inflight_info.fd = fd; in vu_set_inflight_fd()
2007 dev->inflight_info.addr = rc; in vu_set_inflight_fd()
2008 dev->inflight_info.size = mmap_size; in vu_set_inflight_fd()
2011 dev->vq[i].inflight = (VuVirtqInflight *)rc; in vu_set_inflight_fd()
2012 dev->vq[i].inflight->desc_num = queue_size; in vu_set_inflight_fd()
2022 unsigned int index = vmsg->payload.state.index; in vu_handle_vring_kick()
2024 if (index >= dev->max_queues) { in vu_handle_vring_kick()
2030 dev->vq[index].handler, index); in vu_handle_vring_kick()
2032 if (!dev->vq[index].started) { in vu_handle_vring_kick()
2033 dev->vq[index].started = true; in vu_handle_vring_kick()
2035 if (dev->iface->queue_set_started) { in vu_handle_vring_kick()
2036 dev->iface->queue_set_started(dev, index, true); in vu_handle_vring_kick()
2040 if (dev->vq[index].handler) { in vu_handle_vring_kick()
2041 dev->vq[index].handler(dev, index); in vu_handle_vring_kick()
2063 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request), in vu_process_message()
2064 vmsg->request); in vu_process_message()
2065 DPRINT("Flags: 0x%x\n", vmsg->flags); in vu_process_message()
2066 DPRINT("Size: %u\n", vmsg->size); in vu_process_message()
2068 if (vmsg->fd_num) { in vu_process_message()
2071 for (i = 0; i < vmsg->fd_num; i++) { in vu_process_message()
2072 DPRINT(" %d", vmsg->fds[i]); in vu_process_message()
2077 if (dev->iface->process_msg && in vu_process_message()
2078 dev->iface->process_msg(dev, vmsg, &do_reply)) { in vu_process_message()
2082 switch (vmsg->request) { in vu_process_message()
2126 /* if you need processing before exit, override iface->process_msg */ in vu_process_message()
2150 vu_panic(dev, "Unhandled request: %d", vmsg->request); in vu_process_message()
2163 if (!dev->read_msg(dev, dev->sock, &vmsg)) { in vu_dispatch()
2180 if (!vu_send_reply(dev, dev->sock, &vmsg)) { in vu_dispatch()
2198 for (i = 0; i < dev->max_queues; i++) { in vu_deinit()
2199 VuVirtq *vq = &dev->vq[i]; in vu_deinit()
2201 if (vq->call_fd != -1) { in vu_deinit()
2202 close(vq->call_fd); in vu_deinit()
2203 vq->call_fd = -1; in vu_deinit()
2206 if (vq->kick_fd != -1) { in vu_deinit()
2207 dev->remove_watch(dev, vq->kick_fd); in vu_deinit()
2208 close(vq->kick_fd); in vu_deinit()
2209 vq->kick_fd = -1; in vu_deinit()
2212 if (vq->err_fd != -1) { in vu_deinit()
2213 close(vq->err_fd); in vu_deinit()
2214 vq->err_fd = -1; in vu_deinit()
2217 if (vq->resubmit_list) { in vu_deinit()
2218 free(vq->resubmit_list); in vu_deinit()
2219 vq->resubmit_list = NULL; in vu_deinit()
2222 vq->inflight = NULL; in vu_deinit()
2225 if (dev->inflight_info.addr) { in vu_deinit()
2226 munmap(dev->inflight_info.addr, dev->inflight_info.size); in vu_deinit()
2227 dev->inflight_info.addr = NULL; in vu_deinit()
2230 if (dev->inflight_info.fd > 0) { in vu_deinit()
2231 close(dev->inflight_info.fd); in vu_deinit()
2232 dev->inflight_info.fd = -1; in vu_deinit()
2236 if (dev->backend_fd != -1) { in vu_deinit()
2237 close(dev->backend_fd); in vu_deinit()
2238 dev->backend_fd = -1; in vu_deinit()
2240 pthread_mutex_destroy(&dev->backend_mutex); in vu_deinit()
2242 if (dev->sock != -1) { in vu_deinit()
2243 close(dev->sock); in vu_deinit()
2246 free(dev->vq); in vu_deinit()
2247 dev->vq = NULL; in vu_deinit()
2248 free(dev->regions); in vu_deinit()
2249 dev->regions = NULL; in vu_deinit()
2273 dev->sock = socket; in vu_init()
2274 dev->panic = panic; in vu_init()
2275 dev->read_msg = read_msg ? read_msg : vu_message_read_default; in vu_init()
2276 dev->set_watch = set_watch; in vu_init()
2277 dev->remove_watch = remove_watch; in vu_init()
2278 dev->iface = iface; in vu_init()
2279 dev->log_call_fd = -1; in vu_init()
2280 pthread_mutex_init(&dev->backend_mutex, NULL); in vu_init()
2281 dev->backend_fd = -1; in vu_init()
2282 dev->max_queues = max_queues; in vu_init()
2284 dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0])); in vu_init()
2285 if (!dev->regions) { in vu_init()
2290 dev->vq = malloc(max_queues * sizeof(dev->vq[0])); in vu_init()
2291 if (!dev->vq) { in vu_init()
2293 free(dev->regions); in vu_init()
2294 dev->regions = NULL; in vu_init()
2299 dev->vq[i] = (VuVirtq) { in vu_init()
2300 .call_fd = -1, .kick_fd = -1, .err_fd = -1, in vu_init()
2311 assert(qidx < dev->max_queues); in vu_get_queue()
2312 return &dev->vq[qidx]; in vu_get_queue()
2318 return vq->enable; in vu_queue_enabled()
2324 return vq->started; in vu_queue_started()
2330 return le16toh(vq->vring.avail->flags); in vring_avail_flags()
2336 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx); in vring_avail_idx()
2338 return vq->shadow_avail_idx; in vring_avail_idx()
2344 return le16toh(vq->vring.avail->ring[i]); in vring_avail_ring()
2350 return vring_avail_ring(vq, vq->vring.num); in vring_get_used_event()
2356 uint16_t num_heads = vring_avail_idx(vq) - idx; in virtqueue_num_heads()
2359 if (num_heads > vq->vring.num) { in virtqueue_num_heads()
2361 idx, vq->shadow_avail_idx); in virtqueue_num_heads()
2362 return -1; in virtqueue_num_heads()
2365 /* On success, callers read a descriptor at vq->last_avail_idx. in virtqueue_num_heads()
2379 *head = vring_avail_ring(vq, idx % vq->vring.num); in virtqueue_get_head()
2382 if (*head >= vq->vring.num) { in virtqueue_get_head()
2392 uint64_t addr, size_t len) in virtqueue_read_indirect_desc() argument
2397 if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) { in virtqueue_read_indirect_desc()
2398 return -1; in virtqueue_read_indirect_desc()
2401 if (len == 0) { in virtqueue_read_indirect_desc()
2402 return -1; in virtqueue_read_indirect_desc()
2405 while (len) { in virtqueue_read_indirect_desc()
2406 read_len = len; in virtqueue_read_indirect_desc()
2409 return -1; in virtqueue_read_indirect_desc()
2413 len -= read_len; in virtqueue_read_indirect_desc()
2422 VIRTQUEUE_READ_DESC_ERROR = -1,
2429 int i, unsigned int max, unsigned int *next) in virtqueue_read_next_desc() argument
2441 if (*next >= max) { in virtqueue_read_next_desc()
2458 idx = vq->last_avail_idx; in vu_queue_get_avail_bytes()
2466 unsigned int max, desc_len, num_bufs, indirect = 0; in vu_queue_get_avail_bytes() local
2472 max = vq->vring.num; in vu_queue_get_avail_bytes()
2477 desc = vq->vring.desc; in vu_queue_get_avail_bytes()
2480 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { in vu_queue_get_avail_bytes()
2486 if (num_bufs >= max) { in vu_queue_get_avail_bytes()
2494 desc_len = le32toh(desc[i].len); in vu_queue_get_avail_bytes()
2495 max = desc_len / sizeof(struct vring_desc); in vu_queue_get_avail_bytes()
2516 if (++num_bufs > max) { in vu_queue_get_avail_bytes()
2522 in_total += le32toh(desc[i].len); in vu_queue_get_avail_bytes()
2524 out_total += le32toh(desc[i].len); in vu_queue_get_avail_bytes()
2529 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); in vu_queue_get_avail_bytes()
2580 if (vq->shadow_avail_idx != vq->last_avail_idx) { in vu_queue_empty()
2584 return vring_avail_idx(vq) == vq->last_avail_idx; in vu_queue_empty()
2598 !vq->inuse && vu_queue_empty(dev, vq)) { in vring_notify()
2606 v = vq->signalled_used_valid; in vring_notify()
2607 vq->signalled_used_valid = true; in vring_notify()
2608 old = vq->signalled_used; in vring_notify()
2609 new = vq->signalled_used = vq->used_idx; in vring_notify()
2624 if (vq->call_fd < 0 && in _vu_queue_notify()
2633 .index = vq - dev->vq, in _vu_queue_notify()
2644 vu_message_write(dev, dev->backend_fd, &vmsg); in _vu_queue_notify()
2646 vu_message_read_default(dev, dev->backend_fd, &vmsg); in _vu_queue_notify()
2651 if (eventfd_write(vq->call_fd, 1) < 0) { in _vu_queue_notify()
2673 vu_message_write(dev, dev->backend_fd, &vmsg); in vu_config_change_msg()
2681 flags = (uint16_t *)((char*)vq->vring.used + in vring_used_flags_set_bit()
2691 flags = (uint16_t *)((char*)vq->vring.used + in vring_used_flags_unset_bit()
2701 if (!vq->notification) { in vring_set_avail_event()
2705 memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t)); in vring_set_avail_event()
2711 vq->notification = enable; in vu_queue_set_notification()
2741 uint64_t len = sz; in virtqueue_map_desc() local
2748 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa); in virtqueue_map_desc()
2753 iov[num_sg].iov_len = len; in virtqueue_map_desc()
2755 sz -= len; in virtqueue_map_desc()
2756 pa += len; in virtqueue_map_desc()
2768 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0])); in virtqueue_alloc_element()
2769 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); in virtqueue_alloc_element()
2770 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); in virtqueue_alloc_element()
2778 elem->out_num = out_num; in virtqueue_alloc_element()
2779 elem->in_num = in_num; in virtqueue_alloc_element()
2780 elem->in_sg = (void *)elem + in_sg_ofs; in virtqueue_alloc_element()
2781 elem->out_sg = (void *)elem + out_sg_ofs; in virtqueue_alloc_element()
2788 struct vring_desc *desc = vq->vring.desc; in vu_queue_map_desc()
2791 unsigned int max = vq->vring.num; in vu_queue_map_desc() local
2800 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { in vu_queue_map_desc()
2807 desc_len = le32toh(desc[i].len); in vu_queue_map_desc()
2808 max = desc_len / sizeof(struct vring_desc); in vu_queue_map_desc()
2831 VIRTQUEUE_MAX_SIZE - out_num, true, in vu_queue_map_desc()
2833 le32toh(desc[i].len))) { in vu_queue_map_desc()
2844 le32toh(desc[i].len))) { in vu_queue_map_desc()
2850 if ((in_num + out_num) > max) { in vu_queue_map_desc()
2854 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); in vu_queue_map_desc()
2867 elem->index = idx; in vu_queue_map_desc()
2869 elem->out_sg[i] = iov[i]; in vu_queue_map_desc()
2872 elem->in_sg[i] = iov[out_num + i]; in vu_queue_map_desc()
2885 if (unlikely(!vq->inflight)) { in vu_queue_inflight_get()
2886 return -1; in vu_queue_inflight_get()
2889 vq->inflight->desc[desc_idx].counter = vq->counter++; in vu_queue_inflight_get()
2890 vq->inflight->desc[desc_idx].inflight = 1; in vu_queue_inflight_get()
2902 if (unlikely(!vq->inflight)) { in vu_queue_inflight_pre_put()
2903 return -1; in vu_queue_inflight_pre_put()
2906 vq->inflight->last_batch_head = desc_idx; in vu_queue_inflight_pre_put()
2918 if (unlikely(!vq->inflight)) { in vu_queue_inflight_post_put()
2919 return -1; in vu_queue_inflight_post_put()
2924 vq->inflight->desc[desc_idx].inflight = 0; in vu_queue_inflight_post_put()
2928 vq->inflight->used_idx = vq->used_idx; in vu_queue_inflight_post_put()
2944 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) { in vu_queue_pop()
2945 i = (--vq->resubmit_num); in vu_queue_pop()
2946 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz); in vu_queue_pop()
2948 if (!vq->resubmit_num) { in vu_queue_pop()
2949 free(vq->resubmit_list); in vu_queue_pop()
2950 vq->resubmit_list = NULL; in vu_queue_pop()
2965 if (vq->inuse >= vq->vring.num) { in vu_queue_pop()
2970 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) { in vu_queue_pop()
2975 vring_set_avail_event(vq, vq->last_avail_idx); in vu_queue_pop()
2984 vq->inuse++; in vu_queue_pop()
2993 size_t len) in vu_queue_detach_element() argument
2995 vq->inuse--; in vu_queue_detach_element()
3001 size_t len) in vu_queue_unpop() argument
3003 vq->last_avail_idx--; in vu_queue_unpop()
3004 vu_queue_detach_element(dev, vq, elem, len); in vu_queue_unpop()
3010 if (num > vq->inuse) { in vu_queue_rewind()
3013 vq->last_avail_idx -= num; in vu_queue_rewind()
3014 vq->inuse -= num; in vu_queue_rewind()
3022 struct vring_used *used = vq->vring.used; in vring_used_write()
3024 used->ring[i] = *uelem; in vring_used_write()
3025 vu_log_write(dev, vq->vring.log_guest_addr + in vring_used_write()
3027 sizeof(used->ring[i])); in vring_used_write()
3034 unsigned int len) in vu_log_queue_fill() argument
3036 struct vring_desc *desc = vq->vring.desc; in vu_log_queue_fill()
3037 unsigned int i, max, min, desc_len; in vu_log_queue_fill() local
3042 max = vq->vring.num; in vu_log_queue_fill()
3043 i = elem->index; in vu_log_queue_fill()
3046 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { in vu_log_queue_fill()
3053 desc_len = le32toh(desc[i].len); in vu_log_queue_fill()
3054 max = desc_len / sizeof(struct vring_desc); in vu_log_queue_fill()
3074 if (++num_bufs > max) { in vu_log_queue_fill()
3080 min = MIN(le32toh(desc[i].len), len); in vu_log_queue_fill()
3082 len -= min; in vu_log_queue_fill()
3085 } while (len > 0 && in vu_log_queue_fill()
3086 (virtqueue_read_next_desc(dev, desc, i, max, &i) in vu_log_queue_fill()
3093 unsigned int len, unsigned int idx) in vu_queue_fill() argument
3101 vu_log_queue_fill(dev, vq, elem, len); in vu_queue_fill()
3103 idx = (idx + vq->used_idx) % vq->vring.num; in vu_queue_fill()
3105 uelem.id = htole32(elem->index); in vu_queue_fill()
3106 uelem.len = htole32(len); in vu_queue_fill()
3113 vq->vring.used->idx = htole16(val); in vring_used_idx_set()
3115 vq->vring.log_guest_addr + offsetof(struct vring_used, idx), in vring_used_idx_set()
3116 sizeof(vq->vring.used->idx)); in vring_used_idx_set()
3118 vq->used_idx = val; in vring_used_idx_set()
3133 old = vq->used_idx; in vu_queue_flush()
3136 vq->inuse -= count; in vu_queue_flush()
3137 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { in vu_queue_flush()
3138 vq->signalled_used_valid = false; in vu_queue_flush()
3144 const VuVirtqElement *elem, unsigned int len) in vu_queue_push() argument
3146 vu_queue_fill(dev, vq, elem, len, 0); in vu_queue_push()
3147 vu_queue_inflight_pre_put(dev, vq, elem->index); in vu_queue_push()
3149 vu_queue_inflight_post_put(dev, vq, elem->index); in vu_queue_push()