Lines Matching +full:max +full:- +full:len

5  *   Portions of code and concepts borrowed from libvhost-user.c, so:
12 * Marc-André Lureau <mlureau@redhat.com>
16 * later. See the COPYING file in the top-level directory.
41 #include "linux-headers/linux/virtio_ring.h"
42 #include "linux-headers/linux/virtio_config.h"
43 #include "linux-headers/linux/vduse.h"
55 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
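
ALIGN_UP above rounds n up to a multiple of m by rounding n + m - 1 down; ALIGN_DOWN itself does not match this search and so is not shown. A minimal sketch, assuming the usual divide-and-multiply definition of ALIGN_DOWN:

    #include <assert.h>

    #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

    int main(void)
    {
        assert(ALIGN_DOWN(13, 8) == 8);
        assert(ALIGN_UP(13, 8) == 16);
        assert(ALIGN_UP(16, 8) == 16);   /* already aligned: unchanged */
        return 0;
    }
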
150 if (fd == -1) { in vduse_log_get()
154 if (ftruncate(fd, size) == -1) { in vduse_log_get()
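
The vduse_log_get() lines above create and size the reconnect log file; the function goes on to map it so that inflight state survives a device restart. A minimal sketch of that open/ftruncate/mmap pattern, with a hypothetical helper name and path and no VDUSE specifics:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical helper: map a file of 'size' bytes shared, creating it if needed. */
    static void *log_map(const char *filename, size_t size)
    {
        void *ptr;
        int fd = open(filename, O_RDWR | O_CREAT, 0600);

        if (fd == -1) {
            return MAP_FAILED;
        }
        if (ftruncate(fd, size) == -1) {
            close(fd);
            return MAP_FAILED;
        }
        /* MAP_SHARED: updates reach the file, so state recorded here can be
         * read back after a crash or restart of the device process. */
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);   /* the mapping stays valid after closing the fd */
        return ptr;
    }

    int main(void)
    {
        void *log = log_map("/tmp/reconnect.log", 4096);   /* path is illustrative */

        if (log == MAP_FAILED) {
            perror("log_map");
            return 1;
        }
        ((char *)log)[0] = 1;
        munmap(log, 4096);
        return 0;
    }
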
173 return has_feature(dev->features, fbit); in vduse_dev_has_feature()
187 return vq->dev; in vduse_queue_get_dev()
192 return vq->fd; in vduse_queue_get_fd()
197 return dev->priv; in vduse_dev_get_priv()
202 return &dev->vqs[index]; in vduse_dev_get_queue()
207 return dev->fd; in vduse_dev_get_fd()
212 return ioctl(dev->fd, VDUSE_VQ_INJECT_IRQ, &index); in vduse_inject_irq()
220 if (desc1->counter > desc0->counter && in inflight_desc_compare()
221 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) { in inflight_desc_compare()
225 return -1; in inflight_desc_compare()
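
inflight_desc_compare() orders resubmitted descriptors by a monotonically increasing submission counter, treating a counter that is ahead by less than 2 * VIRTQUEUE_MAX_SIZE as newer so that wrap-around is tolerated. A self-contained sketch of the same comparator driving qsort(), with illustrative types rather than the library's:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define QUEUE_MAX 1024   /* stand-in for VIRTQUEUE_MAX_SIZE */

    struct resubmit_desc {
        uint16_t index;
        uint64_t counter;
    };

    /* Sort newest submission first; a counter that is ahead by less than
     * 2 * QUEUE_MAX is treated as newer, so wrap-around does not break the order. */
    static int desc_compare(const void *a, const void *b)
    {
        const struct resubmit_desc *d0 = a, *d1 = b;

        if (d1->counter > d0->counter &&
            (d1->counter - d0->counter) < QUEUE_MAX * 2) {
            return 1;
        }
        return -1;
    }

    int main(void)
    {
        struct resubmit_desc list[] = { { 3, 7 }, { 8, 9 }, { 1, 8 } };

        qsort(list, 3, sizeof(list[0]), desc_compare);
        printf("newest counter: %llu\n", (unsigned long long)list[0].counter); /* 9 */
        return 0;
    }
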
231 VduseDev *dev = vq->dev; in vduse_queue_check_inflights()
233 vq->used_idx = le16toh(vq->vring.used->idx); in vduse_queue_check_inflights()
234 vq->resubmit_num = 0; in vduse_queue_check_inflights()
235 vq->resubmit_list = NULL; in vduse_queue_check_inflights()
236 vq->counter = 0; in vduse_queue_check_inflights()
238 if (unlikely(vq->log->inflight.used_idx != vq->used_idx)) { in vduse_queue_check_inflights()
239 if (vq->log->inflight.last_batch_head > VIRTQUEUE_MAX_SIZE) { in vduse_queue_check_inflights()
240 return -1; in vduse_queue_check_inflights()
243 vq->log->inflight.desc[vq->log->inflight.last_batch_head].inflight = 0; in vduse_queue_check_inflights()
247 vq->log->inflight.used_idx = vq->used_idx; in vduse_queue_check_inflights()
250 for (i = 0; i < vq->log->inflight.desc_num; i++) { in vduse_queue_check_inflights()
251 if (vq->log->inflight.desc[i].inflight == 1) { in vduse_queue_check_inflights()
252 vq->inuse++; in vduse_queue_check_inflights()
256 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx; in vduse_queue_check_inflights()
258 if (vq->inuse) { in vduse_queue_check_inflights()
259 vq->resubmit_list = calloc(vq->inuse, sizeof(VduseVirtqInflightDesc)); in vduse_queue_check_inflights()
260 if (!vq->resubmit_list) { in vduse_queue_check_inflights()
261 return -1; in vduse_queue_check_inflights()
264 for (i = 0; i < vq->log->inflight.desc_num; i++) { in vduse_queue_check_inflights()
265 if (vq->log->inflight.desc[i].inflight) { in vduse_queue_check_inflights()
266 vq->resubmit_list[vq->resubmit_num].index = i; in vduse_queue_check_inflights()
267 vq->resubmit_list[vq->resubmit_num].counter = in vduse_queue_check_inflights()
268 vq->log->inflight.desc[i].counter; in vduse_queue_check_inflights()
269 vq->resubmit_num++; in vduse_queue_check_inflights()
273 if (vq->resubmit_num > 1) { in vduse_queue_check_inflights()
274 qsort(vq->resubmit_list, vq->resubmit_num, in vduse_queue_check_inflights()
277 vq->counter = vq->resubmit_list[0].counter + 1; in vduse_queue_check_inflights()
280 vduse_inject_irq(dev, vq->index); in vduse_queue_check_inflights()
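
vduse_queue_check_inflights() above rebuilds queue state from the shared log after a reconnect: every descriptor still flagged inflight increments inuse, last_avail_idx is restored as used_idx + inuse, and the still-pending descriptors are collected and sorted by counter for resubmission. A simplified sketch of just that accounting, using illustrative types:

    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_SIZE 256   /* illustrative ring size */

    struct inflight_entry {
        uint8_t  inflight;   /* 1 while the descriptor is owned by the device */
        uint64_t counter;    /* submission order, used to sort resubmissions */
    };

    int main(void)
    {
        struct inflight_entry log[QUEUE_SIZE] = { 0 };
        uint16_t used_idx = 40;   /* read back from the used ring */
        uint16_t inuse = 0;

        /* pretend three requests were still in flight when the device went away */
        log[5].inflight = 1;  log[5].counter = 100;
        log[17].inflight = 1; log[17].counter = 101;
        log[30].inflight = 1; log[30].counter = 102;

        for (int i = 0; i < QUEUE_SIZE; i++) {
            inuse += log[i].inflight;
        }

        /* every head taken from the avail ring is either used or still inflight */
        uint16_t last_avail_idx = used_idx + inuse;
        printf("inuse=%u last_avail_idx=%u\n",
               (unsigned)inuse, (unsigned)last_avail_idx);   /* 3 and 43 */
        return 0;
    }
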
287 vq->log->inflight.desc[desc_idx].counter = vq->counter++; in vduse_queue_inflight_get()
291 vq->log->inflight.desc[desc_idx].inflight = 1; in vduse_queue_inflight_get()
298 vq->log->inflight.last_batch_head = desc_idx; in vduse_queue_inflight_pre_put()
305 vq->log->inflight.desc[desc_idx].inflight = 0; in vduse_queue_inflight_post_put()
309 vq->log->inflight.used_idx = vq->used_idx; in vduse_queue_inflight_post_put()
324 if (!dev->regions[i].mmap_addr) { in vduse_iova_remove_region()
328 if (start <= dev->regions[i].iova && in vduse_iova_remove_region()
329 last >= (dev->regions[i].iova + dev->regions[i].size - 1)) { in vduse_iova_remove_region()
330 munmap((void *)(uintptr_t)dev->regions[i].mmap_addr, in vduse_iova_remove_region()
331 dev->regions[i].mmap_offset + dev->regions[i].size); in vduse_iova_remove_region()
332 dev->regions[i].mmap_addr = 0; in vduse_iova_remove_region()
333 dev->num_regions--; in vduse_iova_remove_region()
343 uint64_t size = last - start + 1; in vduse_iova_add_region()
348 return -EINVAL; in vduse_iova_add_region()
352 if (!dev->regions[i].mmap_addr) { in vduse_iova_add_region()
353 dev->regions[i].mmap_addr = (uint64_t)(uintptr_t)mmap_addr; in vduse_iova_add_region()
354 dev->regions[i].mmap_offset = offset; in vduse_iova_add_region()
355 dev->regions[i].iova = start; in vduse_iova_add_region()
356 dev->regions[i].size = size; in vduse_iova_add_region()
357 dev->num_regions++; in vduse_iova_add_region()
394 VduseIovaRegion *r = &dev->regions[i]; in iova_to_va()
396 if (!r->mmap_addr) { in iova_to_va()
400 if ((iova >= r->iova) && (iova < (r->iova + r->size))) { in iova_to_va()
401 if ((iova + *plen) > (r->iova + r->size)) { in iova_to_va()
402 *plen = r->iova + r->size - iova; in iova_to_va()
404 return (void *)(uintptr_t)(iova - r->iova + in iova_to_va()
405 r->mmap_addr + r->mmap_offset); in iova_to_va()
411 ret = ioctl(dev->fd, VDUSE_IOTLB_GET_FD, &entry); in iova_to_va()
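
iova_to_va() above walks the array of mmap'd IOVA regions, clamps *plen when the requested range crosses the end of the containing region, and finally falls back to the VDUSE_IOTLB_GET_FD ioctl to map a missing range. A self-contained sketch of just the lookup-and-clamp step, with hypothetical region bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    struct iova_region {
        uint64_t iova;        /* first IOVA covered by the region */
        uint64_t size;        /* length in bytes */
        uint64_t mmap_addr;   /* host mapping backing the region */
        uint64_t mmap_offset; /* offset of the region inside that mapping */
    };

    /* Translate 'iova' to a host pointer, shrinking *plen if the requested
     * range runs past the end of the containing region. */
    static void *translate(struct iova_region *regs, int n,
                           uint64_t *plen, uint64_t iova)
    {
        for (int i = 0; i < n; i++) {
            struct iova_region *r = &regs[i];

            if (iova >= r->iova && iova < r->iova + r->size) {
                if (iova + *plen > r->iova + r->size) {
                    *plen = r->iova + r->size - iova;   /* clamp to region end */
                }
                return (void *)(uintptr_t)(iova - r->iova +
                                           r->mmap_addr + r->mmap_offset);
            }
        }
        return NULL;   /* the real code asks the kernel IOTLB at this point */
    }

    int main(void)
    {
        static char backing[4096];
        struct iova_region r = {
            .iova = 0x1000, .size = sizeof(backing),
            .mmap_addr = (uint64_t)(uintptr_t)backing,
        };
        uint64_t len = 8192;

        void *va = translate(&r, 1, &len, 0x1800);
        printf("va=%p len=%llu\n", va, (unsigned long long)len);   /* len clamped to 2048 */
        return 0;
    }
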
426 return le16toh(vq->vring.avail->flags); in vring_avail_flags()
431 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx); in vring_avail_idx()
433 return vq->shadow_avail_idx; in vring_avail_idx()
438 return le16toh(vq->vring.avail->ring[i]); in vring_avail_ring()
443 return vring_avail_ring(vq, vq->vring.num); in vring_get_used_event()
453 *head = vring_avail_ring(vq, idx % vq->vring.num); in vduse_queue_get_head()
456 if (*head >= vq->vring.num) { in vduse_queue_get_head()
466 uint64_t addr, size_t len) in vduse_queue_read_indirect_desc() argument
471 if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) { in vduse_queue_read_indirect_desc()
472 return -1; in vduse_queue_read_indirect_desc()
475 if (len == 0) { in vduse_queue_read_indirect_desc()
476 return -1; in vduse_queue_read_indirect_desc()
479 while (len) { in vduse_queue_read_indirect_desc()
480 read_len = len; in vduse_queue_read_indirect_desc()
483 return -1; in vduse_queue_read_indirect_desc()
487 len -= read_len; in vduse_queue_read_indirect_desc()
496 VIRTQUEUE_READ_DESC_ERROR = -1,
502 unsigned int max, unsigned int *next) in vduse_queue_read_next_desc() argument
514 if (*next >= max) { in vduse_queue_read_next_desc()
528 if (unlikely(!vq->vring.avail)) { in vduse_queue_empty()
532 if (vq->shadow_avail_idx != vq->last_avail_idx) { in vduse_queue_empty()
536 return vring_avail_idx(vq) == vq->last_avail_idx; in vduse_queue_empty()
541 VduseDev *dev = vq->dev; in vduse_queue_should_notify()
550 !vq->inuse && vduse_queue_empty(vq)) { in vduse_queue_should_notify()
558 v = vq->signalled_used_valid; in vduse_queue_should_notify()
559 vq->signalled_used_valid = true; in vduse_queue_should_notify()
560 old = vq->signalled_used; in vduse_queue_should_notify()
561 new = vq->signalled_used = vq->used_idx; in vduse_queue_should_notify()
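
The signalled_used bookkeeping above feeds the VIRTIO event-index suppression check: when the event-index feature is negotiated, the device only injects an interrupt if advancing the used index from old to new stepped over the driver's used_event value. The standard check from the VIRTIO specification, as a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if moving the used index from 'old' to 'new' stepped over
     * 'event_idx' (all arithmetic is modulo 2^16, so wrap-around works). */
    static bool vring_need_event(uint16_t event_idx, uint16_t new, uint16_t old)
    {
        return (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old);
    }

    int main(void)
    {
        /* driver asked to be woken once index 10 has been used */
        printf("%d\n", vring_need_event(10, 11, 9));   /* 1: crossed 10 */
        printf("%d\n", vring_need_event(10, 9, 5));    /* 0: not reached yet */
        printf("%d\n", vring_need_event(10, 15, 12));  /* 0: already past it */
        return 0;
    }
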
567 VduseDev *dev = vq->dev; in vduse_queue_notify()
569 if (unlikely(!vq->vring.avail)) { in vduse_queue_notify()
577 if (vduse_inject_irq(dev, vq->index) < 0) { in vduse_queue_notify()
579 vq->index, strerror(errno)); in vduse_queue_notify()
586 memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t)); in vring_set_avail_event()
594 VduseDev *dev = vq->dev; in vduse_queue_map_single_desc()
604 uint64_t len = sz; in vduse_queue_map_single_desc() local
612 iov[num_sg].iov_base = iova_to_va(dev, &len, pa); in vduse_queue_map_single_desc()
617 iov[num_sg++].iov_len = len; in vduse_queue_map_single_desc()
618 sz -= len; in vduse_queue_map_single_desc()
619 pa += len; in vduse_queue_map_single_desc()
630 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0])); in vduse_queue_alloc_element()
631 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); in vduse_queue_alloc_element()
632 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); in vduse_queue_alloc_element()
639 elem->out_num = out_num; in vduse_queue_alloc_element()
640 elem->in_num = in_num; in vduse_queue_alloc_element()
641 elem->in_sg = (void *)elem + in_sg_ofs; in vduse_queue_alloc_element()
642 elem->out_sg = (void *)elem + out_sg_ofs; in vduse_queue_alloc_element()
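
vduse_queue_alloc_element() lays out the element header, the in_sg array and the out_sg array inside a single allocation, using ALIGN_UP so the iovec arrays start at a suitable alignment. A minimal sketch of the same layout with a hypothetical element type:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/uio.h>

    #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

    struct elem {
        unsigned int index;
        unsigned int out_num, in_num;
        struct iovec *in_sg;    /* points into the same allocation */
        struct iovec *out_sg;
    };

    static struct elem *elem_alloc(unsigned int out_num, unsigned int in_num)
    {
        size_t in_ofs  = ALIGN_UP(sizeof(struct elem), __alignof__(struct iovec));
        size_t out_ofs = in_ofs + in_num * sizeof(struct iovec);
        size_t total   = out_ofs + out_num * sizeof(struct iovec);
        struct elem *e = calloc(1, total);

        if (!e) {
            return NULL;
        }
        e->out_num = out_num;
        e->in_num  = in_num;
        e->in_sg   = (void *)((char *)e + in_ofs);
        e->out_sg  = (void *)((char *)e + out_ofs);
        return e;    /* one free() releases the header and both iovec arrays */
    }

    int main(void)
    {
        struct elem *e = elem_alloc(2, 3);

        if (e) {
            printf("in_sg at +%zu, out_sg at +%zu\n",
                   (size_t)((char *)e->in_sg - (char *)e),
                   (size_t)((char *)e->out_sg - (char *)e));
            free(e);
        }
        return 0;
    }
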
648 struct vring_desc *desc = vq->vring.desc; in vduse_queue_map_desc()
649 VduseDev *dev = vq->dev; in vduse_queue_map_desc()
652 unsigned int max = vq->vring.num; in vduse_queue_map_desc() local
661 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { in vduse_queue_map_desc()
668 desc_len = le32toh(desc[i].len); in vduse_queue_map_desc()
669 max = desc_len / sizeof(struct vring_desc); in vduse_queue_map_desc()
692 VIRTQUEUE_MAX_SIZE - out_num, in vduse_queue_map_desc()
694 le32toh(desc[i].len))) { in vduse_queue_map_desc()
705 le32toh(desc[i].len))) { in vduse_queue_map_desc()
711 if ((in_num + out_num) > max) { in vduse_queue_map_desc()
715 rc = vduse_queue_read_next_desc(desc, i, max, &i); in vduse_queue_map_desc()
729 elem->index = idx; in vduse_queue_map_desc()
731 elem->out_sg[i] = iov[i]; in vduse_queue_map_desc()
734 elem->in_sg[i] = iov[out_num + i]; in vduse_queue_map_desc()
744 VduseDev *dev = vq->dev; in vduse_queue_pop()
747 if (unlikely(!vq->vring.avail)) { in vduse_queue_pop()
751 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) { in vduse_queue_pop()
752 i = (--vq->resubmit_num); in vduse_queue_pop()
753 elem = vduse_queue_map_desc(vq, vq->resubmit_list[i].index, sz); in vduse_queue_pop()
755 if (!vq->resubmit_num) { in vduse_queue_pop()
756 free(vq->resubmit_list); in vduse_queue_pop()
757 vq->resubmit_list = NULL; in vduse_queue_pop()
769 if (vq->inuse >= vq->vring.num) { in vduse_queue_pop()
770 fprintf(stderr, "Virtqueue size exceeded: %d\n", vq->inuse); in vduse_queue_pop()
774 if (!vduse_queue_get_head(vq, vq->last_avail_idx++, &head)) { in vduse_queue_pop()
779 vring_set_avail_event(vq, vq->last_avail_idx); in vduse_queue_pop()
788 vq->inuse++; in vduse_queue_pop()
798 struct vring_used *used = vq->vring.used; in vring_used_write()
800 used->ring[i] = *uelem; in vring_used_write()
804 unsigned int len, unsigned int idx) in vduse_queue_fill() argument
808 if (unlikely(!vq->vring.used)) { in vduse_queue_fill()
812 idx = (idx + vq->used_idx) % vq->vring.num; in vduse_queue_fill()
814 uelem.id = htole32(elem->index); in vduse_queue_fill()
815 uelem.len = htole32(len); in vduse_queue_fill()
821 vq->vring.used->idx = htole16(val); in vring_used_idx_set()
822 vq->used_idx = val; in vring_used_idx_set()
829 if (unlikely(!vq->vring.used)) { in vduse_queue_flush()
836 old = vq->used_idx; in vduse_queue_flush()
839 vq->inuse -= count; in vduse_queue_flush()
840 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { in vduse_queue_flush()
841 vq->signalled_used_valid = false; in vduse_queue_flush()
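
vduse_queue_fill() stages a completion at (idx + used_idx) modulo the ring size without publishing it, and vduse_queue_flush() later advances the used index by the whole batch while updating inuse and the signalled_used tracking. A toy sketch of that two-step publish on a split used ring (illustrative layout; a real device also issues a write barrier before the index store):

    #include <stdint.h>
    #include <stdio.h>

    #define QNUM 8   /* illustrative ring size */

    struct used_elem { uint32_t id; uint32_t len; };
    struct used_ring { uint16_t flags; uint16_t idx; struct used_elem ring[QNUM]; };

    int main(void)
    {
        struct used_ring used = { 0 };
        uint16_t used_idx = 6;   /* device's private copy of used.idx */

        /* fill: stage two completions at offsets 0 and 1 past used_idx,
         * wrapping modulo the ring size, without touching used.idx yet */
        for (unsigned i = 0; i < 2; i++) {
            unsigned slot = (i + used_idx) % QNUM;
            used.ring[slot] = (struct used_elem){ .id = 40 + i, .len = 512 };
        }

        /* flush: publish both at once by advancing used.idx */
        used_idx += 2;
        used.idx = used_idx;
        printf("used.idx=%u slot6.id=%u slot7.id=%u\n",
               (unsigned)used.idx, used.ring[6].id, used.ring[7].id);
        return 0;
    }
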
846 unsigned int len) in vduse_queue_push() argument
848 vduse_queue_fill(vq, elem, len, 0); in vduse_queue_push()
849 vduse_queue_inflight_pre_put(vq, elem->index); in vduse_queue_push()
851 vduse_queue_inflight_post_put(vq, elem->index); in vduse_queue_push()
857 struct VduseDev *dev = vq->dev; in vduse_queue_update_vring()
858 uint64_t len; in vduse_queue_update_vring() local
860 len = sizeof(struct vring_desc); in vduse_queue_update_vring()
861 vq->vring.desc = iova_to_va(dev, &len, desc_addr); in vduse_queue_update_vring()
862 if (len != sizeof(struct vring_desc)) { in vduse_queue_update_vring()
863 return -EINVAL; in vduse_queue_update_vring()
866 len = sizeof(struct vring_avail); in vduse_queue_update_vring()
867 vq->vring.avail = iova_to_va(dev, &len, avail_addr); in vduse_queue_update_vring()
868 if (len != sizeof(struct vring_avail)) { in vduse_queue_update_vring()
869 return -EINVAL; in vduse_queue_update_vring()
872 len = sizeof(struct vring_used); in vduse_queue_update_vring()
873 vq->vring.used = iova_to_va(dev, &len, used_addr); in vduse_queue_update_vring()
874 if (len != sizeof(struct vring_used)) { in vduse_queue_update_vring()
875 return -EINVAL; in vduse_queue_update_vring()
878 if (!vq->vring.desc || !vq->vring.avail || !vq->vring.used) { in vduse_queue_update_vring()
879 fprintf(stderr, "Failed to get vq[%d] iova mapping\n", vq->index); in vduse_queue_update_vring()
880 return -EINVAL; in vduse_queue_update_vring()
888 struct VduseDev *dev = vq->dev; in vduse_queue_enable()
893 vq_info.index = vq->index; in vduse_queue_enable()
894 if (ioctl(dev->fd, VDUSE_VQ_GET_INFO, &vq_info)) { in vduse_queue_enable()
896 vq->index, strerror(errno)); in vduse_queue_enable()
904 vq->vring.num = vq_info.num; in vduse_queue_enable()
905 vq->vring.desc_addr = vq_info.desc_addr; in vduse_queue_enable()
906 vq->vring.avail_addr = vq_info.driver_addr; in vduse_queue_enable()
907 vq->vring.used_addr = vq_info.device_addr; in vduse_queue_enable()
911 fprintf(stderr, "Failed to update vring for vq[%d]\n", vq->index); in vduse_queue_enable()
917 fprintf(stderr, "Failed to init eventfd for vq[%d]\n", vq->index); in vduse_queue_enable()
921 vq_eventfd.index = vq->index; in vduse_queue_enable()
923 if (ioctl(dev->fd, VDUSE_VQ_SETUP_KICKFD, &vq_eventfd)) { in vduse_queue_enable()
924 fprintf(stderr, "Failed to setup kick fd for vq[%d]\n", vq->index); in vduse_queue_enable()
929 vq->fd = fd; in vduse_queue_enable()
930 vq->signalled_used_valid = false; in vduse_queue_enable()
931 vq->ready = true; in vduse_queue_enable()
934 fprintf(stderr, "Failed to check inflights for vq[%d]\n", vq->index); in vduse_queue_enable()
939 dev->ops->enable_queue(dev, vq); in vduse_queue_enable()
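
vduse_queue_enable() creates an eventfd, hands it to the kernel with VDUSE_VQ_SETUP_KICKFD and keeps it in vq->fd so the dataplane can be woken when the driver kicks the queue. A generic sketch of consuming such a kick fd (plain eventfd/poll usage, no VDUSE ioctls):

    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        int kick_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
        struct pollfd pfd = { .fd = kick_fd, .events = POLLIN };
        uint64_t cnt;

        /* pretend the driver kicked twice before we reached poll() */
        cnt = 1;
        write(kick_fd, &cnt, sizeof(cnt));
        write(kick_fd, &cnt, sizeof(cnt));

        if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN)) {
            /* one read drains all pending kicks; then process the vring */
            read(kick_fd, &cnt, sizeof(cnt));
            printf("got %llu kick(s), time to pop the queue\n",
                   (unsigned long long)cnt);
        }
        close(kick_fd);
        return 0;
    }
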
944 struct VduseDev *dev = vq->dev; in vduse_queue_disable()
947 if (!vq->ready) { in vduse_queue_disable()
951 dev->ops->disable_queue(dev, vq); in vduse_queue_disable()
953 eventfd.index = vq->index; in vduse_queue_disable()
955 ioctl(dev->fd, VDUSE_VQ_SETUP_KICKFD, &eventfd); in vduse_queue_disable()
956 close(vq->fd); in vduse_queue_disable()
958 assert(vq->inuse == 0); in vduse_queue_disable()
960 vq->vring.num = 0; in vduse_queue_disable()
961 vq->vring.desc_addr = 0; in vduse_queue_disable()
962 vq->vring.avail_addr = 0; in vduse_queue_disable()
963 vq->vring.used_addr = 0; in vduse_queue_disable()
964 vq->vring.desc = 0; in vduse_queue_disable()
965 vq->vring.avail = 0; in vduse_queue_disable()
966 vq->vring.used = 0; in vduse_queue_disable()
967 vq->ready = false; in vduse_queue_disable()
968 vq->fd = -1; in vduse_queue_disable()
975 if (ioctl(dev->fd, VDUSE_DEV_GET_FEATURES, &dev->features)) { in vduse_dev_start_dataplane()
981 for (i = 0; i < dev->num_queues; i++) { in vduse_dev_start_dataplane()
982 vduse_queue_enable(&dev->vqs[i]); in vduse_dev_start_dataplane()
988 size_t log_size = dev->num_queues * vduse_vq_log_size(VIRTQUEUE_MAX_SIZE); in vduse_dev_stop_dataplane()
991 for (i = 0; i < dev->num_queues; i++) { in vduse_dev_stop_dataplane()
992 vduse_queue_disable(&dev->vqs[i]); in vduse_dev_stop_dataplane()
994 if (dev->log) { in vduse_dev_stop_dataplane()
995 memset(dev->log, 0, log_size); in vduse_dev_stop_dataplane()
997 dev->features = 0; in vduse_dev_stop_dataplane()
1008 ret = read(dev->fd, &req, sizeof(req)); in vduse_dev_handler()
1012 return -errno; in vduse_dev_handler()
1018 vq = &dev->vqs[req.vq_state.index]; in vduse_dev_handler()
1019 resp.vq_state.split.avail_index = vq->last_avail_idx; in vduse_dev_handler()
1033 for (i = 0; i < dev->num_queues; i++) { in vduse_dev_handler()
1034 vq = &dev->vqs[i]; in vduse_dev_handler()
1035 if (vq->ready) { in vduse_dev_handler()
1036 if (vduse_queue_update_vring(vq, vq->vring.desc_addr, in vduse_dev_handler()
1037 vq->vring.avail_addr, in vduse_dev_handler()
1038 vq->vring.used_addr)) { in vduse_dev_handler()
1040 vq->index); in vduse_dev_handler()
1051 ret = write(dev->fd, &resp, sizeof(resp)); in vduse_dev_handler()
1055 return -errno; in vduse_dev_handler()
1068 return -ENOMEM; in vduse_dev_update_config()
1071 data->offset = offset; in vduse_dev_update_config()
1072 data->length = size; in vduse_dev_update_config()
1073 memcpy(data->buffer, buffer, size); in vduse_dev_update_config()
1075 ret = ioctl(dev->fd, VDUSE_DEV_SET_CONFIG, data); in vduse_dev_update_config()
1079 return -errno; in vduse_dev_update_config()
1082 if (ioctl(dev->fd, VDUSE_DEV_INJECT_CONFIG_IRQ)) { in vduse_dev_update_config()
1083 return -errno; in vduse_dev_update_config()
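
vduse_dev_update_config() builds a variable-length message, copies the new config bytes into its trailing buffer, submits it with VDUSE_DEV_SET_CONFIG and then injects a config interrupt. The allocation is the usual flexible-array-member idiom; a self-contained sketch with a hypothetical message type:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical message carrying a variable-length payload */
    struct config_update {
        uint32_t offset;
        uint32_t length;
        uint8_t  buffer[];   /* flexible array member */
    };

    static struct config_update *make_update(uint32_t offset,
                                             const void *src, uint32_t size)
    {
        /* one allocation sized for the fixed header plus the payload */
        struct config_update *msg =
            malloc(offsetof(struct config_update, buffer) + size);

        if (!msg) {
            return NULL;
        }
        msg->offset = offset;
        msg->length = size;
        memcpy(msg->buffer, src, size);
        return msg;
    }

    int main(void)
    {
        uint8_t cfg[4] = { 1, 2, 3, 4 };
        struct config_update *msg = make_update(0, cfg, sizeof(cfg));

        if (msg) {
            printf("update of %u bytes at offset %u\n", msg->length, msg->offset);
            free(msg);
        }
        return 0;
    }
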
1091 VduseVirtq *vq = &dev->vqs[index]; in vduse_dev_setup_queue()
1095 return -EINVAL; in vduse_dev_setup_queue()
1098 vq_config.index = vq->index; in vduse_dev_setup_queue()
1101 if (ioctl(dev->fd, VDUSE_VQ_SETUP, &vq_config)) { in vduse_dev_setup_queue()
1102 return -errno; in vduse_dev_setup_queue()
1113 size_t log_size = dev->num_queues * vduse_vq_log_size(VIRTQUEUE_MAX_SIZE); in vduse_set_reconnect_log_file()
1117 dev->log = log = vduse_log_get(filename, log_size); in vduse_set_reconnect_log_file()
1120 return -EINVAL; in vduse_set_reconnect_log_file()
1123 for (i = 0; i < dev->num_queues; i++) { in vduse_set_reconnect_log_file()
1124 dev->vqs[i].log = log; in vduse_set_reconnect_log_file()
1125 dev->vqs[i].log->inflight.desc_num = VIRTQUEUE_MAX_SIZE; in vduse_set_reconnect_log_file()
1139 return -ENOMEM; in vduse_dev_init_vqs()
1145 vqs[i].fd = -1; in vduse_dev_init_vqs()
1147 dev->vqs = vqs; in vduse_dev_init_vqs()
1161 return -ENOMEM; in vduse_dev_init()
1170 return -errno; in vduse_dev_init()
1173 if (ioctl(fd, VDUSE_DEV_GET_FEATURES, &dev->features)) { in vduse_dev_init()
1176 return -errno; in vduse_dev_init()
1182 return -ENOMEM; in vduse_dev_init()
1192 dev->name = dev_name; in vduse_dev_init()
1193 dev->num_queues = num_queues; in vduse_dev_init()
1194 dev->fd = fd; in vduse_dev_init()
1195 dev->ops = ops; in vduse_dev_init()
1196 dev->priv = priv; in vduse_dev_init()
1212 if (!ops || !ops->enable_queue || !ops->disable_queue) { in vduse_dev_create_by_fd()
1223 if (ioctl(fd, VDUSE_DEV_GET_FEATURES, &dev->features)) { in vduse_dev_create_by_fd()
1236 dev->num_queues = num_queues; in vduse_dev_create_by_fd()
1237 dev->fd = fd; in vduse_dev_create_by_fd()
1238 dev->ops = ops; in vduse_dev_create_by_fd()
1239 dev->priv = priv; in vduse_dev_create_by_fd()
1251 !ops->enable_queue || !ops->disable_queue) { in vduse_dev_create_by_name()
1265 name, strerror(-ret)); in vduse_dev_create_by_name()
1286 !config_size || !ops || !ops->enable_queue || !ops->disable_queue) { in vduse_dev_create()
1318 strcpy(dev_config->name, name); in vduse_dev_create()
1319 dev_config->device_id = device_id; in vduse_dev_create()
1320 dev_config->vendor_id = vendor_id; in vduse_dev_create()
1321 dev_config->features = features; in vduse_dev_create()
1322 dev_config->vq_num = num_queues; in vduse_dev_create()
1323 dev_config->vq_align = VDUSE_VQ_ALIGN; in vduse_dev_create()
1324 dev_config->config_size = config_size; in vduse_dev_create()
1325 memcpy(dev_config->config, config, config_size); in vduse_dev_create()
1334 dev->ctrl_fd = ctrl_fd; in vduse_dev_create()
1339 name, strerror(-ret)); in vduse_dev_create()
1356 size_t log_size = dev->num_queues * vduse_vq_log_size(VIRTQUEUE_MAX_SIZE); in vduse_dev_destroy()
1359 if (dev->log) { in vduse_dev_destroy()
1360 munmap(dev->log, log_size); in vduse_dev_destroy()
1362 for (i = 0; i < dev->num_queues; i++) { in vduse_dev_destroy()
1363 free(dev->vqs[i].resubmit_list); in vduse_dev_destroy()
1365 free(dev->vqs); in vduse_dev_destroy()
1366 if (dev->fd >= 0) { in vduse_dev_destroy()
1367 close(dev->fd); in vduse_dev_destroy()
1368 dev->fd = -1; in vduse_dev_destroy()
1370 if (dev->ctrl_fd >= 0) { in vduse_dev_destroy()
1371 if (ioctl(dev->ctrl_fd, VDUSE_DESTROY_DEV, dev->name)) { in vduse_dev_destroy()
1372 ret = -errno; in vduse_dev_destroy()
1374 close(dev->ctrl_fd); in vduse_dev_destroy()
1375 dev->ctrl_fd = -1; in vduse_dev_destroy()
1377 free(dev->name); in vduse_dev_destroy()