Lines matching refs: vq

227 static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)  in virtio_virtqueue_reset_region_cache()  argument
231 caches = qatomic_read(&vq->vring.caches); in virtio_virtqueue_reset_region_cache()
232 qatomic_rcu_set(&vq->vring.caches, NULL); in virtio_virtqueue_reset_region_cache()
240 VirtQueue *vq = &vdev->vq[n]; in virtio_init_region_cache() local
241 VRingMemoryRegionCaches *old = vq->vring.caches; in virtio_init_region_cache()
248 addr = vq->vring.desc; in virtio_init_region_cache()
254 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? in virtio_init_region_cache()
265 vq->vring.used, size, true); in virtio_init_region_cache()
273 vq->vring.avail, size, false); in virtio_init_region_cache()
279 qatomic_rcu_set(&vq->vring.caches, new); in virtio_init_region_cache()
293 virtio_virtqueue_reset_region_cache(vq); in virtio_init_region_cache()
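
virtio_init_region_cache() (file lines 240-293 above) follows the RCU publish pattern: build a fresh set of region caches, publish the pointer with qatomic_rcu_set(), and reclaim the old set only after readers that fetched it via qatomic_rcu_read() have finished. A reduced, hypothetical sketch with C11 atomics standing in for QEMU's qatomic/RCU helpers; the grace-period deferral is only a comment here:

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct Caches { int dummy; } Caches;

    static _Atomic(Caches *) caches;

    static void replace_caches(void)
    {
        Caches *old = atomic_load_explicit(&caches, memory_order_acquire);
        Caches *new = calloc(1, sizeof(*new));

        /* ... fill in the new desc/avail/used mappings ... */

        /* Publish: release ordering plays the role of qatomic_rcu_set(). */
        atomic_store_explicit(&caches, new, memory_order_release);

        /* QEMU defers this free until all RCU readers are done (call_rcu);
         * a plain free() stands in for that step in this sketch. */
        free(old);
    }

    int main(void)
    {
        replace_caches();
        replace_caches();
        return 0;
    }
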
299 VRing *vring = &vdev->vq[n].vring; in virtio_queue_update_rings()
357 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq) in vring_get_region_caches() argument
359 return qatomic_rcu_read(&vq->vring.caches); in vring_get_region_caches()
363 static inline uint16_t vring_avail_flags(VirtQueue *vq) in vring_avail_flags() argument
365 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_avail_flags()
372 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); in vring_avail_flags()
376 static inline uint16_t vring_avail_idx(VirtQueue *vq) in vring_avail_idx() argument
378 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_avail_idx()
385 vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); in vring_avail_idx()
386 return vq->shadow_avail_idx; in vring_avail_idx()
390 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) in vring_avail_ring() argument
392 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_avail_ring()
399 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); in vring_avail_ring()
403 static inline uint16_t vring_get_used_event(VirtQueue *vq) in vring_get_used_event() argument
405 return vring_avail_ring(vq, vq->vring.num); in vring_get_used_event()
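
The split-ring accessors above all compute byte offsets into the guest's avail ring; vring_get_used_event() works because used_event is the extra uint16_t the driver places right after ring[num]. A minimal sketch of the layout these offsets assume (the struct mirrors QEMU's VRingAvail and the virtio spec, redeclared here for illustration; num is a hypothetical queue size):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VRingAvail {
        uint16_t flags;
        uint16_t idx;
        uint16_t ring[];   /* num entries; used_event follows when EVENT_IDX */
    } VRingAvail;

    int main(void)
    {
        size_t num = 256;   /* hypothetical queue size */

        printf("flags at %zu\n", offsetof(VRingAvail, flags));  /* vring_avail_flags() */
        printf("idx at %zu\n", offsetof(VRingAvail, idx));      /* vring_avail_idx() */
        printf("ring[i] at %zu + 2*i\n", offsetof(VRingAvail, ring)); /* vring_avail_ring() */
        /* vring_get_used_event() is vring_avail_ring(vq, num): one slot past the ring */
        printf("used_event at %zu\n",
               offsetof(VRingAvail, ring) + num * sizeof(uint16_t));
        return 0;
    }
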
409 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, in vring_used_write() argument
412 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_write()
419 virtio_tswap32s(vq->vdev, &uelem->id); in vring_used_write()
420 virtio_tswap32s(vq->vdev, &uelem->len); in vring_used_write()
426 static inline uint16_t vring_used_flags(VirtQueue *vq) in vring_used_flags() argument
428 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_flags()
435 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_flags()
439 static uint16_t vring_used_idx(VirtQueue *vq) in vring_used_idx() argument
441 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_idx()
448 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_idx()
452 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) in vring_used_idx_set() argument
454 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_idx_set()
458 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); in vring_used_idx_set()
462 vq->used_idx = val; in vring_used_idx_set()
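
The used-ring accessors above target the matching device-written layout; vring_used_write() byte-swaps id and len via virtio_tswap32s() before storing, and vring_set_avail_event() later reuses the slot one past ring[num]. A sketch of that layout (mirrors QEMU's VRingUsed/VRingUsedElem and the spec):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VRingUsedElem {
        uint32_t id;    /* head descriptor index of the completed chain */
        uint32_t len;   /* bytes written into device-writable buffers */
    } VRingUsedElem;

    typedef struct VRingUsed {
        uint16_t flags;
        uint16_t idx;
        VRingUsedElem ring[];   /* num entries; avail_event follows when EVENT_IDX */
    } VRingUsed;

    int main(void)
    {
        size_t num = 256;   /* hypothetical queue size */

        printf("flags at %zu\n", offsetof(VRingUsed, flags));  /* vring_used_flags() */
        printf("idx at %zu\n", offsetof(VRingUsed, idx));      /* vring_used_idx{,_set}() */
        printf("ring at %zu\n", offsetof(VRingUsed, ring));    /* vring_used_write() */
        /* vring_set_avail_event(): pa = offsetof(VRingUsed, ring[num]) */
        printf("avail_event at %zu\n",
               offsetof(VRingUsed, ring) + num * sizeof(VRingUsedElem));
        return 0;
    }
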
466 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) in vring_used_flags_set_bit() argument
468 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_flags_set_bit()
469 VirtIODevice *vdev = vq->vdev; in vring_used_flags_set_bit()
477 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_flags_set_bit()
483 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) in vring_used_flags_unset_bit() argument
485 VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); in vring_used_flags_unset_bit()
486 VirtIODevice *vdev = vq->vdev; in vring_used_flags_unset_bit()
494 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); in vring_used_flags_unset_bit()
500 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) in vring_set_avail_event() argument
504 if (!vq->notification) { in vring_set_avail_event()
508 caches = vring_get_region_caches(vq); in vring_set_avail_event()
513 pa = offsetof(VRingUsed, ring[vq->vring.num]); in vring_set_avail_event()
514 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); in vring_set_avail_event()
518 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable) in virtio_queue_split_set_notification() argument
522 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { in virtio_queue_split_set_notification()
523 vring_set_avail_event(vq, vring_avail_idx(vq)); in virtio_queue_split_set_notification()
525 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); in virtio_queue_split_set_notification()
527 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); in virtio_queue_split_set_notification()
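
virtio_queue_split_set_notification() suppresses guest-to-host kicks in one of two ways: with VIRTIO_RING_F_EVENT_IDX it publishes avail_event (vring_set_avail_event() above), otherwise it toggles VRING_USED_F_NO_NOTIFY in the used flags. A hedged sketch of the matching driver-side decision, using the spec's vring_need_event() formula; should_kick() is a hypothetical name, not QEMU code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VRING_USED_F_NO_NOTIFY 1

    /* The spec's event-idx formula: kick iff event_idx lies in (old, new],
     * with all arithmetic mod 2^16. */
    static bool vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                 uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
    }

    static bool should_kick(bool event_idx, uint16_t used_flags,
                            uint16_t avail_event, uint16_t new_idx,
                            uint16_t old_idx)
    {
        if (event_idx) {
            return vring_need_event(avail_event, new_idx, old_idx);
        }
        return !(used_flags & VRING_USED_F_NO_NOTIFY);
    }

    int main(void)
    {
        printf("%d\n", should_kick(true, 0, 5, 6, 4));   /* 1: event 5 in (4, 6] */
        printf("%d\n", should_kick(true, 0, 9, 6, 4));   /* 0: event not crossed */
        printf("%d\n", should_kick(false, VRING_USED_F_NO_NOTIFY, 0, 6, 4)); /* 0 */
        return 0;
    }
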
535 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable) in virtio_queue_packed_set_notification() argument
542 caches = vring_get_region_caches(vq); in virtio_queue_packed_set_notification()
547 vring_packed_event_read(vq->vdev, &caches->used, &e); in virtio_queue_packed_set_notification()
551 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { in virtio_queue_packed_set_notification()
552 off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15; in virtio_queue_packed_set_notification()
553 vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap); in virtio_queue_packed_set_notification()
561 vring_packed_flags_write(vq->vdev, &caches->used, e.flags); in virtio_queue_packed_set_notification()
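
The off_wrap value written above packs a 15-bit ring offset with the wrap counter in bit 15; the same encoding shows up later in virtio_queue_packed_get_last_avail_idx() and virtio_queue_set_shadow_avail_idx(). A round-trip sketch (helper names are hypothetical):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* bit 15 = wrap counter, bits 0..14 = ring offset */
    static uint16_t pack_off_wrap(uint16_t off, bool wrap)
    {
        return (off & 0x7fff) | (wrap ? 0x8000 : 0);
    }

    static void unpack_off_wrap(uint16_t off_wrap, uint16_t *off, bool *wrap)
    {
        *off = off_wrap & 0x7fff;
        *wrap = off_wrap >> 15;
    }

    int main(void)
    {
        uint16_t off;
        bool wrap;

        unpack_off_wrap(pack_off_wrap(0x1234, true), &off, &wrap);
        assert(off == 0x1234 && wrap);
        return 0;
    }
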
568 bool virtio_queue_get_notification(VirtQueue *vq) in virtio_queue_get_notification() argument
570 return vq->notification; in virtio_queue_get_notification()
573 void virtio_queue_set_notification(VirtQueue *vq, int enable) in virtio_queue_set_notification() argument
575 vq->notification = enable; in virtio_queue_set_notification()
577 if (!vq->vring.desc) { in virtio_queue_set_notification()
581 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_set_notification()
582 virtio_queue_packed_set_notification(vq, enable); in virtio_queue_set_notification()
584 virtio_queue_split_set_notification(vq, enable); in virtio_queue_set_notification()
588 int virtio_queue_ready(VirtQueue *vq) in virtio_queue_ready() argument
590 return vq->vring.avail != 0; in virtio_queue_ready()
682 static int virtio_queue_empty_rcu(VirtQueue *vq) in virtio_queue_empty_rcu() argument
684 if (virtio_device_disabled(vq->vdev)) { in virtio_queue_empty_rcu()
688 if (unlikely(!vq->vring.avail)) { in virtio_queue_empty_rcu()
692 if (vq->shadow_avail_idx != vq->last_avail_idx) { in virtio_queue_empty_rcu()
696 return vring_avail_idx(vq) == vq->last_avail_idx; in virtio_queue_empty_rcu()
699 static int virtio_queue_split_empty(VirtQueue *vq) in virtio_queue_split_empty() argument
703 if (virtio_device_disabled(vq->vdev)) { in virtio_queue_split_empty()
707 if (unlikely(!vq->vring.avail)) { in virtio_queue_split_empty()
711 if (vq->shadow_avail_idx != vq->last_avail_idx) { in virtio_queue_split_empty()
716 empty = vring_avail_idx(vq) == vq->last_avail_idx; in virtio_queue_split_empty()
721 static int virtio_queue_packed_empty_rcu(VirtQueue *vq) in virtio_queue_packed_empty_rcu() argument
726 if (unlikely(!vq->vring.desc)) { in virtio_queue_packed_empty_rcu()
730 cache = vring_get_region_caches(vq); in virtio_queue_packed_empty_rcu()
735 vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc, in virtio_queue_packed_empty_rcu()
736 vq->last_avail_idx); in virtio_queue_packed_empty_rcu()
738 return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter); in virtio_queue_packed_empty_rcu()
741 static int virtio_queue_packed_empty(VirtQueue *vq) in virtio_queue_packed_empty() argument
744 return virtio_queue_packed_empty_rcu(vq); in virtio_queue_packed_empty()
747 int virtio_queue_empty(VirtQueue *vq) in virtio_queue_empty() argument
749 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_empty()
750 return virtio_queue_packed_empty(vq); in virtio_queue_empty()
752 return virtio_queue_split_empty(vq); in virtio_queue_empty()
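
Both packed-ring emptiness checks reduce to is_desc_avail(): the descriptor at last_avail_idx is available when its AVAIL and USED flag bits differ and AVAIL matches the ring's current wrap counter. A self-contained copy of that predicate, with bit numbers 7 and 15 as defined by the virtio 1.1 spec:

    #include <stdbool.h>
    #include <stdint.h>

    #define VRING_PACKED_DESC_F_AVAIL 7
    #define VRING_PACKED_DESC_F_USED  15

    static bool is_desc_avail(uint16_t flags, bool wrap_counter)
    {
        bool avail = flags & (1 << VRING_PACKED_DESC_F_AVAIL);
        bool used  = flags & (1 << VRING_PACKED_DESC_F_USED);

        return (avail != used) && (avail == wrap_counter);
    }

    int main(void)
    {
        /* First pass (wrap=1): driver sets AVAIL=1, USED=0 -> available */
        return !is_desc_avail(1 << VRING_PACKED_DESC_F_AVAIL, true);
    }
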
756 static bool virtio_queue_split_poll(VirtQueue *vq, unsigned shadow_idx) in virtio_queue_split_poll() argument
758 if (unlikely(!vq->vring.avail)) { in virtio_queue_split_poll()
762 return (uint16_t)shadow_idx != vring_avail_idx(vq); in virtio_queue_split_poll()
765 static bool virtio_queue_packed_poll(VirtQueue *vq, unsigned shadow_idx) in virtio_queue_packed_poll() argument
770 if (unlikely(!vq->vring.desc)) { in virtio_queue_packed_poll()
774 caches = vring_get_region_caches(vq); in virtio_queue_packed_poll()
779 vring_packed_desc_read(vq->vdev, &desc, &caches->desc, in virtio_queue_packed_poll()
782 return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter); in virtio_queue_packed_poll()
785 static bool virtio_queue_poll(VirtQueue *vq, unsigned shadow_idx) in virtio_queue_poll() argument
787 if (virtio_device_disabled(vq->vdev)) { in virtio_queue_poll()
791 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_poll()
792 return virtio_queue_packed_poll(vq, shadow_idx); in virtio_queue_poll()
794 return virtio_queue_split_poll(vq, shadow_idx); in virtio_queue_poll()
798 bool virtio_queue_enable_notification_and_check(VirtQueue *vq, in virtio_queue_enable_notification_and_check() argument
801 virtio_queue_set_notification(vq, 1); in virtio_queue_enable_notification_and_check()
804 return virtio_queue_poll(vq, (unsigned)opaque); in virtio_queue_enable_notification_and_check()
810 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_unmap_sg() argument
813 AddressSpace *dma_as = vq->vdev->dma_as; in virtqueue_unmap_sg()
844 void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_detach_element() argument
847 vq->inuse -= elem->ndescs; in virtqueue_detach_element()
848 virtqueue_unmap_sg(vq, elem, len); in virtqueue_detach_element()
851 static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num) in virtqueue_split_rewind() argument
853 vq->last_avail_idx -= num; in virtqueue_split_rewind()
856 static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num) in virtqueue_packed_rewind() argument
858 if (vq->last_avail_idx < num) { in virtqueue_packed_rewind()
859 vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num; in virtqueue_packed_rewind()
860 vq->last_avail_wrap_counter ^= 1; in virtqueue_packed_rewind()
862 vq->last_avail_idx -= num; in virtqueue_packed_rewind()
874 void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_unpop() argument
878 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_unpop()
879 virtqueue_packed_rewind(vq, 1); in virtqueue_unpop()
881 virtqueue_split_rewind(vq, 1); in virtqueue_unpop()
884 virtqueue_detach_element(vq, elem, len); in virtqueue_unpop()
899 bool virtqueue_rewind(VirtQueue *vq, unsigned int num) in virtqueue_rewind() argument
901 if (num > vq->inuse) { in virtqueue_rewind()
905 vq->inuse -= num; in virtqueue_rewind()
906 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_rewind()
907 virtqueue_packed_rewind(vq, num); in virtqueue_rewind()
909 virtqueue_split_rewind(vq, num); in virtqueue_rewind()
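
The two rewind helpers highlight the differing index domains: a split ring's last_avail_idx is free-running mod 2^16 and can simply be decremented, while a packed ring's index counts descriptor slots mod vring.num and must flip the wrap counter whenever it crosses slot 0. A minimal sketch of the packed case (packed_rewind() is a hypothetical stand-in mirroring virtqueue_packed_rewind() above):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void packed_rewind(uint16_t *idx, bool *wrap, uint16_t num, uint16_t n)
    {
        if (*idx < n) {
            *idx += num - n;   /* same as num + idx - n */
            *wrap ^= 1;        /* crossed slot 0: previous ring pass */
        } else {
            *idx -= n;
        }
    }

    int main(void)
    {
        uint16_t idx = 1;
        bool wrap = true;

        packed_rewind(&idx, &wrap, 256, 3);
        printf("idx=%u wrap=%d\n", idx, wrap);   /* idx=254 wrap=0 */
        return 0;
    }
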
914 static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_split_fill() argument
919 if (unlikely(!vq->vring.used)) { in virtqueue_split_fill()
923 idx = (idx + vq->used_idx) % vq->vring.num; in virtqueue_split_fill()
927 vring_used_write(vq, &uelem, idx); in virtqueue_split_fill()
930 static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_packed_fill() argument
933 vq->used_elems[idx].index = elem->index; in virtqueue_packed_fill()
934 vq->used_elems[idx].len = len; in virtqueue_packed_fill()
935 vq->used_elems[idx].ndescs = elem->ndescs; in virtqueue_packed_fill()
938 static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_ordered_fill() argument
943 i = vq->used_idx % vq->vring.num; in virtqueue_ordered_fill()
949 max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num; in virtqueue_ordered_fill()
954 if (vq->used_elems[i].index == elem->index) { in virtqueue_ordered_fill()
955 vq->used_elems[i].len = len; in virtqueue_ordered_fill()
956 vq->used_elems[i].in_order_filled = true; in virtqueue_ordered_fill()
960 i += vq->used_elems[i].ndescs; in virtqueue_ordered_fill()
961 steps += vq->used_elems[i].ndescs; in virtqueue_ordered_fill()
963 if (i >= vq->vring.num) { in virtqueue_ordered_fill()
964 i -= vq->vring.num; in virtqueue_ordered_fill()
974 __func__, vq->vdev->name, elem->index); in virtqueue_ordered_fill()
978 static void virtqueue_packed_fill_desc(VirtQueue *vq, in virtqueue_packed_fill_desc() argument
989 bool wrap_counter = vq->used_wrap_counter; in virtqueue_packed_fill_desc()
991 if (unlikely(!vq->vring.desc)) { in virtqueue_packed_fill_desc()
995 head = vq->used_idx + idx; in virtqueue_packed_fill_desc()
996 if (head >= vq->vring.num) { in virtqueue_packed_fill_desc()
997 head -= vq->vring.num; in virtqueue_packed_fill_desc()
1008 caches = vring_get_region_caches(vq); in virtqueue_packed_fill_desc()
1013 vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order); in virtqueue_packed_fill_desc()
1017 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_fill() argument
1020 trace_virtqueue_fill(vq, elem, len, idx); in virtqueue_fill()
1022 virtqueue_unmap_sg(vq, elem, len); in virtqueue_fill()
1024 if (virtio_device_disabled(vq->vdev)) { in virtqueue_fill()
1028 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) { in virtqueue_fill()
1029 virtqueue_ordered_fill(vq, elem, len); in virtqueue_fill()
1030 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_fill()
1031 virtqueue_packed_fill(vq, elem, len, idx); in virtqueue_fill()
1033 virtqueue_split_fill(vq, elem, len, idx); in virtqueue_fill()
1038 static void virtqueue_split_flush(VirtQueue *vq, unsigned int count) in virtqueue_split_flush() argument
1042 if (unlikely(!vq->vring.used)) { in virtqueue_split_flush()
1048 trace_virtqueue_flush(vq, count); in virtqueue_split_flush()
1049 old = vq->used_idx; in virtqueue_split_flush()
1051 vring_used_idx_set(vq, new); in virtqueue_split_flush()
1052 vq->inuse -= count; in virtqueue_split_flush()
1053 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) in virtqueue_split_flush()
1054 vq->signalled_used_valid = false; in virtqueue_split_flush()
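
The cast dance on the flush path guards the event-idx optimization: once used_idx has advanced more than 2^15 past the index at which the guest was last signalled, the signed 16-bit distance goes negative and the cached signalled_used can no longer be compared reliably, so it is invalidated. A worked sketch of that test; must_invalidate() is a hypothetical name:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the flush-time check: the cached signalled_used is stale
     * once its mod-2^16 distance to new no longer fits a positive int16_t. */
    static bool must_invalidate(uint16_t old, uint16_t new, uint16_t signalled)
    {
        return (int16_t)(new - signalled) < (uint16_t)(new - old);
    }

    int main(void)
    {
        printf("%d\n", must_invalidate(10, 20, 5));      /* 0: distance 15 is fine */
        printf("%d\n", must_invalidate(10, 20, 32787));  /* 1: distance wrapped */
        return 0;
    }
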
1057 static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count) in virtqueue_packed_flush() argument
1061 if (unlikely(!vq->vring.desc)) { in virtqueue_packed_flush()
1073 ndescs += vq->used_elems[0].ndescs; in virtqueue_packed_flush()
1075 virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false); in virtqueue_packed_flush()
1076 ndescs += vq->used_elems[i].ndescs; in virtqueue_packed_flush()
1078 virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true); in virtqueue_packed_flush()
1080 vq->inuse -= ndescs; in virtqueue_packed_flush()
1081 vq->used_idx += ndescs; in virtqueue_packed_flush()
1082 if (vq->used_idx >= vq->vring.num) { in virtqueue_packed_flush()
1083 vq->used_idx -= vq->vring.num; in virtqueue_packed_flush()
1084 vq->used_wrap_counter ^= 1; in virtqueue_packed_flush()
1085 vq->signalled_used_valid = false; in virtqueue_packed_flush()
1089 static void virtqueue_ordered_flush(VirtQueue *vq) in virtqueue_ordered_flush() argument
1091 unsigned int i = vq->used_idx % vq->vring.num; in virtqueue_ordered_flush()
1093 uint16_t old = vq->used_idx; in virtqueue_ordered_flush()
1098 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED); in virtqueue_ordered_flush()
1101 if (unlikely(!vq->vring.desc)) { in virtqueue_ordered_flush()
1104 } else if (unlikely(!vq->vring.used)) { in virtqueue_ordered_flush()
1109 if (!vq->used_elems[i].in_order_filled) { in virtqueue_ordered_flush()
1114 while (vq->used_elems[i].in_order_filled) { in virtqueue_ordered_flush()
1119 if (packed && i != vq->used_idx) { in virtqueue_ordered_flush()
1120 virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false); in virtqueue_ordered_flush()
1122 uelem.id = vq->used_elems[i].index; in virtqueue_ordered_flush()
1123 uelem.len = vq->used_elems[i].len; in virtqueue_ordered_flush()
1124 vring_used_write(vq, &uelem, i); in virtqueue_ordered_flush()
1127 vq->used_elems[i].in_order_filled = false; in virtqueue_ordered_flush()
1128 ndescs += vq->used_elems[i].ndescs; in virtqueue_ordered_flush()
1129 i += vq->used_elems[i].ndescs; in virtqueue_ordered_flush()
1130 if (i >= vq->vring.num) { in virtqueue_ordered_flush()
1131 i -= vq->vring.num; in virtqueue_ordered_flush()
1136 virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true); in virtqueue_ordered_flush()
1137 vq->used_idx += ndescs; in virtqueue_ordered_flush()
1138 if (vq->used_idx >= vq->vring.num) { in virtqueue_ordered_flush()
1139 vq->used_idx -= vq->vring.num; in virtqueue_ordered_flush()
1140 vq->used_wrap_counter ^= 1; in virtqueue_ordered_flush()
1141 vq->signalled_used_valid = false; in virtqueue_ordered_flush()
1147 vring_used_idx_set(vq, new); in virtqueue_ordered_flush()
1148 if (unlikely((int16_t)(new - vq->signalled_used) < in virtqueue_ordered_flush()
1150 vq->signalled_used_valid = false; in virtqueue_ordered_flush()
1153 vq->inuse -= ndescs; in virtqueue_ordered_flush()
1156 void virtqueue_flush(VirtQueue *vq, unsigned int count) in virtqueue_flush() argument
1158 if (virtio_device_disabled(vq->vdev)) { in virtqueue_flush()
1159 vq->inuse -= count; in virtqueue_flush()
1163 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) { in virtqueue_flush()
1164 virtqueue_ordered_flush(vq); in virtqueue_flush()
1165 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_flush()
1166 virtqueue_packed_flush(vq, count); in virtqueue_flush()
1168 virtqueue_split_flush(vq, count); in virtqueue_flush()
1172 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, in virtqueue_push() argument
1176 virtqueue_fill(vq, elem, len, 0); in virtqueue_push()
1177 virtqueue_flush(vq, 1); in virtqueue_push()
1181 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) in virtqueue_num_heads() argument
1186 avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx in virtqueue_num_heads()
1187 : vring_avail_idx(vq); in virtqueue_num_heads()
1191 if (num_heads > vq->vring.num) { in virtqueue_num_heads()
1192 virtio_error(vq->vdev, "Guest moved used index from %u to %u", in virtqueue_num_heads()
1193 idx, vq->shadow_avail_idx); in virtqueue_num_heads()
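
virtqueue_num_heads() leans on the split ring's free-running 16-bit indices: the head count is plain unsigned subtraction, which stays correct across the 2^16 wrap as long as the guest never gets more than vring.num entries ahead (hence the "Guest moved used index" sanity check above). For example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t avail_idx = 3;       /* guest wrapped past 65535 */
        uint16_t last_idx  = 65533;
        uint16_t num_heads = avail_idx - last_idx;   /* mod 2^16 */

        printf("%u new heads\n", num_heads);         /* prints 6 */
        return 0;
    }
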
1213 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx, in virtqueue_get_head() argument
1218 *head = vring_avail_ring(vq, idx % vq->vring.num); in virtqueue_get_head()
1221 if (*head >= vq->vring.num) { in virtqueue_get_head()
1222 virtio_error(vq->vdev, "Guest says index %u is available", *head); in virtqueue_get_head()
1256 static void virtqueue_split_get_avail_bytes(VirtQueue *vq, in virtqueue_split_get_avail_bytes() argument
1261 VirtIODevice *vdev = vq->vdev; in virtqueue_split_get_avail_bytes()
1270 idx = vq->last_avail_idx; in virtqueue_split_get_avail_bytes()
1273 while ((rc = virtqueue_num_heads(vq, idx)) > 0) { in virtqueue_split_get_avail_bytes()
1278 unsigned int max = vq->vring.num; in virtqueue_split_get_avail_bytes()
1282 if (!virtqueue_get_head(vq, idx++, &i)) { in virtqueue_split_get_avail_bytes()
1365 static int virtqueue_packed_read_next_desc(VirtQueue *vq, in virtqueue_packed_read_next_desc() argument
1383 (*next) -= vq->vring.num; in virtqueue_packed_read_next_desc()
1387 vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false); in virtqueue_packed_read_next_desc()
1392 static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, in virtqueue_packed_get_avail_bytes() argument
1399 VirtIODevice *vdev = vq->vdev; in virtqueue_packed_get_avail_bytes()
1410 idx = vq->last_avail_idx; in virtqueue_packed_get_avail_bytes()
1411 wrap_counter = vq->last_avail_wrap_counter; in virtqueue_packed_get_avail_bytes()
1418 unsigned int max = vq->vring.num; in virtqueue_packed_get_avail_bytes()
1470 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, in virtqueue_packed_get_avail_bytes()
1484 if (idx >= vq->vring.num) { in virtqueue_packed_get_avail_bytes()
1485 idx -= vq->vring.num; in virtqueue_packed_get_avail_bytes()
1491 vq->shadow_avail_idx = idx; in virtqueue_packed_get_avail_bytes()
1492 vq->shadow_avail_wrap_counter = wrap_counter; in virtqueue_packed_get_avail_bytes()
1508 int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, in virtqueue_get_avail_bytes() argument
1517 if (unlikely(!vq->vring.desc)) { in virtqueue_get_avail_bytes()
1521 caches = vring_get_region_caches(vq); in virtqueue_get_avail_bytes()
1526 desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? in virtqueue_get_avail_bytes()
1528 if (caches->desc.len < vq->vring.num * desc_size) { in virtqueue_get_avail_bytes()
1529 virtio_error(vq->vdev, "Cannot map descriptor ring"); in virtqueue_get_avail_bytes()
1533 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_get_avail_bytes()
1534 virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes, in virtqueue_get_avail_bytes()
1538 virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes, in virtqueue_get_avail_bytes()
1543 return (int)vq->shadow_avail_idx; in virtqueue_get_avail_bytes()
1555 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, in virtqueue_avail_bytes() argument
1560 virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes); in virtqueue_avail_bytes()
1682 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz) in virtqueue_split_pop() argument
1689 VirtIODevice *vdev = vq->vdev; in virtqueue_split_pop()
1700 if (virtio_queue_empty_rcu(vq)) { in virtqueue_split_pop()
1710 max = vq->vring.num; in virtqueue_split_pop()
1712 if (vq->inuse >= vq->vring.num) { in virtqueue_split_pop()
1717 if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) { in virtqueue_split_pop()
1722 vring_set_avail_event(vq, vq->last_avail_idx); in virtqueue_split_pop()
1727 caches = vring_get_region_caches(vq); in virtqueue_split_pop()
1810 idx = (vq->last_avail_idx - 1) % vq->vring.num; in virtqueue_split_pop()
1811 vq->used_elems[idx].index = elem->index; in virtqueue_split_pop()
1812 vq->used_elems[idx].len = elem->len; in virtqueue_split_pop()
1813 vq->used_elems[idx].ndescs = elem->ndescs; in virtqueue_split_pop()
1816 vq->inuse++; in virtqueue_split_pop()
1818 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); in virtqueue_split_pop()
1829 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) in virtqueue_packed_pop() argument
1836 VirtIODevice *vdev = vq->vdev; in virtqueue_packed_pop()
1848 if (virtio_queue_packed_empty_rcu(vq)) { in virtqueue_packed_pop()
1855 max = vq->vring.num; in virtqueue_packed_pop()
1857 if (vq->inuse >= vq->vring.num) { in virtqueue_packed_pop()
1862 i = vq->last_avail_idx; in virtqueue_packed_pop()
1864 caches = vring_get_region_caches(vq); in virtqueue_packed_pop()
1927 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i, in virtqueue_packed_pop()
1952 vq->used_elems[vq->last_avail_idx].index = elem->index; in virtqueue_packed_pop()
1953 vq->used_elems[vq->last_avail_idx].len = elem->len; in virtqueue_packed_pop()
1954 vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs; in virtqueue_packed_pop()
1957 vq->last_avail_idx += elem->ndescs; in virtqueue_packed_pop()
1958 vq->inuse += elem->ndescs; in virtqueue_packed_pop()
1960 if (vq->last_avail_idx >= vq->vring.num) { in virtqueue_packed_pop()
1961 vq->last_avail_idx -= vq->vring.num; in virtqueue_packed_pop()
1962 vq->last_avail_wrap_counter ^= 1; in virtqueue_packed_pop()
1965 vq->shadow_avail_idx = vq->last_avail_idx; in virtqueue_packed_pop()
1966 vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter; in virtqueue_packed_pop()
1968 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); in virtqueue_packed_pop()
1979 void *virtqueue_pop(VirtQueue *vq, size_t sz) in virtqueue_pop() argument
1981 if (virtio_device_disabled(vq->vdev)) { in virtqueue_pop()
1985 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtqueue_pop()
1986 return virtqueue_packed_pop(vq, sz); in virtqueue_pop()
1988 return virtqueue_split_pop(vq, sz); in virtqueue_pop()
1992 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq) in virtqueue_packed_drop_all() argument
1998 VirtIODevice *vdev = vq->vdev; in virtqueue_packed_drop_all()
2003 caches = vring_get_region_caches(vq); in virtqueue_packed_drop_all()
2010 virtio_queue_set_notification(vq, 0); in virtqueue_packed_drop_all()
2012 while (vq->inuse < vq->vring.num) { in virtqueue_packed_drop_all()
2013 unsigned int idx = vq->last_avail_idx; in virtqueue_packed_drop_all()
2019 vq->last_avail_idx, true); in virtqueue_packed_drop_all()
2020 if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) { in virtqueue_packed_drop_all()
2025 while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache, in virtqueue_packed_drop_all()
2026 vq->vring.num, &idx, false)) { in virtqueue_packed_drop_all()
2033 virtqueue_push(vq, &elem, 0); in virtqueue_packed_drop_all()
2035 vq->last_avail_idx += elem.ndescs; in virtqueue_packed_drop_all()
2036 if (vq->last_avail_idx >= vq->vring.num) { in virtqueue_packed_drop_all()
2037 vq->last_avail_idx -= vq->vring.num; in virtqueue_packed_drop_all()
2038 vq->last_avail_wrap_counter ^= 1; in virtqueue_packed_drop_all()
2045 static unsigned int virtqueue_split_drop_all(VirtQueue *vq) in virtqueue_split_drop_all() argument
2049 VirtIODevice *vdev = vq->vdev; in virtqueue_split_drop_all()
2052 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) { in virtqueue_split_drop_all()
2056 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) { in virtqueue_split_drop_all()
2059 vq->inuse++; in virtqueue_split_drop_all()
2060 vq->last_avail_idx++; in virtqueue_split_drop_all()
2062 vring_set_avail_event(vq, vq->last_avail_idx); in virtqueue_split_drop_all()
2066 virtqueue_push(vq, &elem, 0); in virtqueue_split_drop_all()
2079 unsigned int virtqueue_drop_all(VirtQueue *vq) in virtqueue_drop_all() argument
2081 struct VirtIODevice *vdev = vq->vdev; in virtqueue_drop_all()
2083 if (virtio_device_disabled(vq->vdev)) { in virtqueue_drop_all()
2088 return virtqueue_packed_drop_all(vq); in virtqueue_drop_all()
2090 return virtqueue_split_drop_all(vq); in virtqueue_drop_all()
2284 vdev->vq[i].vring.desc = 0; in __virtio_queue_reset()
2285 vdev->vq[i].vring.avail = 0; in __virtio_queue_reset()
2286 vdev->vq[i].vring.used = 0; in __virtio_queue_reset()
2287 vdev->vq[i].last_avail_idx = 0; in __virtio_queue_reset()
2288 vdev->vq[i].shadow_avail_idx = 0; in __virtio_queue_reset()
2289 vdev->vq[i].used_idx = 0; in __virtio_queue_reset()
2290 vdev->vq[i].last_avail_wrap_counter = true; in __virtio_queue_reset()
2291 vdev->vq[i].shadow_avail_wrap_counter = true; in __virtio_queue_reset()
2292 vdev->vq[i].used_wrap_counter = true; in __virtio_queue_reset()
2294 vdev->vq[i].signalled_used = 0; in __virtio_queue_reset()
2295 vdev->vq[i].signalled_used_valid = false; in __virtio_queue_reset()
2296 vdev->vq[i].notification = true; in __virtio_queue_reset()
2297 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; in __virtio_queue_reset()
2298 vdev->vq[i].inuse = 0; in __virtio_queue_reset()
2299 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); in __virtio_queue_reset()
2336 if (!vdev->vq[n].vring.num) { in virtio_queue_set_addr()
2339 vdev->vq[n].vring.desc = addr; in virtio_queue_set_addr()
2345 return vdev->vq[n].vring.desc; in virtio_queue_get_addr()
2351 if (!vdev->vq[n].vring.num) { in virtio_queue_set_rings()
2354 vdev->vq[n].vring.desc = desc; in virtio_queue_set_rings()
2355 vdev->vq[n].vring.avail = avail; in virtio_queue_set_rings()
2356 vdev->vq[n].vring.used = used; in virtio_queue_set_rings()
2365 if (!!num != !!vdev->vq[n].vring.num || in virtio_queue_set_num()
2370 vdev->vq[n].vring.num = num; in virtio_queue_set_num()
2378 VirtQueue *virtio_vector_next_queue(VirtQueue *vq) in virtio_vector_next_queue() argument
2380 return QLIST_NEXT(vq, node); in virtio_vector_next_queue()
2385 return vdev->vq[n].vring.num; in virtio_queue_get_num()
2390 return vdev->vq[n].vring.num_default; in virtio_queue_get_max_num()
2423 vdev->vq[n].vring.align = align; in virtio_queue_set_align()
2428 void virtio_queue_set_shadow_avail_idx(VirtQueue *vq, uint16_t shadow_avail_idx) in virtio_queue_set_shadow_avail_idx() argument
2430 if (!vq->vring.desc) { in virtio_queue_set_shadow_avail_idx()
2438 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { in virtio_queue_set_shadow_avail_idx()
2439 vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1; in virtio_queue_set_shadow_avail_idx()
2440 vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF; in virtio_queue_set_shadow_avail_idx()
2442 vq->shadow_avail_idx = shadow_avail_idx; in virtio_queue_set_shadow_avail_idx()
2446 static void virtio_queue_notify_vq(VirtQueue *vq) in virtio_queue_notify_vq() argument
2448 if (vq->vring.desc && vq->handle_output) { in virtio_queue_notify_vq()
2449 VirtIODevice *vdev = vq->vdev; in virtio_queue_notify_vq()
2455 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); in virtio_queue_notify_vq()
2456 vq->handle_output(vdev, vq); in virtio_queue_notify_vq()
2466 VirtQueue *vq = &vdev->vq[n]; in virtio_queue_notify() local
2468 if (unlikely(!vq->vring.desc || vdev->broken)) { in virtio_queue_notify()
2472 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); in virtio_queue_notify()
2473 if (vq->host_notifier_enabled) { in virtio_queue_notify()
2474 event_notifier_set(&vq->host_notifier); in virtio_queue_notify()
2475 } else if (vq->handle_output) { in virtio_queue_notify()
2476 vq->handle_output(vdev, vq); in virtio_queue_notify()
2486 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector : in virtio_queue_vector()
2492 VirtQueue *vq = &vdev->vq[n]; in virtio_queue_set_vector() local
2496 vdev->vq[n].vector != VIRTIO_NO_VECTOR) { in virtio_queue_set_vector()
2497 QLIST_REMOVE(vq, node); in virtio_queue_set_vector()
2499 vdev->vq[n].vector = vector; in virtio_queue_set_vector()
2502 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node); in virtio_queue_set_vector()
2513 if (vdev->vq[i].vring.num == 0) in virtio_add_queue()
2520 vdev->vq[i].vring.num = queue_size; in virtio_add_queue()
2521 vdev->vq[i].vring.num_default = queue_size; in virtio_add_queue()
2522 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; in virtio_add_queue()
2523 vdev->vq[i].handle_output = handle_output; in virtio_add_queue()
2524 vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size); in virtio_add_queue()
2526 return &vdev->vq[i]; in virtio_add_queue()
2529 void virtio_delete_queue(VirtQueue *vq) in virtio_delete_queue() argument
2531 vq->vring.num = 0; in virtio_delete_queue()
2532 vq->vring.num_default = 0; in virtio_delete_queue()
2533 vq->handle_output = NULL; in virtio_delete_queue()
2534 g_free(vq->used_elems); in virtio_delete_queue()
2535 vq->used_elems = NULL; in virtio_delete_queue()
2536 virtio_virtqueue_reset_region_cache(vq); in virtio_delete_queue()
2545 virtio_delete_queue(&vdev->vq[n]); in virtio_del_queue()
2561 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_split_should_notify() argument
2569 !vq->inuse && virtio_queue_empty(vq)) { in virtio_split_should_notify()
2574 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); in virtio_split_should_notify()
2577 v = vq->signalled_used_valid; in virtio_split_should_notify()
2578 vq->signalled_used_valid = true; in virtio_split_should_notify()
2579 old = vq->signalled_used; in virtio_split_should_notify()
2580 new = vq->signalled_used = vq->used_idx; in virtio_split_should_notify()
2581 return !v || vring_need_event(vring_get_used_event(vq), new, old); in virtio_split_should_notify()
2584 static bool vring_packed_need_event(VirtQueue *vq, bool wrap, in vring_packed_need_event() argument
2591 off -= vq->vring.num; in vring_packed_need_event()
2598 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_packed_should_notify() argument
2605 caches = vring_get_region_caches(vq); in virtio_packed_should_notify()
2612 old = vq->signalled_used; in virtio_packed_should_notify()
2613 new = vq->signalled_used = vq->used_idx; in virtio_packed_should_notify()
2614 v = vq->signalled_used_valid; in virtio_packed_should_notify()
2615 vq->signalled_used_valid = true; in virtio_packed_should_notify()
2623 return !v || vring_packed_need_event(vq, vq->used_wrap_counter, in virtio_packed_should_notify()
2628 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_should_notify() argument
2631 return virtio_packed_should_notify(vdev, vq); in virtio_should_notify()
2633 return virtio_split_should_notify(vdev, vq); in virtio_should_notify()
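
vring_packed_need_event() maps the packed ring back onto the split ring's formula: the event's bit-15 wrap flag says which ring pass it refers to, and when that disagrees with the device's current wrap counter the offset is shifted down by vring.num so the mod-2^16 comparison still lines up. A self-contained sketch; packed_need_event() is a hypothetical stand-in taking the ring size explicitly:

    #include <stdbool.h>
    #include <stdint.h>

    /* The split-ring formula, as above: event crossed by old -> new, mod 2^16. */
    static bool vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                 uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
    }

    static bool packed_need_event(bool wrap, uint16_t off_wrap, uint16_t num,
                                  uint16_t new_idx, uint16_t old_idx)
    {
        int off = off_wrap & 0x7fff;

        if (wrap != off_wrap >> 15) {
            off -= num;   /* event refers to the previous ring pass */
        }
        return vring_need_event(off, new_idx, old_idx);
    }

    int main(void)
    {
        /* Event at slot 3 of the current pass; used_idx moved 2 -> 4: notify. */
        return !packed_need_event(true, (1u << 15) | 3, 256, 4, 2);
    }
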
2641 VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier); in virtio_notify_irqfd_deferred_fn() local
2643 trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq); in virtio_notify_irqfd_deferred_fn()
2647 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) in virtio_notify_irqfd() argument
2650 if (!virtio_should_notify(vdev, vq)) { in virtio_notify_irqfd()
2655 trace_virtio_notify_irqfd(vdev, vq); in virtio_notify_irqfd()
2672 virtio_set_isr(vq->vdev, 0x1); in virtio_notify_irqfd()
2673 defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier); in virtio_notify_irqfd()
2676 static void virtio_irq(VirtQueue *vq) in virtio_irq() argument
2678 virtio_set_isr(vq->vdev, 0x1); in virtio_irq()
2679 virtio_notify_vector(vq->vdev, vq->vector); in virtio_irq()
2682 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) in virtio_notify() argument
2685 if (!virtio_should_notify(vdev, vq)) { in virtio_notify()
2690 trace_virtio_notify(vdev, vq); in virtio_notify()
2691 virtio_irq(vq); in virtio_notify()
2743 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) { in virtio_ringsize_needed()
2812 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2824 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2846 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2998 if (vdev->vq[i].vring.num == 0) in virtio_save()
3005 if (vdev->vq[i].vring.num == 0) in virtio_save()
3008 qemu_put_be32(f, vdev->vq[i].vring.num); in virtio_save()
3010 qemu_put_be32(f, vdev->vq[i].vring.align); in virtio_save()
3016 qemu_put_be64(f, vdev->vq[i].vring.desc); in virtio_save()
3017 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); in virtio_save()
3130 if (vdev->vq[i].vring.num != 0) { in virtio_set_features()
3288 vdev->vq[i].vring.num = qemu_get_be32(f); in virtio_load()
3290 vdev->vq[i].vring.align = qemu_get_be32(f); in virtio_load()
3292 vdev->vq[i].vring.desc = qemu_get_be64(f); in virtio_load()
3293 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); in virtio_load()
3294 vdev->vq[i].signalled_used_valid = false; in virtio_load()
3295 vdev->vq[i].notification = true; in virtio_load()
3297 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) { in virtio_load()
3300 i, vdev->vq[i].last_avail_idx); in virtio_load()
3365 if (vdev->vq[i].vring.desc) { in virtio_load()
3381 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx; in virtio_load()
3382 vdev->vq[i].shadow_avail_wrap_counter = in virtio_load()
3383 vdev->vq[i].last_avail_wrap_counter; in virtio_load()
3387 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; in virtio_load()
3389 if (nheads > vdev->vq[i].vring.num) { in virtio_load()
3392 i, vdev->vq[i].vring.num, in virtio_load()
3393 vring_avail_idx(&vdev->vq[i]), in virtio_load()
3394 vdev->vq[i].last_avail_idx, nheads); in virtio_load()
3395 vdev->vq[i].used_idx = 0; in virtio_load()
3396 vdev->vq[i].shadow_avail_idx = 0; in virtio_load()
3397 vdev->vq[i].inuse = 0; in virtio_load()
3400 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]); in virtio_load()
3401 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]); in virtio_load()
3409 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx - in virtio_load()
3410 vdev->vq[i].used_idx); in virtio_load()
3411 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) { in virtio_load()
3414 i, vdev->vq[i].vring.num, in virtio_load()
3415 vdev->vq[i].last_avail_idx, in virtio_load()
3416 vdev->vq[i].used_idx); in virtio_load()
3493 vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX); in virtio_init()
3497 vdev->vq[i].vector = VIRTIO_NO_VECTOR; in virtio_init()
3498 vdev->vq[i].vdev = vdev; in virtio_init()
3499 vdev->vq[i].queue_index = i; in virtio_init()
3500 vdev->vq[i].host_notifier_enabled = false; in virtio_init()
3547 return vdev->vq[n].vring.desc; in virtio_queue_get_desc_addr()
3568 return vdev->vq[n].vring.avail; in virtio_queue_get_avail_addr()
3573 return vdev->vq[n].vring.used; in virtio_queue_get_used_addr()
3578 return sizeof(VRingDesc) * vdev->vq[n].vring.num; in virtio_queue_get_desc_size()
3591 sizeof(uint16_t) * vdev->vq[n].vring.num + s; in virtio_queue_get_avail_size()
3604 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s; in virtio_queue_get_used_size()
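
These size helpers implement the spec's split-ring sizing, where s is the optional trailing uint16_t event field (present once VIRTIO_RING_F_EVENT_IDX is negotiated). A standalone sketch of the same arithmetic:

    #include <stddef.h>
    #include <stdio.h>

    /* Split-ring region sizes per the virtio spec; event is 1 when
     * VIRTIO_RING_F_EVENT_IDX adds the trailing uint16_t field. */
    static size_t split_desc_size(size_t num)             { return 16 * num; }
    static size_t split_avail_size(size_t num, int event) { return 4 + 2 * num + 2 * event; }
    static size_t split_used_size(size_t num, int event)  { return 4 + 8 * num + 2 * event; }

    int main(void)
    {
        printf("desc=%zu avail=%zu used=%zu\n", split_desc_size(256),
               split_avail_size(256, 1), split_used_size(256, 1));
        return 0;
    }
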
3612 avail = vdev->vq[n].last_avail_idx; in virtio_queue_packed_get_last_avail_idx()
3613 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15; in virtio_queue_packed_get_last_avail_idx()
3615 used = vdev->vq[n].used_idx; in virtio_queue_packed_get_last_avail_idx()
3616 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15; in virtio_queue_packed_get_last_avail_idx()
3624 return vdev->vq[n].last_avail_idx; in virtio_queue_split_get_last_avail_idx()
3639 struct VirtQueue *vq = &vdev->vq[n]; in virtio_queue_packed_set_last_avail_idx() local
3641 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff; in virtio_queue_packed_set_last_avail_idx()
3642 vq->last_avail_wrap_counter = in virtio_queue_packed_set_last_avail_idx()
3643 vq->shadow_avail_wrap_counter = !!(idx & 0x8000); in virtio_queue_packed_set_last_avail_idx()
3645 vq->used_idx = idx & 0x7fff; in virtio_queue_packed_set_last_avail_idx()
3646 vq->used_wrap_counter = !!(idx & 0x8000); in virtio_queue_packed_set_last_avail_idx()
3652 vdev->vq[n].last_avail_idx = idx; in virtio_queue_split_set_last_avail_idx()
3653 vdev->vq[n].shadow_avail_idx = idx; in virtio_queue_split_set_last_avail_idx()
3676 if (vdev->vq[n].vring.desc) { in virtio_queue_split_restore_last_avail_idx()
3677 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]); in virtio_queue_split_restore_last_avail_idx()
3678 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx; in virtio_queue_split_restore_last_avail_idx()
3699 if (vdev->vq[n].vring.desc) { in virtio_queue_split_update_used_idx()
3700 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]); in virtio_queue_split_update_used_idx()
3715 vdev->vq[n].signalled_used_valid = false; in virtio_queue_invalidate_signalled_used()
3720 return vdev->vq + n; in virtio_get_queue()
3723 uint16_t virtio_get_queue_index(VirtQueue *vq) in virtio_get_queue_index() argument
3725 return vq->queue_index; in virtio_get_queue_index()
3730 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); in virtio_queue_guest_notifier_read() local
3732 virtio_irq(vq); in virtio_queue_guest_notifier_read()
3743 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, in virtio_queue_set_guest_notifier_fd_handler() argument
3747 event_notifier_set_handler(&vq->guest_notifier, in virtio_queue_set_guest_notifier_fd_handler()
3750 event_notifier_set_handler(&vq->guest_notifier, NULL); in virtio_queue_set_guest_notifier_fd_handler()
3755 virtio_queue_guest_notifier_read(&vq->guest_notifier); in virtio_queue_set_guest_notifier_fd_handler()
3776 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) in virtio_queue_get_guest_notifier() argument
3778 return &vq->guest_notifier; in virtio_queue_get_guest_notifier()
3783 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll_begin() local
3785 virtio_queue_set_notification(vq, 0); in virtio_queue_host_notifier_aio_poll_begin()
3791 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll() local
3793 return vq->vring.desc && !virtio_queue_empty(vq); in virtio_queue_host_notifier_aio_poll()
3798 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll_ready() local
3800 virtio_queue_notify_vq(vq); in virtio_queue_host_notifier_aio_poll_ready()
3805 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_aio_poll_end() local
3808 virtio_queue_set_notification(vq, 1); in virtio_queue_host_notifier_aio_poll_end()
3811 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) in virtio_queue_aio_attach_host_notifier() argument
3820 if (!virtio_queue_get_notification(vq)) { in virtio_queue_aio_attach_host_notifier()
3821 virtio_queue_set_notification(vq, 1); in virtio_queue_aio_attach_host_notifier()
3824 aio_set_event_notifier(ctx, &vq->host_notifier, in virtio_queue_aio_attach_host_notifier()
3828 aio_set_event_notifier_poll(ctx, &vq->host_notifier, in virtio_queue_aio_attach_host_notifier()
3837 event_notifier_set(&vq->host_notifier); in virtio_queue_aio_attach_host_notifier()
3846 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx) in virtio_queue_aio_attach_host_notifier_no_poll() argument
3849 if (!virtio_queue_get_notification(vq)) { in virtio_queue_aio_attach_host_notifier_no_poll()
3850 virtio_queue_set_notification(vq, 1); in virtio_queue_aio_attach_host_notifier_no_poll()
3853 aio_set_event_notifier(ctx, &vq->host_notifier, in virtio_queue_aio_attach_host_notifier_no_poll()
3863 event_notifier_set(&vq->host_notifier); in virtio_queue_aio_attach_host_notifier_no_poll()
3866 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx) in virtio_queue_aio_detach_host_notifier() argument
3868 aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL); in virtio_queue_aio_detach_host_notifier()
3884 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); in virtio_queue_host_notifier_read() local
3886 virtio_queue_notify_vq(vq); in virtio_queue_host_notifier_read()
3890 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) in virtio_queue_get_host_notifier() argument
3892 return &vq->host_notifier; in virtio_queue_get_host_notifier()
3900 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled) in virtio_queue_set_host_notifier_enabled() argument
3902 vq->host_notifier_enabled = enabled; in virtio_queue_set_host_notifier_enabled()
3946 if (vdev->vq[i].vring.num == 0) { in virtio_memory_listener_commit()
4009 if (!vdev->vq) { in virtio_device_free_virtqueues()
4014 if (vdev->vq[i].vring.num == 0) { in virtio_device_free_virtqueues()
4017 virtio_virtqueue_reset_region_cache(&vdev->vq[i]); in virtio_device_free_virtqueues()
4019 g_free(vdev->vq); in virtio_device_free_virtqueues()
4051 VirtQueue *vq = &vdev->vq[n]; in virtio_device_start_ioeventfd_impl() local
4060 event_notifier_set_handler(&vq->host_notifier, in virtio_device_start_ioeventfd_impl()
4066 VirtQueue *vq = &vdev->vq[n]; in virtio_device_start_ioeventfd_impl() local
4067 if (!vq->vring.num) { in virtio_device_start_ioeventfd_impl()
4070 event_notifier_set(&vq->host_notifier); in virtio_device_start_ioeventfd_impl()
4078 VirtQueue *vq = &vdev->vq[n]; in virtio_device_start_ioeventfd_impl() local
4083 event_notifier_set_handler(&vq->host_notifier, NULL); in virtio_device_start_ioeventfd_impl()
4121 VirtQueue *vq = &vdev->vq[n]; in virtio_device_stop_ioeventfd_impl() local
4126 event_notifier_set_handler(&vq->host_notifier, NULL); in virtio_device_stop_ioeventfd_impl()
4204 status->queue_index = vdev->vq[queue].queue_index; in qmp_x_query_virtio_queue_status()
4205 status->inuse = vdev->vq[queue].inuse; in qmp_x_query_virtio_queue_status()
4206 status->vring_num = vdev->vq[queue].vring.num; in qmp_x_query_virtio_queue_status()
4207 status->vring_num_default = vdev->vq[queue].vring.num_default; in qmp_x_query_virtio_queue_status()
4208 status->vring_align = vdev->vq[queue].vring.align; in qmp_x_query_virtio_queue_status()
4209 status->vring_desc = vdev->vq[queue].vring.desc; in qmp_x_query_virtio_queue_status()
4210 status->vring_avail = vdev->vq[queue].vring.avail; in qmp_x_query_virtio_queue_status()
4211 status->vring_used = vdev->vq[queue].vring.used; in qmp_x_query_virtio_queue_status()
4212 status->used_idx = vdev->vq[queue].used_idx; in qmp_x_query_virtio_queue_status()
4213 status->signalled_used = vdev->vq[queue].signalled_used; in qmp_x_query_virtio_queue_status()
4214 status->signalled_used_valid = vdev->vq[queue].signalled_used_valid; in qmp_x_query_virtio_queue_status()
4236 status->last_avail_idx = vdev->vq[queue].last_avail_idx; in qmp_x_query_virtio_queue_status()
4237 status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx; in qmp_x_query_virtio_queue_status()
4281 VirtQueue *vq; in qmp_x_query_virtio_queue_element() local
4294 vq = &vdev->vq[queue]; in qmp_x_query_virtio_queue_element()
4313 max = vq->vring.num; in qmp_x_query_virtio_queue_element()
4316 head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num); in qmp_x_query_virtio_queue_element()
4318 head = vring_avail_ring(vq, index % vq->vring.num); in qmp_x_query_virtio_queue_element()
4322 caches = vring_get_region_caches(vq); in qmp_x_query_virtio_queue_element()
4354 element->avail->flags = vring_avail_flags(vq); in qmp_x_query_virtio_queue_element()
4355 element->avail->idx = vring_avail_idx(vq); in qmp_x_query_virtio_queue_element()
4357 element->used->flags = vring_used_flags(vq); in qmp_x_query_virtio_queue_element()
4358 element->used->idx = vring_used_idx(vq); in qmp_x_query_virtio_queue_element()