Lines matching "--disable-vhost-crypto"

10  * the COPYING file in the top-level directory.
16 #include "qapi/qapi-commands-virtio.h"
18 #include "qemu/defer-call.h"
19 #include "qemu/error-report.h"
21 #include "qemu/main-loop.h"
27 #include "hw/virtio/vhost.h"
28 #include "migration/qemu-file-types.h"
30 #include "hw/virtio/virtio-bus.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/virtio/virtio-access.h"
35 #include "virtio-qmp.h"
37 #include "standard-headers/linux/virtio_ids.h"
38 #include "standard-headers/linux/vhost_types.h"
39 #include "standard-headers/linux/virtio_blk.h"
40 #include "standard-headers/linux/virtio_console.h"
41 #include "standard-headers/linux/virtio_gpu.h"
42 #include "standard-headers/linux/virtio_net.h"
43 #include "standard-headers/linux/virtio_scsi.h"
44 #include "standard-headers/linux/virtio_i2c.h"
45 #include "standard-headers/linux/virtio_balloon.h"
46 #include "standard-headers/linux/virtio_iommu.h"
47 #include "standard-headers/linux/virtio_mem.h"
48 #include "standard-headers/linux/virtio_vsock.h"
159 [VIRTIO_ID_NET] = "virtio-net",
160 [VIRTIO_ID_BLOCK] = "virtio-blk",
161 [VIRTIO_ID_CONSOLE] = "virtio-serial",
162 [VIRTIO_ID_RNG] = "virtio-rng",
163 [VIRTIO_ID_BALLOON] = "virtio-balloon",
164 [VIRTIO_ID_IOMEM] = "virtio-iomem",
165 [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
166 [VIRTIO_ID_SCSI] = "virtio-scsi",
167 [VIRTIO_ID_9P] = "virtio-9p",
168 [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
169 [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
170 [VIRTIO_ID_CAIF] = "virtio-caif",
171 [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
172 [VIRTIO_ID_GPU] = "virtio-gpu",
173 [VIRTIO_ID_CLOCK] = "virtio-clk",
174 [VIRTIO_ID_INPUT] = "virtio-input",
175 [VIRTIO_ID_VSOCK] = "vhost-vsock",
176 [VIRTIO_ID_CRYPTO] = "virtio-crypto",
177 [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
178 [VIRTIO_ID_PSTORE] = "virtio-pstore",
179 [VIRTIO_ID_IOMMU] = "virtio-iommu",
180 [VIRTIO_ID_MEM] = "virtio-mem",
181 [VIRTIO_ID_SOUND] = "virtio-sound",
182 [VIRTIO_ID_FS] = "virtio-user-fs",
183 [VIRTIO_ID_PMEM] = "virtio-pmem",
184 [VIRTIO_ID_RPMB] = "virtio-rpmb",
185 [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
186 [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
187 [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
188 [VIRTIO_ID_SCMI] = "virtio-scmi",
189 [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
190 [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
191 [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
192 [VIRTIO_ID_CAN] = "virtio-can",
193 [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
194 [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
195 [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
196 [VIRTIO_ID_BT] = "virtio-bluetooth",
197 [VIRTIO_ID_GPIO] = "virtio-gpio"
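
The table above maps virtio device IDs to canonical device names. A minimal
sketch of the lookup that consumes it, assuming the array is named
virtio_device_names and is sparse (IDs without an entry stay NULL):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sparse table: designated initializers leave unlisted IDs NULL. */
    static const char *const virtio_device_names[] = {
        [1]  = "virtio-net",    /* VIRTIO_ID_NET */
        [2]  = "virtio-blk",    /* VIRTIO_ID_BLOCK */
        [19] = "vhost-vsock",   /* VIRTIO_ID_VSOCK */
        /* ... remaining entries as listed above ... */
    };

    static const char *virtio_id_to_name(uint16_t device_id)
    {
        size_t n = sizeof(virtio_device_names) / sizeof(virtio_device_names[0]);

        assert(device_id < n);                  /* ID must be in range... */
        assert(virtio_device_names[device_id]); /* ...and have a name */
        return virtio_device_names[device_id];
    }
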
213 vdev->name);
221 address_space_cache_destroy(&caches->desc);
222 address_space_cache_destroy(&caches->avail);
223 address_space_cache_destroy(&caches->used);
231 caches = qatomic_read(&vq->vring.caches);
232 qatomic_rcu_set(&vq->vring.caches, NULL);
240 VirtQueue *vq = &vdev->vq[n];
241 VRingMemoryRegionCaches *old = vq->vring.caches;
248 addr = vq->vring.desc;
254 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
256 len = address_space_cache_init(&new->desc, vdev->dma_as,
264 len = address_space_cache_init(&new->used, vdev->dma_as,
265 vq->vring.used, size, true);
272 len = address_space_cache_init(&new->avail, vdev->dma_as,
273 vq->vring.avail, size, false);
279 qatomic_rcu_set(&vq->vring.caches, new);
286 address_space_cache_destroy(&new->avail);
288 address_space_cache_destroy(&new->used);
290 address_space_cache_destroy(&new->desc);
299 VRing *vring = &vdev->vq[n].vring;
301 if (!vring->num || !vring->desc || !vring->align) {
302 /* not yet setup -> nothing to do */
305 vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
306 vring->used = vring_align(vring->avail +
307 offsetof(VRingAvail, ring[vring->num]),
308 vring->align);
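
The split-ring layout computed above follows the legacy formula from the
virtio spec: the avail ring starts immediately after the descriptor table,
and the used ring starts at the next alignment boundary. A self-contained
worked example (16-byte descriptors, a 4-byte avail header, 2 bytes per
avail slot; the base address is hypothetical):

    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t vring_align(uint64_t addr, unsigned long align)
    {
        return (addr + align - 1) & ~((uint64_t)align - 1);
    }

    int main(void)
    {
        uint64_t desc = 0x10000;            /* hypothetical guest address */
        unsigned num = 256, align = 4096;

        uint64_t avail = desc + num * 16;             /* sizeof(VRingDesc) */
        uint64_t used  = vring_align(avail + 4 + 2 * num, align);

        /* Prints: desc=0x10000 avail=0x11000 used=0x12000 */
        printf("desc=%#" PRIx64 " avail=%#" PRIx64 " used=%#" PRIx64 "\n",
               desc, avail, used);
        return 0;
    }
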
318 virtio_tswap64s(vdev, &desc->addr);
319 virtio_tswap32s(vdev, &desc->len);
320 virtio_tswap16s(vdev, &desc->flags);
321 virtio_tswap16s(vdev, &desc->next);
331 e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
334 e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
359 return qatomic_rcu_read(&vq->vring.caches);
372 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
385 vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
386 return vq->shadow_avail_idx;
399 return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
405 return vring_avail_ring(vq, vq->vring.num);
419 virtio_tswap32s(vq->vdev, &uelem->id);
420 virtio_tswap32s(vq->vdev, &uelem->len);
421 address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
422 address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
435 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
448 return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
458 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
459 address_space_cache_invalidate(&caches->used, pa, sizeof(val));
462 vq->used_idx = val;
469 VirtIODevice *vdev = vq->vdev;
477 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
478 virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
479 address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
486 VirtIODevice *vdev = vq->vdev;
494 flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
495 virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
496 address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
504 if (!vq->notification) {
513 pa = offsetof(VRingUsed, ring[vq->vring.num]);
514 virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
515 address_space_cache_invalidate(&caches->used, pa, sizeof(val));
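
With VIRTIO_RING_F_EVENT_IDX negotiated, the avail_event field written above
sits directly after the used ring entries, which is why its offset is
offsetof(VRingUsed, ring[num]). A sketch of the layout, with field widths
inferred from the accessors in this file:

    #include <stdint.h>

    typedef struct {
        uint32_t id;          /* head of the completed descriptor chain */
        uint32_t len;         /* bytes the device wrote into the buffer */
    } VRingUsedElem;

    typedef struct {
        uint16_t flags;
        uint16_t idx;
        VRingUsedElem ring[]; /* num entries; a uint16_t avail_event
                                 follows at offsetof(VRingUsed, ring[num]) */
    } VRingUsed;
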
522 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
547 vring_packed_event_read(vq->vdev, &caches->used, &e);
551 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
552 off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
553 vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
561 vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
570 return vq->notification;
575 vq->notification = enable;
577 if (!vq->vring.desc) {
581 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
590 return vq->vring.avail != 0;
610 vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
618 &desc->addr, sizeof(desc->addr));
620 &desc->id, sizeof(desc->id));
622 &desc->len, sizeof(desc->len));
623 virtio_tswap64s(vdev, &desc->addr);
624 virtio_tswap16s(vdev, &desc->id);
625 virtio_tswap32s(vdev, &desc->len);
638 virtio_tswap32s(vdev, &desc->len);
639 virtio_tswap16s(vdev, &desc->id);
640 address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
641 address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
642 address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
643 address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
653 virtio_stw_phys_cached(vdev, cache, off, desc->flags);
654 address_space_cache_invalidate(cache, off, sizeof(desc->flags));
684 if (virtio_device_disabled(vq->vdev)) {
688 if (unlikely(!vq->vring.avail)) {
692 if (vq->shadow_avail_idx != vq->last_avail_idx) {
696 return vring_avail_idx(vq) == vq->last_avail_idx;
703 if (virtio_device_disabled(vq->vdev)) {
707 if (unlikely(!vq->vring.avail)) {
711 if (vq->shadow_avail_idx != vq->last_avail_idx) {
716 empty = vring_avail_idx(vq) == vq->last_avail_idx;
726 if (unlikely(!vq->vring.desc)) {
735 vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
736 vq->last_avail_idx);
738 return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
749 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
758 if (unlikely(!vq->vring.avail)) {
770 if (unlikely(!vq->vring.desc)) {
779 vring_packed_desc_read(vq->vdev, &desc, &caches->desc,
782 return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter);
787 if (virtio_device_disabled(vq->vdev)) {
791 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
813 AddressSpace *dma_as = vq->vdev->dma_as;
818 for (i = 0; i < elem->in_num; i++) {
819 size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
821 dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
822 elem->in_sg[i].iov_len,
828 for (i = 0; i < elem->out_num; i++)
829 dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
830 elem->out_sg[i].iov_len,
832 elem->out_sg[i].iov_len);
847 vq->inuse -= elem->ndescs;
853 vq->last_avail_idx -= num;
858 if (vq->last_avail_idx < num) {
859 vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
860 vq->last_avail_wrap_counter ^= 1;
862 vq->last_avail_idx -= num;
878 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
901 if (num > vq->inuse) {
905 vq->inuse -= num;
906 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
919 if (unlikely(!vq->vring.used)) {
923 idx = (idx + vq->used_idx) % vq->vring.num;
925 uelem.id = elem->index;
933 vq->used_elems[idx].index = elem->index;
934 vq->used_elems[idx].len = len;
935 vq->used_elems[idx].ndescs = elem->ndescs;
943 i = vq->used_idx % vq->vring.num;
949 max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num;
951 /* Search for element in vq->used_elems */
954 if (vq->used_elems[i].index == elem->index) {
955 vq->used_elems[i].len = len;
956 vq->used_elems[i].in_order_filled = true;
960 steps += vq->used_elems[i].ndescs;
961 i += vq->used_elems[i].ndescs;
963 if (i >= vq->vring.num) {
964 i -= vq->vring.num;
974 __func__, vq->vdev->name, elem->index);
986 .id = elem->index,
987 .len = elem->len,
989 bool wrap_counter = vq->used_wrap_counter;
991 if (unlikely(!vq->vring.desc)) {
995 head = vq->used_idx + idx;
996 if (head >= vq->vring.num) {
997 head -= vq->vring.num;
1013 vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
1024 if (virtio_device_disabled(vq->vdev)) {
1028 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
1030 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1042 if (unlikely(!vq->vring.used)) {
1049 old = vq->used_idx;
1052 vq->inuse -= count;
1053 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
1054 vq->signalled_used_valid = false;
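
The signalled_used test above is the device-side form of the spec's
event-index rule: interrupt the guest only if used_event fell inside the
batch of used entries published since the last notification. The spec's
helper (also in Linux's virtio_ring.h), wrap-safe because the indices are
free-running 16-bit values:

    #include <stdint.h>

    static inline int vring_need_event(uint16_t event_idx,
                                       uint16_t new_idx, uint16_t old)
    {
        /* True iff event_idx lies in the half-open window (old, new_idx],
         * computed mod 2^16 to survive index wrap-around. */
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }
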
1061 if (unlikely(!vq->vring.desc)) {
1071 * the value of 'vq->used_idx' plus the 'ndescs'.
1073 ndescs += vq->used_elems[0].ndescs;
1075 virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
1076 ndescs += vq->used_elems[i].ndescs;
1078 virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
1080 vq->inuse -= ndescs;
1081 vq->used_idx += ndescs;
1082 if (vq->used_idx >= vq->vring.num) {
1083 vq->used_idx -= vq->vring.num;
1084 vq->used_wrap_counter ^= 1;
1085 vq->signalled_used_valid = false;
1091 unsigned int i = vq->used_idx % vq->vring.num;
1093 uint16_t old = vq->used_idx;
1098 packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
1101 if (unlikely(!vq->vring.desc)) {
1104 } else if (unlikely(!vq->vring.used)) {
1108 /* First expected in-order element isn't ready, nothing to do */
1109 if (!vq->used_elems[i].in_order_filled) {
1113 /* Search for filled elements in-order */
1114 while (vq->used_elems[i].in_order_filled) {
1119 if (packed && i != vq->used_idx) {
1120 virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
1122 uelem.id = vq->used_elems[i].index;
1123 uelem.len = vq->used_elems[i].len;
1127 vq->used_elems[i].in_order_filled = false;
1128 ndescs += vq->used_elems[i].ndescs;
1129 i += vq->used_elems[i].ndescs;
1130 if (i >= vq->vring.num) {
1131 i -= vq->vring.num;
1136 virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true);
1137 vq->used_idx += ndescs;
1138 if (vq->used_idx >= vq->vring.num) {
1139 vq->used_idx -= vq->vring.num;
1140 vq->used_wrap_counter ^= 1;
1141 vq->signalled_used_valid = false;
1148 if (unlikely((int16_t)(new - vq->signalled_used) <
1149 (uint16_t)(new - old))) {
1150 vq->signalled_used_valid = false;
1153 vq->inuse -= ndescs;
1158 if (virtio_device_disabled(vq->vdev)) {
1159 vq->inuse -= count;
1163 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
1165 } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1186 avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
1188 num_heads = avail_idx - idx;
1191 if (num_heads > vq->vring.num) {
1192 virtio_error(vq->vdev, "Guest moved used index from %u to %u",
1193 idx, vq->shadow_avail_idx);
1194 return -EINVAL;
1197 * On success, callers read a descriptor at vq->last_avail_idx.
1218 *head = vring_avail_ring(vq, idx % vq->vring.num);
1221 if (*head >= vq->vring.num) {
1222 virtio_error(vq->vdev, "Guest says index %u is available", *head);
1230 VIRTQUEUE_READ_DESC_ERROR = -1,
1235 /* Reads the 'desc->next' descriptor into '*desc'. */
1241 if (!(desc->flags & VRING_DESC_F_NEXT)) {
1246 if (desc->next >= max) {
1247 virtio_error(vdev, "Desc next is %u", desc->next);
1251 vring_split_desc_read(vdev, desc, desc_cache, desc->next);
1261 VirtIODevice *vdev = vq->vdev;
1270 idx = vq->last_avail_idx;
1274 MemoryRegionCache *desc_cache = &caches->desc;
1278 unsigned int max = vq->vring.num;
1302 vdev->dma_as,
1374 if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
1383 (*next) -= vq->vring.num;
1387 vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
1399 VirtIODevice *vdev = vq->vdev;
1410 idx = vq->last_avail_idx;
1411 wrap_counter = vq->last_avail_wrap_counter;
1418 unsigned int max = vq->vring.num;
1420 desc_cache = &caches->desc;
1441 vdev->dma_as,
1480 idx += num_bufs - total_bufs;
1484 if (idx >= vq->vring.num) {
1485 idx -= vq->vring.num;
1491 vq->shadow_avail_idx = idx;
1492 vq->shadow_avail_wrap_counter = wrap_counter;
1517 if (unlikely(!vq->vring.desc)) {
1526 desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
1528 if (caches->desc.len < vq->vring.num * desc_size) {
1529 virtio_error(vq->vdev, "Cannot map descriptor ring");
1533 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1543 return (int)vq->shadow_avail_idx;
1552 return -1;
1587 iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
1600 sz -= len;
1623 cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
1637 sg[i].iov_base = dma_memory_map(vdev->dma_as,
1655 virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
1656 virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
1663 size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1664 size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1665 size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1666 size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1667 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1668 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1673 elem->out_num = out_num;
1674 elem->in_num = in_num;
1675 elem->in_addr = (void *)elem + in_addr_ofs;
1676 elem->out_addr = (void *)elem + out_addr_ofs;
1677 elem->in_sg = (void *)elem + in_sg_ofs;
1678 elem->out_sg = (void *)elem + out_sg_ofs;
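
The offsets above carve a single heap allocation into the element header
plus four trailing arrays, each rounded up to its member alignment. A
simplified, self-contained sketch of the same trick (the real
VirtQueueElement carries more state; arithmetic on void * is a GCC/Clang
extension that QEMU itself relies on):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/uio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    typedef struct {
        unsigned int in_num, out_num;
        uint64_t *in_addr, *out_addr;
        struct iovec *in_sg, *out_sg;
    } Elem;

    /* sz is the size of the caller's header struct (here: sizeof(Elem)). */
    static Elem *elem_alloc(size_t sz, unsigned in_num, unsigned out_num)
    {
        size_t in_addr_ofs  = ALIGN_UP(sz, __alignof__(uint64_t));
        size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(uint64_t);
        size_t out_addr_end = out_addr_ofs + out_num * sizeof(uint64_t);
        size_t in_sg_ofs    = ALIGN_UP(out_addr_end, __alignof__(struct iovec));
        size_t out_sg_ofs   = in_sg_ofs + in_num * sizeof(struct iovec);
        size_t out_sg_end   = out_sg_ofs + out_num * sizeof(struct iovec);

        Elem *elem = calloc(1, out_sg_end);  /* one allocation, one free() */
        if (!elem) {
            return NULL;
        }
        elem->in_num   = in_num;
        elem->out_num  = out_num;
        elem->in_addr  = (void *)elem + in_addr_ofs;
        elem->out_addr = (void *)elem + out_addr_ofs;
        elem->in_sg    = (void *)elem + in_sg_ofs;
        elem->out_sg   = (void *)elem + out_sg_ofs;
        return elem;
    }

A single allocation keeps teardown to one free() and gives better cache
locality than four separately allocated arrays.
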
1689 VirtIODevice *vdev = vq->vdev;
1710 max = vq->vring.num;
1712 if (vq->inuse >= vq->vring.num) {
1717 if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1722 vring_set_avail_event(vq, vq->last_avail_idx);
1733 if (caches->desc.len < max * sizeof(VRingDesc)) {
1738 desc_cache = &caches->desc;
1748 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1768 VIRTQUEUE_MAX_SIZE - out_num, true,
1798 elem->index = head;
1799 elem->ndescs = 1;
1801 elem->out_addr[i] = addr[i];
1802 elem->out_sg[i] = iov[i];
1805 elem->in_addr[i] = addr[out_num + i];
1806 elem->in_sg[i] = iov[out_num + i];
1810 idx = (vq->last_avail_idx - 1) % vq->vring.num;
1811 vq->used_elems[idx].index = elem->index;
1812 vq->used_elems[idx].len = elem->len;
1813 vq->used_elems[idx].ndescs = elem->ndescs;
1816 vq->inuse++;
1818 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1836 VirtIODevice *vdev = vq->vdev;
1855 max = vq->vring.num;
1857 if (vq->inuse >= vq->vring.num) {
1862 i = vq->last_avail_idx;
1870 if (caches->desc.len < max * sizeof(VRingDesc)) {
1875 desc_cache = &caches->desc;
1886 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1906 VIRTQUEUE_MAX_SIZE - out_num, true,
1940 elem->out_addr[i] = addr[i];
1941 elem->out_sg[i] = iov[i];
1944 elem->in_addr[i] = addr[out_num + i];
1945 elem->in_sg[i] = iov[out_num + i];
1948 elem->index = id;
1949 elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1952 vq->used_elems[vq->last_avail_idx].index = elem->index;
1953 vq->used_elems[vq->last_avail_idx].len = elem->len;
1954 vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs;
1957 vq->last_avail_idx += elem->ndescs;
1958 vq->inuse += elem->ndescs;
1960 if (vq->last_avail_idx >= vq->vring.num) {
1961 vq->last_avail_idx -= vq->vring.num;
1962 vq->last_avail_wrap_counter ^= 1;
1965 vq->shadow_avail_idx = vq->last_avail_idx;
1966 vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1968 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1981 if (virtio_device_disabled(vq->vdev)) {
1985 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1998 VirtIODevice *vdev = vq->vdev;
2008 desc_cache = &caches->desc;
2012 while (vq->inuse < vq->vring.num) {
2013 unsigned int idx = vq->last_avail_idx;
2019 vq->last_avail_idx, true);
2020 if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
2026 vq->vring.num, &idx, false)) {
2035 vq->last_avail_idx += elem.ndescs;
2036 if (vq->last_avail_idx >= vq->vring.num) {
2037 vq->last_avail_idx -= vq->vring.num;
2038 vq->last_avail_wrap_counter ^= 1;
2049 VirtIODevice *vdev = vq->vdev;
2052 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
2056 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
2059 vq->inuse++;
2060 vq->last_avail_idx++;
2062 vring_set_avail_event(vq, vq->last_avail_idx);
2081 struct VirtIODevice *vdev = vq->vdev;
2083 if (virtio_device_disabled(vq->vdev)) {
2097 * In the meanwhile, since the in-memory layout of VirtQueueElement
2128 elem->index = data.index;
2130 for (i = 0; i < elem->in_num; i++) {
2131 elem->in_addr[i] = data.in_addr[i];
2134 for (i = 0; i < elem->out_num; i++) {
2135 elem->out_addr[i] = data.out_addr[i];
2138 for (i = 0; i < elem->in_num; i++) {
2140 elem->in_sg[i].iov_base = 0;
2141 elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
2144 for (i = 0; i < elem->out_num; i++) {
2146 elem->out_sg[i].iov_base = 0;
2147 elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
2151 qemu_get_be32s(f, &elem->ndescs);
2165 data.index = elem->index;
2166 data.in_num = elem->in_num;
2167 data.out_num = elem->out_num;
2169 for (i = 0; i < elem->in_num; i++) {
2170 data.in_addr[i] = elem->in_addr[i];
2173 for (i = 0; i < elem->out_num; i++) {
2174 data.out_addr[i] = elem->out_addr[i];
2177 for (i = 0; i < elem->in_num; i++) {
2180 data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
2183 for (i = 0; i < elem->out_num; i++) {
2185 data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
2189 qemu_put_be32s(f, &elem->ndescs);
2205 if (k->notify) {
2206 k->notify(qbus->parent, vector);
2221 return -EFAULT;
2224 if (k->validate_features) {
2225 return k->validate_features(vdev);
2238 if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
2247 if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
2252 if (k->set_status) {
2253 ret = k->set_status(vdev, val);
2256 vdev->name, val, vdev->status);
2259 vdev->status = val;
2284 vdev->vq[i].vring.desc = 0;
2285 vdev->vq[i].vring.avail = 0;
2286 vdev->vq[i].vring.used = 0;
2287 vdev->vq[i].last_avail_idx = 0;
2288 vdev->vq[i].shadow_avail_idx = 0;
2289 vdev->vq[i].used_idx = 0;
2290 vdev->vq[i].last_avail_wrap_counter = true;
2291 vdev->vq[i].shadow_avail_wrap_counter = true;
2292 vdev->vq[i].used_wrap_counter = true;
2294 vdev->vq[i].signalled_used = 0;
2295 vdev->vq[i].signalled_used_valid = false;
2296 vdev->vq[i].notification = true;
2297 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2298 vdev->vq[i].inuse = 0;
2299 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2306 if (k->queue_reset) {
2307 k->queue_reset(vdev, queue_index);
2320 * be re-enabled for new machine types only, and also after
2329 if (k->queue_enable) {
2330 k->queue_enable(vdev, queue_index);
2336 if (!vdev->vq[n].vring.num) {
2339 vdev->vq[n].vring.desc = addr;
2345 return vdev->vq[n].vring.desc;
2351 if (!vdev->vq[n].vring.num) {
2354 vdev->vq[n].vring.desc = desc;
2355 vdev->vq[n].vring.avail = avail;
2356 vdev->vq[n].vring.used = used;
2365 if (!!num != !!vdev->vq[n].vring.num ||
2370 vdev->vq[n].vring.num = num;
2375 return QLIST_FIRST(&vdev->vector_queues[vector]);
2385 return vdev->vq[n].vring.num;
2390 return vdev->vq[n].vring.num_default;
2411 /* virtio-1 compliant devices cannot change the alignment */
2413 error_report("tried to modify queue alignment for virtio-1 device");
2420 assert(k->has_variable_vring_alignment);
2423 vdev->vq[n].vring.align = align;
2430 if (!vq->vring.desc) {
2435 * 16-bit data for packed VQs include 1-bit wrap counter and
2436 * 15-bit shadow_avail_idx.
2438 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
2439 vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1;
2440 vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF;
2442 vq->shadow_avail_idx = shadow_avail_idx;
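
For packed queues the transported 16-bit value multiplexes a 15-bit ring
index with the wrap counter in bit 15, the same encoding
virtio_queue_packed_get/set_last_avail_idx use further down. A sketch of
the pack/unpack helpers:

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint16_t packed_idx_pack(uint16_t idx, bool wrap)
    {
        return (idx & 0x7fff) | ((uint16_t)wrap << 15);
    }

    static inline void packed_idx_unpack(uint16_t val,
                                         uint16_t *idx, bool *wrap)
    {
        *idx  = val & 0x7fff;
        *wrap = (val >> 15) & 0x1;
    }
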
2448 if (vq->vring.desc && vq->handle_output) {
2449 VirtIODevice *vdev = vq->vdev;
2451 if (unlikely(vdev->broken)) {
2455 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2456 vq->handle_output(vdev, vq);
2458 if (unlikely(vdev->start_on_kick)) {
2466 VirtQueue *vq = &vdev->vq[n];
2468 if (unlikely(!vq->vring.desc || vdev->broken)) {
2472 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2473 if (vq->host_notifier_enabled) {
2474 event_notifier_set(&vq->host_notifier);
2475 } else if (vq->handle_output) {
2476 vq->handle_output(vdev, vq);
2478 if (unlikely(vdev->start_on_kick)) {
2486 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2492 VirtQueue *vq = &vdev->vq[n];
2495 if (vdev->vector_queues &&
2496 vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2499 vdev->vq[n].vector = vector;
2500 if (vdev->vector_queues &&
2502 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2513 if (vdev->vq[i].vring.num == 0)
2520 vdev->vq[i].vring.num = queue_size;
2521 vdev->vq[i].vring.num_default = queue_size;
2522 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2523 vdev->vq[i].handle_output = handle_output;
2524 vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
2526 return &vdev->vq[i];
2531 vq->vring.num = 0;
2532 vq->vring.num_default = 0;
2533 vq->handle_output = NULL;
2534 g_free(vq->used_elems);
2535 vq->used_elems = NULL;
2545 virtio_delete_queue(&vdev->vq[n]);
2550 uint8_t old = qatomic_read(&vdev->isr);
2556 qatomic_or(&vdev->isr, value);
2569 !vq->inuse && virtio_queue_empty(vq)) {
2577 v = vq->signalled_used_valid;
2578 vq->signalled_used_valid = true;
2579 old = vq->signalled_used;
2580 new = vq->signalled_used = vq->used_idx;
2591 off -= vq->vring.num;
2610 vring_packed_event_read(vdev, &caches->avail, &e);
2612 old = vq->signalled_used;
2613 new = vq->signalled_used = vq->used_idx;
2614 v = vq->signalled_used_valid;
2615 vq->signalled_used_valid = true;
2623 return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2643 trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
2659 * windows drivers included in virtio-win 1.8.0 (circa 2015) are
2672 virtio_set_isr(vq->vdev, 0x1);
2673 defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
2678 virtio_set_isr(vq->vdev, 0x1);
2679 virtio_notify_vector(vq->vdev, vq->vector);
2696 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2700 vdev->generation++;
2701 virtio_notify_vector(vdev, vdev->config_vector);
2708 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2710 return vdev->device_endian != virtio_default_endian();
2713 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2720 return (vdev->host_features >> 32) != 0;
2743 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2756 return k->has_extra_state &&
2757 k->has_extra_state(qbus->parent);
2764 return vdev->broken;
2771 return vdev->started;
2778 return vdev->disabled;
2859 if (!k->load_extra_state) {
2860 return -1;
2862 return k->load_extra_state(qbus->parent, f);
2873 k->save_extra_state(qbus->parent, f);
2983 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
2986 if (k->save_config) {
2987 k->save_config(qbus->parent, f);
2990 qemu_put_8s(f, &vdev->status);
2991 qemu_put_8s(f, &vdev->isr);
2992 qemu_put_be16s(f, &vdev->queue_sel);
2994 qemu_put_be32(f, vdev->config_len);
2995 qemu_put_buffer(f, vdev->config, vdev->config_len);
2998 if (vdev->vq[i].vring.num == 0)
3005 if (vdev->vq[i].vring.num == 0)
3008 qemu_put_be32(f, vdev->vq[i].vring.num);
3009 if (k->has_variable_vring_alignment) {
3010 qemu_put_be32(f, vdev->vq[i].vring.align);
3014 * subsections for VIRTIO-1 devices.
3016 qemu_put_be64(f, vdev->vq[i].vring.desc);
3017 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
3018 if (k->save_queue) {
3019 k->save_queue(qbus->parent, i, f);
3023 if (vdc->save != NULL) {
3024 vdc->save(vdev, f);
3027 if (vdc->vmsd) {
3028 int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
3053 return virtio_load(vdev, f, dc->vmsd->version_id);
3065 bool bad = (val & ~(vdev->host_features)) != 0;
3067 val &= vdev->host_features;
3068 if (k->set_features) {
3069 k->set_features(vdev, val);
3071 vdev->guest_features = val;
3072 return bad ? -1 : 0;
3086 data->ret = virtio_set_features_nocheck(data->vdev, data->val);
3087 aio_co_wake(data->co);
3115 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
3116 return -EINVAL;
3122 __func__, vdev->name);
3130 if (vdev->vq[i].vring.num != 0) {
3136 if (!virtio_device_started(vdev, vdev->status) &&
3138 vdev->start_on_kick = true;
3153 vdev->device_endian = virtio_current_cpu_endian();
3156 vdev->device_endian = virtio_default_endian();
3159 if (k->get_vhost) {
3160 struct vhost_dev *hdev = k->get_vhost(vdev);
3161 /* Only reset when vhost back-end is connected */
3162 if (hdev && hdev->vhost_ops) {
3167 if (k->reset) {
3168 k->reset(vdev);
3171 vdev->start_on_kick = false;
3172 vdev->started = false;
3173 vdev->broken = false;
3175 vdev->queue_sel = 0;
3176 vdev->status = 0;
3177 vdev->disabled = false;
3178 qatomic_set(&vdev->isr, 0);
3179 vdev->config_vector = VIRTIO_NO_VECTOR;
3180 virtio_notify_vector(vdev, vdev->config_vector);
3192 DeviceState *proxy = DEVICE(BUS(bus)->parent);
3195 k->ioeventfd_enabled(proxy)) {
3204 size_t config_size = params->min_size;
3205 const VirtIOFeature *feature_sizes = params->feature_sizes;
3214 assert(config_size <= params->max_size);
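
A hedged sketch of how the clamp around this assert is typically driven:
start from min_size, grow the size to cover the config fields of every
negotiated feature, and require the result to stay within max_size. Pairing
a feature bit mask with the config offset it needs mirrors QEMU's
VirtIOFeature, but the loop body here is an assumption:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint64_t flags; size_t end; } Feature;

    static size_t config_size_for(uint64_t host_features, size_t min_size,
                                  size_t max_size, const Feature *sizes)
    {
        size_t config_size = min_size;

        /* sizes[] is terminated by an entry with flags == 0. */
        for (size_t i = 0; sizes[i].flags != 0; i++) {
            if (host_features & sizes[i].flags) {
                config_size = config_size > sizes[i].end ? config_size
                                                         : sizes[i].end;
            }
        }
        assert(config_size <= max_size);
        return config_size;
    }
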
3233 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3235 if (k->load_config) {
3236 ret = k->load_config(qbus->parent, f);
3241 qemu_get_8s(f, &vdev->status);
3242 qemu_get_8s(f, &vdev->isr);
3243 qemu_get_be16s(f, &vdev->queue_sel);
3244 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3245 return -1;
3250 * Temporarily set guest_features low bits - needed by
3254 * Note: devices should always test host features in future - don't create
3257 vdev->guest_features = features;
3266 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3268 while (config_len > vdev->config_len) {
3270 config_len--;
3273 if (vdc->pre_load_queues) {
3274 ret = vdc->pre_load_queues(vdev);
3284 return -1;
3288 vdev->vq[i].vring.num = qemu_get_be32(f);
3289 if (k->has_variable_vring_alignment) {
3290 vdev->vq[i].vring.align = qemu_get_be32(f);
3292 vdev->vq[i].vring.desc = qemu_get_be64(f);
3293 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3294 vdev->vq[i].signalled_used_valid = false;
3295 vdev->vq[i].notification = true;
3297 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3300 i, vdev->vq[i].last_avail_idx);
3301 return -1;
3303 if (k->load_queue) {
3304 ret = k->load_queue(qbus->parent, i, f);
3312 if (vdc->load != NULL) {
3313 ret = vdc->load(vdev, f, version_id);
3319 if (vdc->vmsd) {
3320 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3332 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3333 vdev->device_endian = virtio_default_endian();
3338 * Subsection load filled vdev->guest_features. Run them
3339 * through virtio_set_features to sanity-check them against
3342 uint64_t features64 = vdev->guest_features;
3346 features64, vdev->host_features);
3347 return -1;
3353 features, vdev->host_features);
3354 return -1;
3358 if (!virtio_device_started(vdev, vdev->status) &&
3360 vdev->start_on_kick = true;
3365 if (vdev->vq[i].vring.desc) {
3369 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3381 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3382 vdev->vq[i].shadow_avail_wrap_counter =
3383 vdev->vq[i].last_avail_wrap_counter;
3387 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3389 if (nheads > vdev->vq[i].vring.num) {
3392 i, vdev->vq[i].vring.num,
3393 vring_avail_idx(&vdev->vq[i]),
3394 vdev->vq[i].last_avail_idx, nheads);
3395 vdev->vq[i].used_idx = 0;
3396 vdev->vq[i].shadow_avail_idx = 0;
3397 vdev->vq[i].inuse = 0;
3400 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3401 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3409 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3410 vdev->vq[i].used_idx);
3411 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3412 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3414 i, vdev->vq[i].vring.num,
3415 vdev->vq[i].last_avail_idx,
3416 vdev->vq[i].used_idx);
3417 return -1;
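
The in-flight recovery above works because the ring indices are
free-running uint16_t values: the subtraction is taken mod 2^16 and the
result is then bounds-checked against the ring size. A minimal sketch:

    #include <stdint.h>

    /* Returns the number of in-flight descriptors, or -1 if the migrated
     * indices are inconsistent with a ring of ring_num entries. */
    static int vq_inuse(uint16_t last_avail_idx, uint16_t used_idx,
                        unsigned ring_num)
    {
        uint16_t inuse = (uint16_t)(last_avail_idx - used_idx);

        return inuse <= ring_num ? (int)inuse : -1;
    }
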
3422 if (vdc->post_load) {
3423 ret = vdc->post_load(vdev);
3434 qemu_del_vm_change_state_handler(vdev->vmstate);
3442 bool backend_run = running && virtio_device_started(vdev, vdev->status);
3443 vdev->vm_running = running;
3446 virtio_set_status(vdev, vdev->status);
3449 if (k->vmstate_change) {
3450 k->vmstate_change(qbus->parent, backend_run);
3454 int ret = virtio_set_status(vdev, vdev->status);
3467 object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3478 int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3481 vdev->vector_queues =
3482 g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3485 vdev->start_on_kick = false;
3486 vdev->started = false;
3487 vdev->vhost_started = false;
3488 vdev->device_id = device_id;
3489 vdev->status = 0;
3490 qatomic_set(&vdev->isr, 0);
3491 vdev->queue_sel = 0;
3492 vdev->config_vector = VIRTIO_NO_VECTOR;
3493 vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
3494 vdev->vm_running = runstate_is_running();
3495 vdev->broken = false;
3497 vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3498 vdev->vq[i].vdev = vdev;
3499 vdev->vq[i].queue_index = i;
3500 vdev->vq[i].host_notifier_enabled = false;
3503 vdev->name = virtio_id_to_name(device_id);
3504 vdev->config_len = config_size;
3505 if (vdev->config_len) {
3506 vdev->config = g_malloc0(config_size);
3508 vdev->config = NULL;
3510 vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3512 vdev->device_endian = virtio_default_endian();
3513 vdev->use_guest_notifier_mask = true;
3523 switch (vdev->device_id) {
3542 return vdev->disable_legacy_check;
3547 return vdev->vq[n].vring.desc;
3560 if (k->queue_enabled) {
3561 return k->queue_enabled(qbus->parent, n);
3568 return vdev->vq[n].vring.avail;
3573 return vdev->vq[n].vring.used;
3578 return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3591 sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3604 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3612 avail = vdev->vq[n].last_avail_idx;
3613 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3615 used = vdev->vq[n].used_idx;
3616 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3624 return vdev->vq[n].last_avail_idx;
3639 struct VirtQueue *vq = &vdev->vq[n];
3641 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3642 vq->last_avail_wrap_counter =
3643 vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3645 vq->used_idx = idx & 0x7fff;
3646 vq->used_wrap_counter = !!(idx & 0x8000);
3652 vdev->vq[n].last_avail_idx = idx;
3653 vdev->vq[n].shadow_avail_idx = idx;
3676 if (vdev->vq[n].vring.desc) {
3677 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3678 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3699 if (vdev->vq[n].vring.desc) {
3700 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3715 vdev->vq[n].signalled_used_valid = false;
3720 return vdev->vq + n;
3725 return vq->queue_index;
3747 event_notifier_set_handler(&vq->guest_notifier,
3750 event_notifier_set_handler(&vq->guest_notifier, NULL);
3755 virtio_queue_guest_notifier_read(&vq->guest_notifier);
3763 n = &vdev->config_notifier;
3778 return &vq->guest_notifier;
3793 return vq->vring.desc && !virtio_queue_empty(vq);
3815 * Re-enable them. (And if detach has not been used before, notifications
3824 aio_set_event_notifier(ctx, &vq->host_notifier,
3828 aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3837 event_notifier_set(&vq->host_notifier);
3843 * function does not pop all elements. When the virtqueue is left non-empty
3853 aio_set_event_notifier(ctx, &vq->host_notifier,
3863 event_notifier_set(&vq->host_notifier);
3868 aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
3877 * we potentially re-attach it. The attach_host_notifier functions will
3892 return &vq->host_notifier;
3897 return &vdev->config_notifier;
3902 vq->host_notifier_enabled = enabled;
3911 if (k->set_host_notifier_mr) {
3912 return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3915 return -1;
3920 g_free(vdev->bus_name);
3921 vdev->bus_name = g_strdup(bus_name);
3933 vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
3937 vdev->broken = true;
3946 if (vdev->vq[i].vring.num == 0) {
3960 assert(!vdc->vmsd || !vdc->load);
3962 if (vdc->realize != NULL) {
3963 vdc->realize(dev, &err);
3974 vdc->unrealize(dev);
3981 vdc->unrealize(dev);
3985 vdev->listener.commit = virtio_memory_listener_commit;
3986 vdev->listener.name = "virtio";
3987 memory_listener_register(&vdev->listener, vdev->dma_as);
3995 memory_listener_unregister(&vdev->listener);
3998 if (vdc->unrealize != NULL) {
3999 vdc->unrealize(dev);
4002 g_free(vdev->bus_name);
4003 vdev->bus_name = NULL;
4009 if (!vdev->vq) {
4014 if (vdev->vq[i].vring.num == 0) {
4017 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
4019 g_free(vdev->vq);
4028 g_free(vdev->config);
4029 g_free(vdev->vector_queues);
4034 DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
4035 DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
4036 DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
4051 VirtQueue *vq = &vdev->vq[n];
4060 event_notifier_set_handler(&vq->host_notifier,
4066 VirtQueue *vq = &vdev->vq[n];
4067 if (!vq->vring.num) {
4070 event_notifier_set(&vq->host_notifier);
4077 while (--n >= 0) {
4078 VirtQueue *vq = &vdev->vq[n];
4083 event_notifier_set_handler(&vq->host_notifier, NULL);
4093 while (--i >= 0) {
4121 VirtQueue *vq = &vdev->vq[n];
4126 event_notifier_set_handler(&vq->host_notifier, NULL);
4166 dc->realize = virtio_device_realize;
4167 dc->unrealize = virtio_device_unrealize;
4168 dc->bus_type = TYPE_VIRTIO_BUS;
4170 vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
4171 vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
4173 vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
4203 status->name = g_strdup(vdev->name);
4204 status->queue_index = vdev->vq[queue].queue_index;
4205 status->inuse = vdev->vq[queue].inuse;
4206 status->vring_num = vdev->vq[queue].vring.num;
4207 status->vring_num_default = vdev->vq[queue].vring.num_default;
4208 status->vring_align = vdev->vq[queue].vring.align;
4209 status->vring_desc = vdev->vq[queue].vring.desc;
4210 status->vring_avail = vdev->vq[queue].vring.avail;
4211 status->vring_used = vdev->vq[queue].vring.used;
4212 status->used_idx = vdev->vq[queue].used_idx;
4213 status->signalled_used = vdev->vq[queue].signalled_used;
4214 status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
4216 if (vdev->vhost_started) {
4218 struct vhost_dev *hdev = vdc->get_vhost(vdev);
4220 /* check if vq index exists for vhost as well */
4221 if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
4222 status->has_last_avail_idx = true;
4225 hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
4230 status->last_avail_idx =
4231 hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
4234 status->has_shadow_avail_idx = true;
4235 status->has_last_avail_idx = true;
4236 status->last_avail_idx = vdev->vq[queue].last_avail_idx;
4237 status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
4266 node->value = g_strdup(map[i].value);
4267 node->next = list;
4294 vq = &vdev->vq[queue];
4313 max = vq->vring.num;
4316 head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
4318 head = vring_avail_ring(vq, index % vq->vring.num);
4327 if (caches->desc.len < max * sizeof(VRingDesc)) {
4332 desc_cache = &caches->desc;
4336 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
4350 element->avail = g_new0(VirtioRingAvail, 1);
4351 element->used = g_new0(VirtioRingUsed, 1);
4352 element->name = g_strdup(vdev->name);
4353 element->index = head;
4354 element->avail->flags = vring_avail_flags(vq);
4355 element->avail->idx = vring_avail_idx(vq);
4356 element->avail->ring = head;
4357 element->used->flags = vring_used_flags(vq);
4358 element->used->idx = vring_used_idx(vq);
4367 node->value = g_new0(VirtioRingDesc, 1);
4368 node->value->addr = desc.addr;
4369 node->value->len = desc.len;
4370 node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
4371 node->next = list;
4377 element->descs = list;
4406 DeviceState *transport = qdev_get_parent_bus(dev)->parent;
4409 &transport->mem_reentrancy_guard);