/* kvmtool: virtio/core.c (revision 755752d63b1dafda4305757eaf4ace737ec63933) */
#include <linux/virtio_ring.h>
#include <linux/types.h>
#include <sys/uio.h>
#include <stdlib.h>
#include <errno.h>

#include "kvm/barrier.h"
#include "kvm/virtio.h"
#include "kvm/virtio-pci.h"
#include "kvm/virtio-mmio.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

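/*
 * Publish a completed descriptor chain to the used ring: record the
 * chain's head index and the number of bytes written, then make the
 * entry visible to the guest by advancing used->idx.
 */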
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *used_elem;

	used_elem	= &queue->vring.used->ring[queue->vring.used->idx % queue->vring.num];
	used_elem->id	= head;
	used_elem->len	= len;

	/*
	 * Use wmb to ensure that the used element has been updated with
	 * head and len. We need a wmb here since we can't advance idx
	 * unless we're ready to pass the used element to the guest.
	 */
	wmb();
	queue->vring.used->idx++;

	/*
	 * Use wmb to ensure the used idx has been increased before we
	 * signal the guest. Without a wmb here the guest may ignore the
	 * queue since it won't see an updated idx.
	 */
	wmb();

	return used_elem;
}

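/*
 * Illustrative use (a sketch, not code from this file): after consuming
 * a descriptor chain, a device model would typically complete it with
 *
 *	virt_queue__set_used_elem(vq, head, len);
 *	if (virtio_queue__should_signal(vq))
 *		vdev->ops->signal_vq(kvm, vdev, vq_index);
 *
 * where vq_index is the device's queue index (a hypothetical variable)
 * and signal_vq is the transport hook wired up in virtio_init() below.
 */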
/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if
 * we're at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off the end of the descriptors. */
	next = desc[i].next;
	/* Make sure the compiler knows to grab that: we don't want it changing! */
	wmb();

	return next;
}

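/*
 * Walk the descriptor chain starting at head (following an indirect
 * table if present) and fill iov with host addresses; *out and *in
 * count the guest-readable and guest-writable buffers respectively.
 */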
u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm)
{
	struct vring_desc *desc;
	u16 idx;
	u16 max;

	idx = head;
	*out = *in = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
		max = desc[idx].len / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, desc[idx].addr);
		idx = 0;
	}

	do {
		/* Grab this descriptor and translate its guest address. */
		iov[*out + *in].iov_len = desc[idx].len;
		iov[*out + *in].iov_base = guest_flat_to_host(kvm, desc[idx].addr);
		/* If this is an input descriptor, increment that count. */
		if (desc[idx].flags & VRING_DESC_F_WRITE)
			(*in)++;
		else
			(*out)++;
	} while ((idx = next_desc(desc, idx, max)) != max);

	return head;
}

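/*
 * Pop the next available descriptor chain and gather its buffers into
 * iov; returns the chain's head index for the later used-ring update.
 */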
u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
{
	u16 head;

	head = virt_queue__pop(vq);

	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
}

/*
 * Split the chain into iovecs; in and out are relative to the guest:
 * in_iov collects buffers the device writes into (guest input), out_iov
 * buffers the device reads from (guest output).
 */
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out)
{
	struct vring_desc *desc;
	u16 head, idx;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;
	do {
		desc = virt_queue__get_desc(queue, idx);
		if (desc->flags & VRING_DESC_F_WRITE) {
			in_iov[*in].iov_base = guest_flat_to_host(kvm,
								  desc->addr);
			in_iov[*in].iov_len = desc->len;
			(*in)++;
		} else {
			out_iov[*out].iov_base = guest_flat_to_host(kvm,
								    desc->addr);
			out_iov[*out].iov_len = desc->len;
			(*out)++;
		}
		if (desc->flags & VRING_DESC_F_NEXT)
			idx = desc->next;
		else
			break;
	} while (1);

	return head;
}

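/*
 * Map an offset within the device-specific PCI region onto either the
 * MSI-X vector registers (first 4 bytes when MSI-X is enabled) or the
 * device config space, returning which one was hit.
 */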
int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
{
	if (msix) {
		/* The two 16-bit MSI-X vector registers come first. */
		if (offset < 4)
			return VIRTIO_PCI_O_MSIX;
		else
			offset -= 4;
	}

	*config_off = offset;

	return VIRTIO_PCI_O_CONFIG;
}

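/*
 * Event-index (VIRTIO_RING_F_EVENT_IDX) suppression: only signal the
 * guest if used->idx has moved past the event index the guest last
 * advertised, and remember how far we have signalled.
 */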
bool virtio_queue__should_signal(struct virt_queue *vq)
{
	u16 old_idx, new_idx, event_idx;

	old_idx		= vq->last_used_signalled;
	new_idx		= vq->vring.used->idx;
	event_idx	= vring_used_event(&vq->vring);

	if (vring_need_event(event_idx, new_idx, old_idx)) {
		vq->last_used_signalled = new_idx;
		return true;
	}

	return false;
}

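/*
 * Attach a device to a transport: allocate the per-transport state,
 * wire the transport's callbacks into ops and run its init hook.
 */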
int virtio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		struct virtio_ops *ops, enum virtio_trans trans,
		int device_id, int subsys_id, int class)
{
	void *virtio;

	switch (trans) {
	case VIRTIO_PCI:
		virtio = calloc(1, sizeof(struct virtio_pci));
		if (!virtio)
			return -ENOMEM;
		vdev->virtio			= virtio;
		vdev->ops			= ops;
		vdev->ops->signal_vq		= virtio_pci__signal_vq;
		vdev->ops->signal_config	= virtio_pci__signal_config;
		vdev->ops->init			= virtio_pci__init;
		vdev->ops->exit			= virtio_pci__exit;
		vdev->ops->init(kvm, dev, vdev, device_id, subsys_id, class);
		break;
	case VIRTIO_MMIO:
		virtio = calloc(1, sizeof(struct virtio_mmio));
		if (!virtio)
			return -ENOMEM;
		vdev->virtio			= virtio;
		vdev->ops			= ops;
		vdev->ops->signal_vq		= virtio_mmio_signal_vq;
		vdev->ops->signal_config	= virtio_mmio_signal_config;
		vdev->ops->init			= virtio_mmio_init;
		vdev->ops->exit			= virtio_mmio_exit;
		vdev->ops->init(kvm, dev, vdev, device_id, subsys_id, class);
		break;
	default:
		return -1;
	}

	return 0;
}