/* xref: /kvmtool/virtio/core.c (revision 02eca50ce5df536abde00460924b7be00a121d41) */
#include <linux/virtio_ring.h>
#include <linux/types.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdlib.h>

#include "kvm/barrier.h"
#include "kvm/virtio.h"
#include "kvm/virtio-pci.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *used_elem;

	used_elem	= &queue->vring.used->ring[queue->vring.used->idx % queue->vring.num];
	used_elem->id	= head;
	used_elem->len	= len;

	/*
	 * Use a wmb to ensure the used element has been updated with head
	 * and len before idx is advanced: the guest must not see the new
	 * idx until the element is complete.
	 */
	wmb();
	queue->vring.used->idx++;

	/*
	 * Use a wmb to ensure the used idx has been increased before we
	 * signal the guest.  Without a wmb here the guest may ignore the
	 * queue, since it won't see an updated idx.
	 */
	wmb();

	return used_elem;
}
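
/*
 * Usage sketch (illustrative, not part of this file): after filling the
 * guest buffer described by the chain, the device publishes the chain's
 * head index and the number of bytes written.  "head" is the value earlier
 * returned by virt_queue__pop()/virt_queue__get_iov(); "reply" and
 * "reply_len" are placeholders.
 *
 *	memcpy(iov[0].iov_base, reply, reply_len);
 *	virt_queue__set_used_elem(vq, head, reply_len);
 */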

/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off the end of the descriptor table. */
	next = desc[i].next;
	/* Make sure the compiler knows to grab that: we don't want it changing! */
	wmb();

	return next;
}
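
/*
 * Chain-walk sketch (illustrative): counting the descriptors in one chain,
 * starting from a head index returned by virt_queue__pop().
 *
 *	unsigned int i = head, n = 0;
 *
 *	do {
 *		n++;
 *	} while ((i = next_desc(vq->vring.desc, i, vq->vring.num)) != vq->vring.num);
 */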

u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm)
{
	struct vring_desc *desc;
	u16 idx;
	u16 max;

	idx = head;
	*out = *in = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

	/* An indirect descriptor points to a table of descriptors in guest memory. */
	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
		max = desc[idx].len / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, desc[idx].addr);
		idx = 0;
	}

	do {
		/* Translate this descriptor into an iovec entry. */
		iov[*out + *in].iov_len = desc[idx].len;
		iov[*out + *in].iov_base = guest_flat_to_host(kvm, desc[idx].addr);
		/* If this is an input descriptor, increment that count. */
		if (desc[idx].flags & VRING_DESC_F_WRITE)
			(*in)++;
		else
			(*out)++;
	} while ((idx = next_desc(desc, idx, max)) != max);

	return head;
}

u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
{
	u16 head;

	head = virt_queue__pop(vq);

	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
}
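
/*
 * Usage sketch (illustrative, not part of this file): the usual device
 * pattern drains the avail ring, turning each buffer chain into an iovec
 * array.  virt_queue__available() comes from kvm/virtio.h; QUEUE_SIZE and
 * process() are placeholders.
 *
 *	u16 out, in, head;
 *	u32 len;
 *	struct iovec iov[QUEUE_SIZE];
 *
 *	while (virt_queue__available(vq)) {
 *		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
 *		len = process(iov, out, in);
 *		virt_queue__set_used_elem(vq, head, len);
 *	}
 */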

/* in and out are relative to guest */
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out)
{
	struct vring_desc *desc;
	u16 head, idx;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;
	do {
		desc = virt_queue__get_desc(queue, idx);
		if (desc->flags & VRING_DESC_F_WRITE) {
			in_iov[*in].iov_base = guest_flat_to_host(kvm,
								  desc->addr);
			in_iov[*in].iov_len = desc->len;
			(*in)++;
		} else {
			out_iov[*out].iov_base = guest_flat_to_host(kvm,
								    desc->addr);
			out_iov[*out].iov_len = desc->len;
			(*out)++;
		}
		if (desc->flags & VRING_DESC_F_NEXT)
			idx = desc->next;
		else
			break;
	} while (1);

	return head;
}
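
/*
 * Usage sketch (illustrative): request/response devices (virtio-9p style)
 * want the guest-readable and guest-writable halves of a chain in separate
 * arrays.  QUEUE_SIZE and the parse/reply steps are placeholders.
 *
 *	u16 in, out, head;
 *	struct iovec in_iov[QUEUE_SIZE], out_iov[QUEUE_SIZE];
 *
 *	head = virt_queue__get_inout_iov(kvm, vq, in_iov, out_iov, &in, &out);
 *	// read the request from out_iov[0..out), write the reply to in_iov[0..in)
 */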

int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
{
	/*
	 * With MSI-X enabled, the first four bytes of the device-specific
	 * region hold the MSI-X config and queue vectors (two bytes each);
	 * anything beyond them belongs to the device config space.
	 */
	if (msix) {
		if (offset < 4)
			return VIRTIO_PCI_O_MSIX;
		else
			offset -= 4;
	}

	*config_off = offset;

	return VIRTIO_PCI_O_CONFIG;
}
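
/*
 * Dispatch sketch (illustrative): a config-space I/O handler routes the
 * access on the return value.  VIRTIO_PCI_O_MSIX and VIRTIO_PCI_O_CONFIG
 * come from the kvm/virtio headers included above; the two handlers are
 * placeholders.
 *
 *	u32 config_off;
 *
 *	switch (virtio__get_dev_specific_field(offset, msix_enabled, &config_off)) {
 *	case VIRTIO_PCI_O_MSIX:
 *		handle_msix(offset);
 *		break;
 *	case VIRTIO_PCI_O_CONFIG:
 *		handle_config(config_off);
 *		break;
 *	}
 */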

/*
 * VIRTIO_RING_F_EVENT_IDX suppression: only signal the guest if it asked
 * for an event between the last signalled idx and the current used idx.
 */
bool virtio_queue__should_signal(struct virt_queue *vq)
{
	u16 old_idx, new_idx, event_idx;

	old_idx		= vq->last_used_signalled;
	new_idx		= vq->vring.used->idx;
	event_idx	= vring_used_event(&vq->vring);

	if (vring_need_event(event_idx, new_idx, old_idx)) {
		vq->last_used_signalled = new_idx;
		return true;
	}

	return false;
}
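
/*
 * Usage sketch (illustrative): devices pair this check with the transport's
 * signal_vq callback, so that interrupts stay suppressed while the guest
 * isn't asking for them.  "vq_id" is a placeholder for the queue's index.
 *
 *	if (virtio_queue__should_signal(vq))
 *		vdev->ops->signal_vq(kvm, vdev, vq_id);
 */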

int virtio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		struct virtio_ops *ops, enum virtio_trans trans,
		int device_id, int subsys_id, int class)
{
	void *virtio;

	switch (trans) {
	case VIRTIO_PCI:
		virtio = calloc(1, sizeof(struct virtio_pci));
		if (!virtio)
			return -ENOMEM;
		vdev->virtio			= virtio;
		vdev->ops			= ops;
		vdev->ops->signal_vq		= virtio_pci__signal_vq;
		vdev->ops->signal_config	= virtio_pci__signal_config;
		vdev->ops->init			= virtio_pci__init;
		vdev->ops->exit			= virtio_pci__exit;
		vdev->ops->init(kvm, dev, vdev, device_id, subsys_id, class);
		break;
	default:
		return -1;
	}

	return 0;
}
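
/*
 * Registration sketch (illustrative): a backend embeds a struct
 * virtio_device and hands virtio_init() its ops plus the PCI ids.  The
 * identifiers below are modelled on kvmtool's virtio-rng device and should
 * be treated as assumptions, not verbatim code.
 *
 *	virtio_init(kvm, rdev, &rdev->vdev, &rng_dev_virtio_ops,
 *		    VIRTIO_PCI, PCI_DEVICE_ID_VIRTIO_RNG,
 *		    VIRTIO_ID_RNG, PCI_CLASS_RNG);
 */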