xref: /kvmtool/virtio/core.c (revision 1382aba018748f7d2494ab2e22d51ff91939e3e4)
1 #include <linux/virtio_ring.h>
2 #include <linux/types.h>
3 #include <sys/uio.h>
4 
5 #include "kvm/barrier.h"
6 
7 #include "kvm/kvm.h"
8 #include "kvm/virtio.h"
9 
10 struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
11 {
12 	struct vring_used_elem *used_elem;
13 
14 	used_elem	= &queue->vring.used->ring[queue->vring.used->idx % queue->vring.num];
15 	used_elem->id	= head;
16 	used_elem->len	= len;
17 
18 	/*
19 	 * Use wmb to assure that used elem was updated with head and len.
20 	 * We need a wmb here since we can't advance idx unless we're ready
21 	 * to pass the used element to the guest.
22 	 */
23 	wmb();
24 	queue->vring.used->idx++;
25 
26 	/*
27 	 * Use wmb to assure used idx has been increased before we signal the guest.
28 	 * Without a wmb here the guest may ignore the queue since it won't see
29 	 * an updated idx.
30 	 */
31 	wmb();
32 
33 	return used_elem;
34 }
35 
/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/*
	 * Grab the index of the next descriptor in the chain.
	 *
	 * NOTE(review): no bounds check is performed against 'max' here --
	 * a misbehaving guest could make 'next' point past the end of the
	 * descriptor table, and callers index desc[next] unchecked.
	 * Confirm whether validation belongs here or in the caller.
	 */
	next = desc[i].next;
	/*
	 * Barrier so the value loaded above is not re-fetched later: the
	 * descriptor lives in guest memory and may change under us.
	 * NOTE(review): wmb() is a *write* barrier; a read/compiler
	 * barrier (rmb()) looks like the right primitive for ordering
	 * this load -- verify against kvm/barrier.h semantics.
	 */
	wmb();

	return next;
}
57 
58 u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm)
59 {
60 	struct vring_desc *desc;
61 	u16 idx;
62 	u16 max;
63 
64 	idx = head;
65 	*out = *in = 0;
66 	max = vq->vring.num;
67 	desc = vq->vring.desc;
68 
69 	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
70 
71 		max = desc[idx].len / sizeof(struct vring_desc);
72 		desc = guest_flat_to_host(kvm, desc[idx].addr);
73 		idx = 0;
74 	}
75 
76 	do {
77 		/* Grab the first descriptor, and check it's OK. */
78 		iov[*out + *in].iov_len = desc[idx].len;
79 		iov[*out + *in].iov_base = guest_flat_to_host(kvm, desc[idx].addr);
80 		/* If this is an input descriptor, increment that count. */
81 		if (desc[idx].flags & VRING_DESC_F_WRITE)
82 			(*in)++;
83 		else
84 			(*out)++;
85 	} while ((idx = next_desc(desc, idx, max)) != max);
86 
87 	return head;
88 }
89 
90 u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
91 {
92 	u16 head;
93 
94 	head = virt_queue__pop(vq);
95 
96 	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
97 }
98 
99 /* in and out are relative to guest */
100 u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
101 			      struct iovec in_iov[], struct iovec out_iov[],
102 			      u16 *in, u16 *out)
103 {
104 	struct vring_desc *desc;
105 	u16 head, idx;
106 
107 	idx = head = virt_queue__pop(queue);
108 	*out = *in = 0;
109 	do {
110 		desc = virt_queue__get_desc(queue, idx);
111 		if (desc->flags & VRING_DESC_F_WRITE) {
112 			in_iov[*in].iov_base = guest_flat_to_host(kvm,
113 								  desc->addr);
114 			in_iov[*in].iov_len = desc->len;
115 			(*in)++;
116 		} else {
117 			out_iov[*out].iov_base = guest_flat_to_host(kvm,
118 								    desc->addr);
119 			out_iov[*out].iov_len = desc->len;
120 			(*out)++;
121 		}
122 		if (desc->flags & VRING_DESC_F_NEXT)
123 			idx = desc->next;
124 		else
125 			break;
126 	} while (1);
127 
128 	return head;
129 }
130 
131 int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
132 {
133 	if (msix) {
134 		if (offset < 4)
135 			return VIRTIO_PCI_O_MSIX;
136 		else
137 			offset -= 4;
138 	}
139 
140 	*config_off = offset;
141 
142 	return VIRTIO_PCI_O_CONFIG;
143 }
144 
145 bool virtio_queue__should_signal(struct virt_queue *vq)
146 {
147 	u16 old_idx, new_idx, event_idx;
148 
149 	old_idx		= vq->last_used_signalled;
150 	new_idx		= vq->vring.used->idx;
151 	event_idx	= vring_used_event(&vq->vring);
152 
153 	if (vring_need_event(event_idx, new_idx, old_idx)) {
154 		vq->last_used_signalled = new_idx;
155 		return true;
156 	}
157 
158 	return false;
159 }
160