xref: /kvmtool/virtio/core.c (revision 1382aba018748f7d2494ab2e22d51ff91939e3e4)
#include <linux/virtio_ring.h>
#include <linux/types.h>
#include <sys/uio.h>

#include "kvm/barrier.h"

#include "kvm/kvm.h"
#include "kvm/virtio.h"

struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *used_elem;

	used_elem	= &queue->vring.used->ring[queue->vring.used->idx % queue->vring.num];
	used_elem->id	= head;
	used_elem->len	= len;

	/*
	 * Use a wmb() to ensure the used element has been filled in with head
	 * and len before idx is advanced: we must not publish the element to
	 * the guest until it is complete.
	 */
	wmb();
	queue->vring.used->idx++;

	/*
	 * Use a wmb() to ensure the increased idx is visible before we signal
	 * the guest. Without it the guest may ignore the queue, since it
	 * won't see the updated idx.
	 */
	wmb();

	return used_elem;
}
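
/*
 * Example (editor's sketch, not part of the original file): how a device
 * backend typically completes one request.  The signal_vq() callback is
 * hypothetical; real backends notify through their transport-specific
 * interrupt path.  virtio_queue__should_signal() is assumed to be declared
 * in "kvm/virtio.h"; it is defined at the end of this file.
 */
static void example__complete_request(struct kvm *kvm, struct virt_queue *vq,
				      u16 head, u32 len,
				      void (*signal_vq)(struct kvm *kvm,
							struct virt_queue *vq))
{
	/* Publish the finished descriptor chain to the used ring. */
	virt_queue__set_used_elem(vq, head, len);

	/* Only interrupt the guest if it actually wants a notification. */
	if (virtio_queue__should_signal(vq))
		signal_vq(kvm, vq);
}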
3539d6af07SAsias He 
36754c8ce3SSasha Levin /*
37754c8ce3SSasha Levin  * Each buffer in the virtqueues is actually a chain of descriptors.  This
38754c8ce3SSasha Levin  * function returns the next descriptor in the chain, or vq->vring.num if we're
39754c8ce3SSasha Levin  * at the end.
40754c8ce3SSasha Levin  */
41754c8ce3SSasha Levin static unsigned next_desc(struct vring_desc *desc,
42754c8ce3SSasha Levin 			  unsigned int i, unsigned int max)
43754c8ce3SSasha Levin {
44754c8ce3SSasha Levin 	unsigned int next;
45754c8ce3SSasha Levin 
46754c8ce3SSasha Levin 	/* If this descriptor says it doesn't chain, we're done. */
47754c8ce3SSasha Levin 	if (!(desc[i].flags & VRING_DESC_F_NEXT))
48754c8ce3SSasha Levin 		return max;
49754c8ce3SSasha Levin 
50754c8ce3SSasha Levin 	/* Check they're not leading us off end of descriptors. */
51754c8ce3SSasha Levin 	next = desc[i].next;
52754c8ce3SSasha Levin 	/* Make sure compiler knows to grab that: we don't want it changing! */
53754c8ce3SSasha Levin 	wmb();
54754c8ce3SSasha Levin 
55754c8ce3SSasha Levin 	return next;
56754c8ce3SSasha Levin }
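
/*
 * Example (editor's sketch): walking a two-descriptor chain with next_desc().
 * The field and flag names come from <linux/virtio_ring.h>; the addresses and
 * lengths are made up for illustration.
 */
static void example__walk_chain(void)
{
	struct vring_desc chain[2] = {
		{ .addr = 0x1000, .len = 64,  .flags = VRING_DESC_F_NEXT, .next = 1 },
		{ .addr = 0x2000, .len = 128, .flags = VRING_DESC_F_WRITE },
	};
	unsigned int i = 0, max = 2;

	do {
		/* Visits chain[0] (0x1000/64), then chain[1] (0x2000/128). */
	} while ((i = next_desc(chain, i, max)) != max);
}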

u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm)
{
	struct vring_desc *desc;
	u16 idx;
	u16 max;

	idx = head;
	*out = *in = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
		/* The chain lives in a separate, guest-supplied descriptor table. */
		max = desc[idx].len / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, desc[idx].addr);
		idx = 0;
	}

	do {
		/* Translate this descriptor into an iovec entry. */
		iov[*out + *in].iov_len = desc[idx].len;
		iov[*out + *in].iov_base = guest_flat_to_host(kvm, desc[idx].addr);
		/* If this is an input descriptor, increment that count. */
		if (desc[idx].flags & VRING_DESC_F_WRITE)
			(*in)++;
		else
			(*out)++;
	} while ((idx = next_desc(desc, idx, max)) != max);

	return head;
}

u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
{
	u16 head;

	head = virt_queue__pop(vq);

	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
}
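
/*
 * Example (editor's sketch): draining a queue the way a TX-style backend
 * might.  The iovec capacity of 128 and the writev() target fd are purely
 * illustrative; virt_queue__available() is assumed to be the "kvm/virtio.h"
 * helper that tests for pending available buffers.
 */
static void example__drain_queue(struct kvm *kvm, struct virt_queue *vq, int fd)
{
	struct iovec iov[128];
	ssize_t len;
	u16 out, in, head;

	while (virt_queue__available(vq)) {
		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
		/* The 'out' entries carry guest->host data; push them to the fd. */
		len = writev(fd, iov, out);
		virt_queue__set_used_elem(vq, head, len > 0 ? len : 0);
	}
}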

/*
 * 'in' and 'out' are relative to the guest: 'in' buffers are written by the
 * host, 'out' buffers are read by it.
 */
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out)
{
	struct vring_desc *desc;
	u16 head, idx;

	idx = head = virt_queue__pop(queue);
	*out = *in = 0;
	do {
		desc = virt_queue__get_desc(queue, idx);
		if (desc->flags & VRING_DESC_F_WRITE) {
			in_iov[*in].iov_base = guest_flat_to_host(kvm,
								  desc->addr);
			in_iov[*in].iov_len = desc->len;
			(*in)++;
		} else {
			out_iov[*out].iov_base = guest_flat_to_host(kvm,
								    desc->addr);
			out_iov[*out].iov_len = desc->len;
			(*out)++;
		}
		if (desc->flags & VRING_DESC_F_NEXT)
			idx = desc->next;
		else
			break;
	} while (1);

	return head;
}
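
/*
 * Example (editor's sketch): gathering one request into separate in/out
 * arrays, the pattern a virtio-9p style backend wants.  Array sizes and the
 * zero reply length are illustrative only.
 */
static void example__split_request(struct kvm *kvm, struct virt_queue *vq)
{
	struct iovec in_iov[128], out_iov[128];
	u16 in, out, head;

	head = virt_queue__get_inout_iov(kvm, vq, in_iov, out_iov, &in, &out);
	/* Parse the request from out_iov[0..out), write the reply into in_iov[0..in). */
	virt_queue__set_used_elem(vq, head, 0);
}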

/*
 * With MSI-X enabled, the first four bytes of the device-specific PCI region
 * hold the MSI-X config/queue vector registers; the device config space
 * follows them.
 */
int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
{
	if (msix) {
		if (offset < 4)
			return VIRTIO_PCI_O_MSIX;
		else
			offset -= 4;
	}

	*config_off = offset;

	return VIRTIO_PCI_O_CONFIG;
}
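
/*
 * Example (editor's sketch): splitting a device-specific config-space read
 * with virtio__get_dev_specific_field().  The msix_vector argument and the
 * raw config[] byte array are hypothetical backend state; real code keeps
 * separate config and queue vectors.
 */
static u8 example__read_config_byte(bool msix_enabled, u16 msix_vector,
				    u8 *config, int offset)
{
	u32 config_off;

	switch (virtio__get_dev_specific_field(offset, msix_enabled, &config_off)) {
	case VIRTIO_PCI_O_MSIX:
		/* Bytes 0-3 alias the MSI-X vector registers. */
		return msix_vector >> (8 * (offset & 1));
	case VIRTIO_PCI_O_CONFIG:
		return config[config_off];
	}

	return 0;
}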

/*
 * Decide whether the guest needs to be signalled, honouring the used-event
 * index the guest publishes when VIRTIO_RING_F_EVENT_IDX is negotiated.
 */
bool virtio_queue__should_signal(struct virt_queue *vq)
{
	u16 old_idx, new_idx, event_idx;

	old_idx		= vq->last_used_signalled;
	new_idx		= vq->vring.used->idx;
	event_idx	= vring_used_event(&vq->vring);

	if (vring_need_event(event_idx, new_idx, old_idx)) {
		vq->last_used_signalled = new_idx;
		return true;
	}

	return false;
}
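
/*
 * Example (editor's sketch): the event-idx test with concrete numbers.
 * vring_need_event(event_idx, new_idx, old_idx) from <linux/virtio_ring.h>
 * is true iff event_idx lies in the half-open window (old_idx, new_idx],
 * computed modulo 2^16.
 */
static bool example__need_event(void)
{
	u16 old_idx = 10, new_idx = 13, event_idx = 12;

	/* 12 is in (10, 13], so the guest asked to be signalled: true. */
	return vring_need_event(event_idx, new_idx, old_idx);
}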