xref: /kvmtool/virtio/rng.c (revision 74af1456dfa0c3fb1c79529450c6130b54fd1c83)
#include "kvm/virtio-rng.h"

#include "kvm/virtio-pci-dev.h"

#include "kvm/virtio.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/threadpool.h"
#include "kvm/guest_compat.h"

#include <linux/virtio_ring.h>
#include <linux/virtio_rng.h>

#include <linux/list.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <pthread.h>
#include <linux/kernel.h>

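/*
 * The virtio-rng device exposes a single request virtqueue: the guest posts
 * device-writable buffers and the host fills them with entropy.
 */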
#define NUM_VIRT_QUEUES		1
#define VIRTIO_RNG_QUEUE_SIZE	128

struct rng_dev_job {
	struct virt_queue	*vq;
	struct rng_dev		*rdev;
	struct thread_pool__job	job_id;
};

struct rng_dev {
	struct list_head	list;
	struct virtio_device	vdev;

	int			fd;

	/* virtio queue */
	struct virt_queue	vqs[NUM_VIRT_QUEUES];
	struct rng_dev_job	jobs[NUM_VIRT_QUEUES];
};

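/*
 * All instantiated rng devices are kept on this list so they can be torn
 * down on exit. compat_id tracks the one-shot warning emitted when the
 * guest never initializes the device; it is cleared once a queue is set up.
 */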
static LIST_HEAD(rdevs);
static int compat_id = -1;

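/*
 * virtio-rng defines no device configuration space and no device-specific
 * feature bits, so these callbacks have nothing to report.
 */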
static u8 *get_config(struct kvm *kvm, void *dev)
{
	/* Unused */
	return 0;
}

static size_t get_config_size(struct kvm *kvm, void *dev)
{
	return 0;
}

static u64 get_host_features(struct kvm *kvm, void *dev)
{
	/* Unused */
	return 0;
}

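/*
 * Pop one descriptor chain off the queue and satisfy it by reading from the
 * host entropy fd straight into the guest buffers, then publish the number
 * of bytes written through the used ring.
 */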
static bool virtio_rng_do_io_request(struct kvm *kvm, struct rng_dev *rdev, struct virt_queue *queue)
{
	struct iovec iov[VIRTIO_RNG_QUEUE_SIZE];
	ssize_t len;
	u16 out, in, head;

	head	= virt_queue__get_iov(queue, iov, &out, &in, kvm);
	len	= readv(rdev->fd, iov, in);
	if (len < 0 && (errno == EAGAIN || errno == EINTR)) {
		/*
		 * The virtio 1.0 spec demands at least one byte of entropy,
		 * so we cannot just return with 0 if something goes wrong.
		 * The urandom(4) manpage mentions that a read from /dev/urandom
		 * should always return at least 256 bytes of randomness, so
		 * just retry here, with the requested size clamped to that
		 * maximum, in case we were interrupted by a signal.
		 */
		iov[0].iov_len = min_t(size_t, iov[0].iov_len, 256UL);
		len = readv(rdev->fd, iov, 1);
		if (len < 1)
			return false;
	}

	virt_queue__set_used_elem(queue, head, len);

	return true;
}

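/*
 * Thread-pool worker: drain every available request on the queue, then
 * signal the guest through the transport's interrupt mechanism.
 */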
static void virtio_rng_do_io(struct kvm *kvm, void *param)
{
	struct rng_dev_job *job	= param;
	struct virt_queue *vq	= job->vq;
	struct rng_dev *rdev	= job->rdev;

	while (virt_queue__available(vq))
		virtio_rng_do_io_request(kvm, rdev, vq);

	rdev->vdev.ops->signal_vq(kvm, &rdev->vdev, vq - rdev->vqs);
}

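/*
 * Called by the transport once the guest driver has configured the queue:
 * set up the vring, bind it to a thread-pool job that performs the actual
 * I/O, and drop the compatibility warning now that the guest has clearly
 * picked up the device.
 */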
static int init_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;
	struct virt_queue *queue;
	struct rng_dev_job *job;

	compat__remove_message(compat_id);

	queue		= &rdev->vqs[vq];

	job = &rdev->jobs[vq];

	virtio_init_device_vq(kvm, &rdev->vdev, queue, VIRTIO_RNG_QUEUE_SIZE);

	*job = (struct rng_dev_job) {
		.vq	= queue,
		.rdev	= rdev,
	};

	thread_pool__init_job(&job->job_id, kvm, virtio_rng_do_io, job);

	return 0;
}

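/* Cancel any outstanding work when the queue is reset or torn down. */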
static void exit_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	thread_pool__cancel_job(&rdev->jobs[vq].job_id);
}

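/*
 * Guest kicks land here; the actual entropy transfer is deferred to the
 * thread pool so the notifying thread is not blocked on host I/O.
 */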
static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	thread_pool__do_job(&rdev->jobs[vq].job_id);

	return 0;
}

static struct virt_queue *get_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	return &rdev->vqs[vq];
}

static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	return VIRTIO_RNG_QUEUE_SIZE;
}

static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}

static unsigned int get_vq_count(struct kvm *kvm, void *dev)
{
	return NUM_VIRT_QUEUES;
}

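/*
 * Callbacks handed to the generic virtio transport layer, which drives them
 * in response to guest configuration accesses and queue notifications.
 */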
static struct virtio_ops rng_dev_virtio_ops = {
	.get_config		= get_config,
	.get_config_size	= get_config_size,
	.get_host_features	= get_host_features,
	.init_vq		= init_vq,
	.exit_vq		= exit_vq,
	.notify_vq		= notify_vq,
	.get_vq			= get_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.get_vq_count		= get_vq_count,
};

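/*
 * Device setup: open /dev/urandom as the host entropy source and register
 * the device with the configured virtio transport. Runs during VM setup via
 * virtio_dev_init() when a virtio-rng device was requested on the command
 * line.
 */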
int virtio_rng__init(struct kvm *kvm)
{
	struct rng_dev *rdev;
	int r;

	if (!kvm->cfg.virtio_rng)
		return 0;

	rdev = calloc(1, sizeof(*rdev));
	if (rdev == NULL)
		return -ENOMEM;

	rdev->fd = open("/dev/urandom", O_RDONLY);
	if (rdev->fd < 0) {
		r = rdev->fd;
		goto cleanup;
	}

	r = virtio_init(kvm, rdev, &rdev->vdev, &rng_dev_virtio_ops,
			kvm->cfg.virtio_transport, PCI_DEVICE_ID_VIRTIO_RNG,
			VIRTIO_ID_RNG, PCI_CLASS_RNG);
	if (r < 0)
		goto cleanup;

	list_add_tail(&rdev->list, &rdevs);

	if (compat_id == -1)
		compat_id = virtio_compat_add_message("virtio-rng", "CONFIG_HW_RANDOM_VIRTIO");
	return 0;
cleanup:
	close(rdev->fd);
	free(rdev);

	return r;
}
virtio_dev_init(virtio_rng__init);

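/* Tear down every registered rng device when the VM shuts down. */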
int virtio_rng__exit(struct kvm *kvm)
{
	struct rng_dev *rdev, *tmp;

	list_for_each_entry_safe(rdev, tmp, &rdevs, list) {
		list_del(&rdev->list);
		virtio_exit(kvm, &rdev->vdev);
		free(rdev);
	}

	return 0;
}
virtio_dev_exit(virtio_rng__exit);