xref: /kvmtool/virtio/rng.c (revision bc23b9d9b152eaf56bacb1e2bae9a2b2252ade46)
#include "kvm/virtio-rng.h"

#include "kvm/virtio-pci-dev.h"

#include "kvm/virtio.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/threadpool.h"
#include "kvm/guest_compat.h"

#include <linux/virtio_ring.h>
#include <linux/virtio_rng.h>

#include <linux/list.h>
#include <errno.h>	/* errno, EAGAIN, EINTR */
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/uio.h>	/* readv() */
#include <pthread.h>
#include <linux/kernel.h>
#define NUM_VIRT_QUEUES		1
#define VIRTIO_RNG_QUEUE_SIZE	128

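/*
 * One deferred I/O job per virtqueue: it ties the queue to its device so
 * the thread pool callback knows which fd to read entropy from.
 */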
struct rng_dev_job {
	struct virt_queue	*vq;
	struct rng_dev		*rdev;
	struct thread_pool__job	job_id;
};

struct rng_dev {
	struct list_head	list;
	struct virtio_device	vdev;

	int			fd;

	/* virtio queue */
	struct virt_queue	vqs[NUM_VIRT_QUEUES];
	struct rng_dev_job	jobs[NUM_VIRT_QUEUES];
};

static LIST_HEAD(rdevs);
static int compat_id = -1;

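/*
 * virtio-rng has no device-specific configuration space and advertises no
 * feature bits, so these callbacks have nothing to report.
 */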
static u8 *get_config(struct kvm *kvm, void *dev)
{
	/* Unused */
	return NULL;
}

static size_t get_config_size(struct kvm *kvm, void *dev)
{
	return 0;
}

static u64 get_host_features(struct kvm *kvm, void *dev)
{
	/* Unused */
	return 0;
}

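/*
 * Service one request: pop a descriptor chain off the queue, fill the
 * guest's buffers with data read from the host entropy source and push
 * the chain back as used.
 */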
static bool virtio_rng_do_io_request(struct kvm *kvm, struct rng_dev *rdev, struct virt_queue *queue)
{
	struct iovec iov[VIRTIO_RNG_QUEUE_SIZE];
	ssize_t len;
	u16 out, in, head;

	head	= virt_queue__get_iov(queue, iov, &out, &in, kvm);
	len	= readv(rdev->fd, iov, in);
	if (len < 0 && (errno == EAGAIN || errno == EINTR)) {
		/*
		 * The virtio 1.0 spec demands at least one byte of entropy,
		 * so we cannot simply complete the request with a length of
		 * 0 if something goes wrong.
		 * The urandom(4) manpage states that reads of up to 256
		 * bytes from /dev/urandom return the requested amount and
		 * are not interrupted by signals, so retry here with the
		 * request clamped to that maximum.
		 */
		iov[0].iov_len = min(iov[0].iov_len, 256UL);
		len = readv(rdev->fd, iov, 1);
		if (len < 1)
			return false;
	}

	virt_queue__set_used_elem(queue, head, len);

	return true;
}

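/*
 * Thread pool callback: drain every descriptor currently available in the
 * queue, then signal the guest that its buffers have been used.
 */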
static void virtio_rng_do_io(struct kvm *kvm, void *param)
{
	struct rng_dev_job *job	= param;
	struct virt_queue *vq	= job->vq;
	struct rng_dev *rdev	= job->rdev;

	while (virt_queue__available(vq))
		virtio_rng_do_io_request(kvm, rdev, vq);

	rdev->vdev.ops->signal_vq(kvm, &rdev->vdev, vq - rdev->vqs);
}

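/*
 * Called when the guest sets up a virtqueue: initialise the vring and
 * prepare the thread pool job that will service it.
 */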
static int init_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;
	struct virt_queue *queue;
	struct rng_dev_job *job;

	compat__remove_message(compat_id);

	queue		= &rdev->vqs[vq];

	job = &rdev->jobs[vq];

	virtio_init_device_vq(kvm, &rdev->vdev, queue, VIRTIO_RNG_QUEUE_SIZE);

	*job = (struct rng_dev_job) {
		.vq	= queue,
		.rdev	= rdev,
	};

	thread_pool__init_job(&job->job_id, kvm, virtio_rng_do_io, job);

	return 0;
}

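/* Guest kick: schedule the deferred I/O job for this virtqueue. */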
static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	thread_pool__do_job(&rdev->jobs[vq].job_id);

	return 0;
}

static struct virt_queue *get_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	return &rdev->vqs[vq];
}

static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	return VIRTIO_RNG_QUEUE_SIZE;
}

static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}

static unsigned int get_vq_count(struct kvm *kvm, void *dev)
{
	return NUM_VIRT_QUEUES;
}

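/* Callbacks through which the generic virtio transport layer drives the device. */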
static struct virtio_ops rng_dev_virtio_ops = {
	.get_config		= get_config,
	.get_config_size	= get_config_size,
	.get_host_features	= get_host_features,
	.init_vq		= init_vq,
	.notify_vq		= notify_vq,
	.get_vq			= get_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.get_vq_count		= get_vq_count,
};

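/*
 * Device setup: open /dev/urandom as the entropy source and register the
 * device on the virtio transport configured on the command line.
 */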
int virtio_rng__init(struct kvm *kvm)
{
	struct rng_dev *rdev;
	int r;

	if (!kvm->cfg.virtio_rng)
		return 0;

	rdev = calloc(1, sizeof(*rdev));
	if (rdev == NULL)
		return -ENOMEM;

	rdev->fd = open("/dev/urandom", O_RDONLY);
	if (rdev->fd < 0) {
		r = rdev->fd;
		goto cleanup;
	}

	r = virtio_init(kvm, rdev, &rdev->vdev, &rng_dev_virtio_ops,
			kvm->cfg.virtio_transport, PCI_DEVICE_ID_VIRTIO_RNG,
			VIRTIO_ID_RNG, PCI_CLASS_RNG);
	if (r < 0)
		goto cleanup;

	list_add_tail(&rdev->list, &rdevs);

	if (compat_id == -1)
		compat_id = virtio_compat_add_message("virtio-rng", "CONFIG_HW_RANDOM_VIRTIO");
	return 0;
cleanup:
	close(rdev->fd);
	free(rdev);

	return r;
}
virtio_dev_init(virtio_rng__init);

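/* Tear down every registered device and release its transport resources. */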
int virtio_rng__exit(struct kvm *kvm)
{
	struct rng_dev *rdev, *tmp;

	list_for_each_entry_safe(rdev, tmp, &rdevs, list) {
		list_del(&rdev->list);
		rdev->vdev.ops->exit(kvm, &rdev->vdev);
		free(rdev);
	}

	return 0;
}
virtio_dev_exit(virtio_rng__exit);