#include "kvm/virtio-rng.h"

#include "kvm/virtio-pci-dev.h"

#include "kvm/virtio.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/threadpool.h"
#include "kvm/guest_compat.h"

#include <linux/virtio_ring.h>
#include <linux/virtio_rng.h>

#include <linux/list.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <pthread.h>
#include <linux/kernel.h>

#define NUM_VIRT_QUEUES		1
#define VIRTIO_RNG_QUEUE_SIZE	128

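/*
 * virtio-rng exposes a single virtqueue (the "requestq" in the virtio
 * spec): the guest posts empty buffers and the device fills them with
 * entropy. Each queue gets a thread pool job so requests are served
 * off the notifying thread.
 */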
struct rng_dev_job {
	struct virt_queue	*vq;
	struct rng_dev		*rdev;
	struct thread_pool__job	job_id;
};

struct rng_dev {
	struct list_head	list;
	struct virtio_device	vdev;

	int			fd;

	/* virtio queue */
	struct virt_queue	vqs[NUM_VIRT_QUEUES];
	struct rng_dev_job	jobs[NUM_VIRT_QUEUES];
};

static LIST_HEAD(rdevs);
static int compat_id = -1;

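/*
 * virtio-rng has no device-specific configuration space and offers no
 * feature bits, so the config and feature callbacks are empty stubs.
 */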
static u8 *get_config(struct kvm *kvm, void *dev)
{
	/* Unused */
	return NULL;
}

static size_t get_config_size(struct kvm *kvm, void *dev)
{
	return 0;
}

static u64 get_host_features(struct kvm *kvm, void *dev)
{
	/* Unused */
	return 0;
}

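/*
 * Serve one request: pop a descriptor chain from the queue, fill the
 * guest's buffers with data read from /dev/urandom, and mark the chain
 * as used with the number of bytes written.
 */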
static bool virtio_rng_do_io_request(struct kvm *kvm, struct rng_dev *rdev,
				     struct virt_queue *queue)
{
	struct iovec iov[VIRTIO_RNG_QUEUE_SIZE];
	ssize_t len;
	u16 out, in, head;

	head = virt_queue__get_iov(queue, iov, &out, &in, kvm);

	/* rng data flows device->guest, so only the driver-writable (in) iovecs matter */
	len = readv(rdev->fd, iov, in);
	if (len < 0 && (errno == EAGAIN || errno == EINTR)) {
		/*
		 * The virtio 1.0 spec demands at least one byte of entropy,
		 * so we cannot just return with 0 if something goes wrong.
		 * The urandom(4) manpage mentions that a read from
		 * /dev/urandom should always return at least 256 bytes of
		 * randomness, so just retry here, with the requested size
		 * clamped to that maximum, in case we were interrupted by a
		 * signal.
		 */
		iov[0].iov_len = min_t(size_t, iov[0].iov_len, 256UL);
		len = readv(rdev->fd, iov, 1);
		if (len < 1)
			return false;
	}

	virt_queue__set_used_elem(queue, head, len);

	return true;
}

static void virtio_rng_do_io(struct kvm *kvm, void *param)
{
	struct rng_dev_job *job = param;
	struct virt_queue *vq = job->vq;
	struct rng_dev *rdev = job->rdev;

	/* Drain every available descriptor chain, then notify the guest. */
	while (virt_queue__available(vq))
		virtio_rng_do_io_request(kvm, rdev, vq);

	rdev->vdev.ops->signal_vq(kvm, &rdev->vdev, vq - rdev->vqs);
}

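/*
 * Set up one virtqueue and the thread pool job that will service it.
 * The compat message (shown if the guest never drives the device) is
 * dropped as soon as the guest initializes a queue.
 */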
static int init_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;
	struct virt_queue *queue;
	struct rng_dev_job *job;

	compat__remove_message(compat_id);

	queue = &rdev->vqs[vq];
	job = &rdev->jobs[vq];

	virtio_init_device_vq(kvm, &rdev->vdev, queue, VIRTIO_RNG_QUEUE_SIZE);

	*job = (struct rng_dev_job) {
		.vq	= queue,
		.rdev	= rdev,
	};

	thread_pool__init_job(&job->job_id, kvm, virtio_rng_do_io, job);

	return 0;
}

static void exit_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	thread_pool__cancel_job(&rdev->jobs[vq].job_id);
}

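/*
 * Queue notification ("kick") handler: just schedule the thread pool
 * job; the actual readv() happens on a worker thread.
 */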
static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	thread_pool__do_job(&rdev->jobs[vq].job_id);

	return 0;
}

static struct virt_queue *get_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct rng_dev *rdev = dev;

	return &rdev->vqs[vq];
}

static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	return VIRTIO_RNG_QUEUE_SIZE;
}

static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}

static unsigned int get_vq_count(struct kvm *kvm, void *dev)
{
	return NUM_VIRT_QUEUES;
}

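/*
 * Callback table that plugs this device model into kvmtool's generic
 * virtio layer, which handles the transport (PCI or MMIO) details.
 */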
static struct virtio_ops rng_dev_virtio_ops = {
	.get_config		= get_config,
	.get_config_size	= get_config_size,
	.get_host_features	= get_host_features,
	.init_vq		= init_vq,
	.exit_vq		= exit_vq,
	.notify_vq		= notify_vq,
	.get_vq			= get_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.get_vq_count		= get_vq_count,
};

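/*
 * Device bring-up: allocate the device state, open the host entropy
 * source and register with the virtio transport selected in the VM
 * config. Only instantiated when virtio-rng was requested.
 */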
int virtio_rng__init(struct kvm *kvm)
{
	struct rng_dev *rdev;
	int r;

	if (!kvm->cfg.virtio_rng)
		return 0;

	rdev = calloc(1, sizeof(*rdev));
	if (rdev == NULL)
		return -ENOMEM;

	rdev->fd = open("/dev/urandom", O_RDONLY);
	if (rdev->fd < 0) {
		r = -errno;
		goto cleanup;
	}

	r = virtio_init(kvm, rdev, &rdev->vdev, &rng_dev_virtio_ops,
			kvm->cfg.virtio_transport, PCI_DEVICE_ID_VIRTIO_RNG,
			VIRTIO_ID_RNG, PCI_CLASS_RNG);
	if (r < 0)
		goto cleanup;

	list_add_tail(&rdev->list, &rdevs);

	if (compat_id == -1)
		compat_id = virtio_compat_add_message("virtio-rng",
						      "CONFIG_HW_RANDOM_VIRTIO");
	return 0;

cleanup:
	/* fd is -1 if open() failed, so only close it when it is valid */
	if (rdev->fd >= 0)
		close(rdev->fd);
	free(rdev);

	return r;
}
/* Registered to run during VM setup. */
virtio_dev_init(virtio_rng__init);

int virtio_rng__exit(struct kvm *kvm)
{
	struct rng_dev *rdev, *tmp;

	list_for_each_entry_safe(rdev, tmp, &rdevs, list) {
		list_del(&rdev->list);
		virtio_exit(kvm, &rdev->vdev);
		close(rdev->fd);
		free(rdev);
	}

	return 0;
}
/* Registered to run during VM teardown. */
virtio_dev_exit(virtio_rng__exit);