#include "kvm/virtio-balloon.h"

#include "kvm/virtio-pci-dev.h"

#include "kvm/virtio.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/pci.h"
#include "kvm/threadpool.h"
#include "kvm/guest_compat.h"
#include "kvm/kvm-ipc.h"

#include <linux/virtio_ring.h>
#include <linux/virtio_balloon.h>

#include <linux/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <pthread.h>
#include <sys/eventfd.h>
25
26 #define NUM_VIRT_QUEUES 3
27 #define VIRTIO_BLN_QUEUE_SIZE 128
28 #define VIRTIO_BLN_INFLATE 0
29 #define VIRTIO_BLN_DEFLATE 1
30 #define VIRTIO_BLN_STATS 2
31
/*
 * State for the virtio balloon device: three virtqueues (inflate,
 * deflate, stats) each backed by a threadpool job, plus the most
 * recently received guest memory statistics and the device config space.
 */
struct bln_dev {
	struct list_head list;
	struct virtio_device vdev;

	/* virtio queue */
	struct virt_queue vqs[NUM_VIRT_QUEUES];
	struct thread_pool__job jobs[NUM_VIRT_QUEUES];

	/* Copy of the last stats buffer received from the guest */
	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
	/* Stats buffer currently parked with us, and its descriptor head */
	struct virtio_balloon_stat *cur_stat;
	u32 cur_stat_head;
	/* Number of valid entries in stats[] */
	u16 stat_count;
	/* eventfd signalled when a fresh stats buffer arrives from the guest */
	int stat_waitfd;

	struct virtio_balloon_config config;
};
48
/* A single balloon device instance per VM. */
static struct bln_dev bdev;
/* Compat message id; -1 until registered in virtio_bln__init(). */
static int compat_id = -1;
51
virtio_bln_do_io_request(struct kvm * kvm,struct bln_dev * bdev,struct virt_queue * queue)52 static bool virtio_bln_do_io_request(struct kvm *kvm, struct bln_dev *bdev, struct virt_queue *queue)
53 {
54 struct iovec iov[VIRTIO_BLN_QUEUE_SIZE];
55 unsigned int len = 0;
56 u16 out, in, head;
57 u32 *ptrs, i;
58 u32 actual;
59
60 head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
61 ptrs = iov[0].iov_base;
62 len = iov[0].iov_len / sizeof(u32);
63
64 actual = le32_to_cpu(bdev->config.actual);
65 for (i = 0 ; i < len ; i++) {
66 void *guest_ptr;
67
68 guest_ptr = guest_flat_to_host(kvm, (u64)ptrs[i] << VIRTIO_BALLOON_PFN_SHIFT);
69 if (queue == &bdev->vqs[VIRTIO_BLN_INFLATE]) {
70 madvise(guest_ptr, 1 << VIRTIO_BALLOON_PFN_SHIFT, MADV_DONTNEED);
71 actual++;
72 } else if (queue == &bdev->vqs[VIRTIO_BLN_DEFLATE]) {
73 actual--;
74 }
75 }
76 bdev->config.actual = cpu_to_le32(actual);
77
78 virt_queue__set_used_elem(queue, head, len);
79
80 return true;
81 }
82
virtio_bln_do_stat_request(struct kvm * kvm,struct bln_dev * bdev,struct virt_queue * queue)83 static bool virtio_bln_do_stat_request(struct kvm *kvm, struct bln_dev *bdev, struct virt_queue *queue)
84 {
85 struct iovec iov[VIRTIO_BLN_QUEUE_SIZE];
86 u16 out, in, head;
87 struct virtio_balloon_stat *stat;
88 u64 wait_val = 1;
89
90 head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
91 stat = iov[0].iov_base;
92
93 /* Initial empty stat buffer */
94 if (bdev->cur_stat == NULL) {
95 bdev->cur_stat = stat;
96 bdev->cur_stat_head = head;
97
98 return true;
99 }
100
101 memcpy(bdev->stats, stat, iov[0].iov_len);
102
103 bdev->stat_count = iov[0].iov_len / sizeof(struct virtio_balloon_stat);
104 bdev->cur_stat = stat;
105 bdev->cur_stat_head = head;
106
107 if (write(bdev->stat_waitfd, &wait_val, sizeof(wait_val)) <= 0)
108 return -EFAULT;
109
110 return 1;
111 }
112
virtio_bln_do_io(struct kvm * kvm,void * param)113 static void virtio_bln_do_io(struct kvm *kvm, void *param)
114 {
115 struct virt_queue *vq = param;
116
117 if (vq == &bdev.vqs[VIRTIO_BLN_STATS]) {
118 virtio_bln_do_stat_request(kvm, &bdev, vq);
119 bdev.vdev.ops->signal_vq(kvm, &bdev.vdev, VIRTIO_BLN_STATS);
120 return;
121 }
122
123 while (virt_queue__available(vq)) {
124 virtio_bln_do_io_request(kvm, &bdev, vq);
125 bdev.vdev.ops->signal_vq(kvm, &bdev.vdev, vq - bdev.vqs);
126 }
127 }
128
virtio_bln__collect_stats(struct kvm * kvm)129 static int virtio_bln__collect_stats(struct kvm *kvm)
130 {
131 struct virt_queue *vq = &bdev.vqs[VIRTIO_BLN_STATS];
132 u64 tmp;
133
134 /* Exit if the queue is not set up. */
135 if (!vq->enabled)
136 return -ENODEV;
137
138 virt_queue__set_used_elem(vq, bdev.cur_stat_head,
139 sizeof(struct virtio_balloon_stat));
140 bdev.vdev.ops->signal_vq(kvm, &bdev.vdev, VIRTIO_BLN_STATS);
141
142 if (read(bdev.stat_waitfd, &tmp, sizeof(tmp)) <= 0)
143 return -EFAULT;
144
145 return 0;
146 }
147
/*
 * KVM_IPC_STAT handler: collect fresh balloon statistics from the guest
 * and write the raw stats array back on @fd.
 */
static void virtio_bln__print_stats(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	ssize_t r;

	if (WARN_ON(type != KVM_IPC_STAT || len))
		return;

	if (virtio_bln__collect_stats(kvm) < 0)
		return;

	r = write(fd, bdev.stats, sizeof(bdev.stats));
	/* Flag short writes too, not just outright errors. */
	if (r != sizeof(bdev.stats))
		pr_warning("Failed sending memory stats");
}
162
/*
 * KVM_IPC_BALLOON handler: grow or shrink the balloon target.
 *
 * @msg carries a signed count of MiB: positive inflates the balloon
 * (removing memory from the guest), negative deflates it.  A deflate
 * that would underflow the current target is silently ignored.
 */
static void handle_mem(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	/* Balloon pages are 4KiB (VIRTIO_BALLOON_PFN_SHIFT), so 256 per MiB. */
	enum { PAGES_PER_MIB = 256 };
	int mem;
	u32 num_pages;

	if (WARN_ON(type != KVM_IPC_BALLOON || len != sizeof(int)))
		return;

	mem = *(int *)msg;
	num_pages = le32_to_cpu(bdev.config.num_pages);

	if (mem > 0) {
		num_pages += PAGES_PER_MIB * mem;
	} else if (mem < 0) {
		/* Refuse to shrink the target below zero pages. */
		if (num_pages < (u32)(PAGES_PER_MIB * (-mem)))
			return;

		num_pages += PAGES_PER_MIB * mem;
	}

	bdev.config.num_pages = cpu_to_le32(num_pages);

	/* Notify that the configuration space has changed */
	bdev.vdev.ops->signal_config(kvm, &bdev.vdev);
}
188
get_config(struct kvm * kvm,void * dev)189 static u8 *get_config(struct kvm *kvm, void *dev)
190 {
191 struct bln_dev *bdev = dev;
192
193 return ((u8 *)(&bdev->config));
194 }
195
get_config_size(struct kvm * kvm,void * dev)196 static size_t get_config_size(struct kvm *kvm, void *dev)
197 {
198 struct bln_dev *bdev = dev;
199
200 return sizeof(bdev->config);
201 }
202
get_host_features(struct kvm * kvm,void * dev)203 static u64 get_host_features(struct kvm *kvm, void *dev)
204 {
205 return 1 << VIRTIO_BALLOON_F_STATS_VQ;
206 }
207
/* virtio op: set up virtqueue @vq and bind its threadpool job. */
static int init_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct bln_dev *bdev = dev;
	struct virt_queue *queue = &bdev->vqs[vq];

	compat__remove_message(compat_id);

	virtio_init_device_vq(kvm, &bdev->vdev, queue, VIRTIO_BLN_QUEUE_SIZE);
	thread_pool__init_job(&bdev->jobs[vq], kvm, virtio_bln_do_io, queue);

	return 0;
}
223
/* virtio op: tear down virtqueue @vq by cancelling its pending job. */
static void exit_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct bln_dev *balloon = dev;

	thread_pool__cancel_job(&balloon->jobs[vq]);
}
230
/* virtio op: guest kicked queue @vq — schedule its job on the pool. */
static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	thread_pool__do_job(&((struct bln_dev *)dev)->jobs[vq]);

	return 0;
}
239
/* virtio op: return the state of virtqueue @vq. */
static struct virt_queue *get_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct bln_dev *balloon = dev;

	return balloon->vqs + vq;
}
246
/* virtio op: every balloon queue has the same fixed size. */
static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	return VIRTIO_BLN_QUEUE_SIZE;
}
251
/* virtio op: queue resizing is not supported; echo the requested size. */
static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}
257
get_vq_count(struct kvm * kvm,void * dev)258 static unsigned int get_vq_count(struct kvm *kvm, void *dev)
259 {
260 return NUM_VIRT_QUEUES;
261 }
262
/* Dispatch table wiring the balloon device into the generic virtio core. */
struct virtio_ops bln_dev_virtio_ops = {
	.get_config		= get_config,
	.get_config_size	= get_config_size,
	.get_host_features	= get_host_features,
	.init_vq		= init_vq,
	.exit_vq		= exit_vq,
	.notify_vq		= notify_vq,
	.get_vq			= get_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.get_vq_count		= get_vq_count,
};
275
virtio_bln__init(struct kvm * kvm)276 int virtio_bln__init(struct kvm *kvm)
277 {
278 int r;
279
280 if (!kvm->cfg.balloon)
281 return 0;
282
283 kvm_ipc__register_handler(KVM_IPC_BALLOON, handle_mem);
284 kvm_ipc__register_handler(KVM_IPC_STAT, virtio_bln__print_stats);
285
286 bdev.stat_waitfd = eventfd(0, 0);
287 memset(&bdev.config, 0, sizeof(struct virtio_balloon_config));
288
289 r = virtio_init(kvm, &bdev, &bdev.vdev, &bln_dev_virtio_ops,
290 kvm->cfg.virtio_transport, PCI_DEVICE_ID_VIRTIO_BLN,
291 VIRTIO_ID_BALLOON, PCI_CLASS_BLN);
292 if (r < 0)
293 return r;
294
295 if (compat_id == -1)
296 compat_id = virtio_compat_add_message("virtio-balloon", "CONFIG_VIRTIO_BALLOON");
297
298 return 0;
299 }
300 virtio_dev_init(virtio_bln__init);
301
virtio_bln__exit(struct kvm * kvm)302 int virtio_bln__exit(struct kvm *kvm)
303 {
304 virtio_exit(kvm, &bdev.vdev);
305
306 return 0;
307 }
308 virtio_dev_exit(virtio_bln__exit);
309