/*
 * QTest testcase for vhost-user
 *
 * Copyright (c) 2014 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define QEMU_GLIB_COMPAT_H
#include <glib.h>

#include "libqtest.h"
#include "qemu/option.h"
#include "sysemu/char.h"
#include "sysemu/sysemu.h"

#include <linux/vhost.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <qemu/sockets.h>

/* GLIB version compatibility flags */
#if GLIB_CHECK_VERSION(2, 28, 0)
#define HAVE_MONOTONIC_TIME
#endif

#if GLIB_CHECK_VERSION(2, 32, 0)
#define HAVE_MUTEX_INIT
#define HAVE_COND_INIT
#define HAVE_THREAD_NEW
#endif

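/*
 * Guest RAM is allocated from a shared, file-backed memory backend on
 * hugetlbfs so that this test process can mmap it through the file
 * descriptors passed over the vhost-user socket.
 */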
#define QEMU_CMD_ACCEL  " -machine accel=tcg"
#define QEMU_CMD_MEM    " -m 512 -object memory-backend-file,id=mem,size=512M,"\
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=chr0,path=%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=net0,chardev=chr0,vhostforce"
#define QEMU_CMD_NET    " -device virtio-net-pci,netdev=net0 "
#define QEMU_CMD_ROM    " -option-rom ../pc-bios/pxe-virtio.rom"

#define QEMU_CMD        QEMU_CMD_ACCEL QEMU_CMD_MEM QEMU_CMD_CHR \
                        QEMU_CMD_NETDEV QEMU_CMD_NET QEMU_CMD_ROM

#define HUGETLBFS_MAGIC       0x958458f6

/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_MAX
} VhostUserRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
    };
} QEMU_PACKED VhostUserMsg;

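/* Dummy message instance, used only for the sizeof() calculations below */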
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/

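/*
 * State shared between the chardev read handler (the vhost-user backend
 * side) and the test case, protected by data_mutex and signalled via
 * data_cond.
 */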
int fds_num = 0, fds[VHOST_MEMORY_MAX_NREGIONS];
static VhostUserMemory memory;
static GMutex *data_mutex;
static GCond *data_cond;

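/*
 * GLib compatibility wrappers: use the monotonic clock and the 2.32+
 * threading API when available, otherwise fall back to the older,
 * now-deprecated g_mutex_new()/g_cond_new()/g_thread_create() interfaces.
 */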
static gint64 _get_time(void)
{
#ifdef HAVE_MONOTONIC_TIME
    return g_get_monotonic_time();
#else
    GTimeVal time;
    g_get_current_time(&time);

    return time.tv_sec * G_TIME_SPAN_SECOND + time.tv_usec;
#endif
}

static GMutex *_mutex_new(void)
{
    GMutex *mutex;

#ifdef HAVE_MUTEX_INIT
    mutex = g_new(GMutex, 1);
    g_mutex_init(mutex);
#else
    mutex = g_mutex_new();
#endif

    return mutex;
}

static void _mutex_free(GMutex *mutex)
{
#ifdef HAVE_MUTEX_INIT
    g_mutex_clear(mutex);
    g_free(mutex);
#else
    g_mutex_free(mutex);
#endif
}

static GCond *_cond_new(void)
{
    GCond *cond;

#ifdef HAVE_COND_INIT
    cond = g_new(GCond, 1);
    g_cond_init(cond);
#else
    cond = g_cond_new();
#endif

    return cond;
}

static gboolean _cond_wait_until(GCond *cond, GMutex *mutex, gint64 end_time)
{
    gboolean ret = FALSE;
#ifdef HAVE_COND_INIT
    ret = g_cond_wait_until(cond, mutex, end_time);
#else
    GTimeVal time = { end_time / G_TIME_SPAN_SECOND,
                      end_time % G_TIME_SPAN_SECOND };
    ret = g_cond_timed_wait(cond, mutex, &time);
#endif
    return ret;
}

static void _cond_free(GCond *cond)
{
#ifdef HAVE_COND_INIT
    g_cond_clear(cond);
    g_free(cond);
#else
    g_cond_free(cond);
#endif
}

static GThread *_thread_new(const gchar *name, GThreadFunc func, gpointer data)
{
    GThread *thread = NULL;
    GError *error = NULL;
#ifdef HAVE_THREAD_NEW
    thread = g_thread_try_new(name, func, data, &error);
#else
    thread = g_thread_create(func, data, TRUE, &error);
#endif
    return thread;
}

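/*
 * Test body: wait (up to five seconds) for the backend to receive the
 * memory table, mmap the region starting at guest physical address 0 and
 * verify that it matches what the guest sees through qtest readl().
 */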
static void read_guest_mem(void)
{
    uint32_t *guest_mem;
    gint64 end_time;
    int i, j;

    g_mutex_lock(data_mutex);

    end_time = _get_time() + 5 * G_TIME_SPAN_SECOND;
    while (!fds_num) {
        if (!_cond_wait_until(data_cond, data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(fds_num, >, 0);
    g_assert_cmpint(fds_num, ==, memory.nregions);

    /* iterate all regions */
    for (i = 0; i < fds_num; i++) {

        /* We'll check only the region starting at 0x0 */
        if (memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(memory.regions[i].memory_size, >, 1024);

        guest_mem = mmap(0, memory.regions[i].memory_size,
                         PROT_READ | PROT_WRITE, MAP_SHARED, fds[i], 0);
        g_assert(guest_mem != MAP_FAILED);

        for (j = 0; j < 256; j++) {
            uint32_t a = readl(memory.regions[i].guest_phys_addr + j * 4);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        munmap(guest_mem, memory.regions[i].memory_size);
    }

    g_mutex_unlock(data_mutex);
}

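/*
 * Run a GLib main loop in a separate thread so the chardev keeps being
 * serviced while the test itself blocks.
 */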
static void *thread_function(void *data)
{
    GMainLoop *loop;
    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    return NULL;
}

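/*
 * Minimal vhost-user backend: read each message header from the socket
 * chardev, pull in the payload and any passed file descriptors, and answer
 * the requests QEMU expects a reply to.
 */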
static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}

static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    CharDriverState *chr = opaque;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd;

    if (size != VHOST_USER_HDR_SIZE) {
        g_test_message("Wrong message size received %d\n", size);
        return;
    }

    g_mutex_lock(data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        qemu_chr_fe_read_all(chr, p, msg.size);
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.u64);
        msg.u64 = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.state);
        msg.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&memory, &msg.memory, sizeof(msg.memory));
        fds_num = qemu_chr_fe_get_msgfds(chr, fds, sizeof(fds) / sizeof(int));

        /* signal the test that it can continue */
        g_cond_signal(data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        qemu_chr_fe_get_msgfds(chr, &fd, 1);
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        qemu_set_nonblock(fd);
        break;
    default:
        break;
    }
    g_mutex_unlock(data_mutex);
}

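/*
 * Locate a usable hugetlbfs mount (QTEST_HUGETLBFS_PATH, or /hugetlbfs by
 * default); the test is silently skipped when none is accessible.
 */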
static const char *init_hugepagefs(void)
{
    const char *path;
    struct statfs fs;
    int ret;

    path = getenv("QTEST_HUGETLBFS_PATH");
    if (!path) {
        path = "/hugetlbfs";
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        g_test_message("access on path (%s): %s\n", path, strerror(errno));
        return NULL;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        g_test_message("statfs on path (%s): %s\n", path, strerror(errno));
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        g_test_message("Warning: path not on HugeTLBFS: %s\n", path);
        return NULL;
    }

    return path;
}

int main(int argc, char **argv)
{
    QTestState *s = NULL;
    CharDriverState *chr = NULL;
    const char *hugefs = 0;
    char *socket_path = 0;
    char *qemu_cmd = 0;
    char *chr_path = 0;
    int ret;

    g_test_init(&argc, &argv, NULL);

    module_call_init(MODULE_INIT_QOM);

    hugefs = init_hugepagefs();
    if (!hugefs) {
        return 0;
    }

    socket_path = g_strdup_printf("/tmp/vhost-%d.sock", getpid());

    /* create char dev and add read handlers */
    qemu_add_opts(&qemu_chardev_opts);
    chr_path = g_strdup_printf("unix:%s,server,nowait", socket_path);
    chr = qemu_chr_new("chr0", chr_path, NULL);
    g_free(chr_path);
    qemu_chr_add_handlers(chr, chr_can_read, chr_read, NULL, chr);

    /* run the main loop thread so the chardev may operate */
    data_mutex = _mutex_new();
    data_cond = _cond_new();
    _thread_new(NULL, thread_function, NULL);

    qemu_cmd = g_strdup_printf(QEMU_CMD, hugefs, socket_path);
    s = qtest_start(qemu_cmd);
    g_free(qemu_cmd);

    qtest_add_func("/vhost-user/read-guest-mem", read_guest_mem);

    ret = g_test_run();

    if (s) {
        qtest_quit(s);
    }

    /* cleanup */
    unlink(socket_path);
    g_free(socket_path);
    _cond_free(data_cond);
    _mutex_free(data_mutex);

    return ret;
}