xref: /qemu/tests/qtest/vhost-user-test.c (revision f48d994fb50c49093dd1dfe64c0d7314c6b62faf)
1 /*
2  * QTest testcase for the vhost-user
3  *
4  * Copyright (c) 2014 Virtual Open Systems Sarl.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  *
9  */
10 
11 #include "qemu/osdep.h"
12 
13 #include "libqtest-single.h"
14 #include "qapi/error.h"
15 #include "qapi/qmp/qdict.h"
16 #include "qemu/config-file.h"
17 #include "qemu/option.h"
18 #include "qemu/range.h"
19 #include "qemu/sockets.h"
20 #include "chardev/char-fe.h"
21 #include "qemu/memfd.h"
22 #include "qemu/module.h"
23 #include "sysemu/sysemu.h"
24 #include "libqos/libqos.h"
25 #include "libqos/pci-pc.h"
26 #include "libqos/virtio-pci.h"
27 
28 #include "libqos/malloc-pc.h"
29 #include "libqos/qgraph_internal.h"
30 #include "hw/virtio/virtio-net.h"
31 
32 #include "standard-headers/linux/vhost_types.h"
33 #include "standard-headers/linux/virtio_ids.h"
34 #include "standard-headers/linux/virtio_net.h"
35 
36 #ifdef CONFIG_LINUX
37 #include <sys/vfs.h>
38 #endif
39 
40 
41 #define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
42                         "mem-path=%s,share=on -numa node,memdev=mem"
43 #define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
44                         " -numa node,memdev=mem"
45 #define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
46 #define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"
47 
48 #define HUGETLBFS_MAGIC       0x958458f6
49 
50 /*********** FROM hw/virtio/vhost-user.c *************************************/
51 
52 #define VHOST_MEMORY_MAX_NREGIONS    8
53 #define VHOST_MAX_VIRTQUEUES    0x100
54 
55 #define VHOST_USER_F_PROTOCOL_FEATURES 30
56 #define VHOST_USER_PROTOCOL_F_MQ 0
57 #define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
58 #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
59 
60 #define VHOST_LOG_PAGE 0x1000
61 
/*
 * Message types understood by this mock backend.  The numeric values
 * are part of the vhost-user wire protocol and must stay in sync with
 * hw/virtio/vhost-user.c (see the section marker above).
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_MAX
} VhostUserRequest;
84 
/* One guest memory region as announced in VHOST_USER_SET_MEM_TABLE. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* GPA of the region start */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* QEMU-side virtual address */
    uint64_t mmap_offset;       /* offset of guest data within the fd mapping */
} VhostUserMemoryRegion;
91 
/* Payload of VHOST_USER_SET_MEM_TABLE: a fixed-size table of regions. */
typedef struct VhostUserMemory {
    uint32_t nregions;          /* number of valid entries in regions[] */
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
97 
/* Payload of VHOST_USER_SET_LOG_BASE: describes the dirty-log mapping. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
102 
/*
 * On-the-wire vhost-user message: fixed header (request/flags/size)
 * followed by a request-specific payload.  Packed so the layout
 * matches the protocol byte-for-byte.
 */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;
120 
/* Dummy instance used only as a sizeof() source for the macros below. */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)
127 
128 /* The version of the protocol we support */
129 #define VHOST_USER_VERSION    (0x1)
130 /*****************************************************************************/
131 
/* State machine for the connect-fail / flags-mismatch tests. */
enum {
    TEST_FLAGS_OK,          /* normal operation (or recovered) */
    TEST_FLAGS_DISCONNECT,  /* disconnect on first SET_FEATURES */
    TEST_FLAGS_BAD,         /* answer GET_FEATURES with bogus features */
    TEST_FLAGS_END,         /* bad reply sent; waiting for close event */
};
138 
/* Device types for struct vhost_user_ops.type (net only, for now). */
enum {
    VHOST_USER_NET,
};
142 
/*
 * State of one mock vhost-user backend.  Fields below data_mutex are
 * shared between the chardev thread and the test thread and must be
 * accessed with data_mutex held; data_cond is broadcast on changes.
 */
typedef struct TestServer {
    gchar *socket_path;     /* vhost-user unix socket */
    gchar *mig_path;        /* migration stream unix socket */
    gchar *chr_name;        /* chardev id used on the QEMU command line */
    gchar *tmpfs;           /* per-server temp dir holding the sockets */
    CharBackend chr;
    int fds_num;            /* number of valid entries in fds[] */
    int fds[VHOST_MEMORY_MAX_NREGIONS]; /* memory-region fds from SET_MEM_TABLE */
    VhostUserMemory memory; /* last received memory table */
    GMainContext *context;  /* context the chardev handlers run in */
    GMainLoop *loop;
    GThread *thread;        /* runs loop so the chardev can operate */
    GMutex data_mutex;
    GCond data_cond;
    int log_fd;             /* dirty-log fd from SET_LOG_BASE, -1 if none */
    uint64_t rings;         /* bitmask of started vrings (SET_VRING_BASE) */
    bool test_fail;         /* drop the first connection (connect-fail test) */
    int test_flags;         /* TEST_FLAGS_* state machine */
    int queues;             /* queue pairs advertised via GET_QUEUE_NUM */
    struct vhost_user_ops *vu_ops;
} TestServer;
164 
/*
 * Per-device-type hooks: how to build the QEMU command line and how to
 * answer the device-specific vhost-user requests in chr_read().
 */
struct vhost_user_ops {
    /* Device types. */
    int type;
    void (*append_opts)(TestServer *s, GString *cmd_line,
            const char *chr_opts);

    /* VHOST-USER commands. */
    void (*set_features)(TestServer *s, CharBackend *chr,
            VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
            CharBackend *chr, VhostUserMsg *msg);
};
177 
178 static const char *init_hugepagefs(void);
179 static TestServer *test_server_new(const gchar *name,
180         struct vhost_user_ops *ops);
181 static void test_server_free(TestServer *server);
182 static void test_server_listen(TestServer *server);
183 
/* Memory-backend selection for append_mem_opts(). */
enum test_memfd {
    TEST_MEMFD_AUTO,    /* use memfd if the host supports sealing */
    TEST_MEMFD_YES,     /* force memory-backend-memfd */
    TEST_MEMFD_NO,      /* force file-backed memory (hugetlbfs or tmpfs) */
};
189 
190 static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
191                              const char *chr_opts)
192 {
193     g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
194                            s->chr_name, s->socket_path,
195                            chr_opts, s->chr_name);
196 }
197 
198 static void append_mem_opts(TestServer *server, GString *cmd_line,
199                             int size, enum test_memfd memfd)
200 {
201     if (memfd == TEST_MEMFD_AUTO) {
202         memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
203                                                     : TEST_MEMFD_NO;
204     }
205 
206     if (memfd == TEST_MEMFD_YES) {
207         g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
208     } else {
209         const char *root = init_hugepagefs() ? : server->tmpfs;
210 
211         g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
212     }
213 }
214 
/*
 * Wait (up to 5s) for VHOST_USER_SET_MEM_TABLE to deliver the memory
 * region fds, then look for a region at guest physical address 0x0.
 * Returns false and marks the test skipped if no such region exists.
 * A timeout fails the test via g_assert().
 */
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    /*
     * NOTE(review): s->memory is read below without the mutex; this is
     * only safe if no further SET_MEM_TABLE can arrive concurrently —
     * confirm against the chardev thread's lifecycle.
     */
    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}
251 
252 static void read_guest_mem_server(QTestState *qts, TestServer *s)
253 {
254     uint8_t *guest_mem;
255     int i, j;
256     size_t size;
257 
258     g_mutex_lock(&s->data_mutex);
259 
260     /* iterate all regions */
261     for (i = 0; i < s->fds_num; i++) {
262 
263         /* We'll check only the region statring at 0x0*/
264         if (s->memory.regions[i].guest_phys_addr != 0x0) {
265             continue;
266         }
267 
268         g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);
269 
270         size = s->memory.regions[i].memory_size +
271             s->memory.regions[i].mmap_offset;
272 
273         guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
274                          MAP_SHARED, s->fds[i], 0);
275 
276         g_assert(guest_mem != MAP_FAILED);
277         guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));
278 
279         for (j = 0; j < 1024; j++) {
280             uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
281             uint32_t b = guest_mem[j];
282 
283             g_assert_cmpint(a, ==, b);
284         }
285 
286         munmap(guest_mem, s->memory.regions[i].memory_size);
287     }
288 
289     g_mutex_unlock(&s->data_mutex);
290 }
291 
292 static void *thread_function(void *data)
293 {
294     GMainLoop *loop = data;
295     g_main_loop_run(loop);
296     return NULL;
297 }
298 
299 static int chr_can_read(void *opaque)
300 {
301     return VHOST_USER_HDR_SIZE;
302 }
303 
304 static void chr_read(void *opaque, const uint8_t *buf, int size)
305 {
306     g_autoptr(GError) err = NULL;
307     TestServer *s = opaque;
308     CharBackend *chr = &s->chr;
309     VhostUserMsg msg;
310     uint8_t *p = (uint8_t *) &msg;
311     int fd = -1;
312 
313     if (s->test_fail) {
314         qemu_chr_fe_disconnect(chr);
315         /* now switch to non-failure */
316         s->test_fail = false;
317     }
318 
319     if (size != VHOST_USER_HDR_SIZE) {
320         qos_printf("%s: Wrong message size received %d\n", __func__, size);
321         return;
322     }
323 
324     g_mutex_lock(&s->data_mutex);
325     memcpy(p, buf, VHOST_USER_HDR_SIZE);
326 
327     if (msg.size) {
328         p += VHOST_USER_HDR_SIZE;
329         size = qemu_chr_fe_read_all(chr, p, msg.size);
330         if (size != msg.size) {
331             qos_printf("%s: Wrong message size received %d != %d\n",
332                        __func__, size, msg.size);
333             return;
334         }
335     }
336 
337     switch (msg.request) {
338     case VHOST_USER_GET_FEATURES:
339         /* send back features to qemu */
340         msg.flags |= VHOST_USER_REPLY_MASK;
341         msg.size = sizeof(m.payload.u64);
342         msg.payload.u64 = 0x1ULL << VHOST_F_LOG_ALL |
343             0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
344         if (s->queues > 1) {
345             msg.payload.u64 |= 0x1ULL << VIRTIO_NET_F_MQ;
346         }
347         if (s->test_flags >= TEST_FLAGS_BAD) {
348             msg.payload.u64 = 0;
349             s->test_flags = TEST_FLAGS_END;
350         }
351         p = (uint8_t *) &msg;
352         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
353         break;
354 
355     case VHOST_USER_SET_FEATURES:
356         if (s->vu_ops->set_features) {
357             s->vu_ops->set_features(s, chr, &msg);
358         }
359         break;
360 
361     case VHOST_USER_SET_OWNER:
362         /*
363          * We don't need to do anything here, the remote is just
364          * letting us know it is in charge. Just log it.
365          */
366         qos_printf("set_owner: start of session\n");
367         break;
368 
369     case VHOST_USER_GET_PROTOCOL_FEATURES:
370         if (s->vu_ops->get_protocol_features) {
371             s->vu_ops->get_protocol_features(s, chr, &msg);
372         }
373         break;
374 
375     case VHOST_USER_SET_PROTOCOL_FEATURES:
376         /*
377          * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for
378          * the remote end to send this. There is no handshake reply so
379          * just log the details for debugging.
380          */
381         qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
382         break;
383 
384         /*
385          * A real vhost-user backend would actually set the size and
386          * address of the vrings but we can simply report them.
387          */
388     case VHOST_USER_SET_VRING_NUM:
389         qos_printf("set_vring_num: %d/%d\n",
390                    msg.payload.state.index, msg.payload.state.num);
391         break;
392     case VHOST_USER_SET_VRING_ADDR:
393         qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
394                    msg.payload.addr.avail_user_addr,
395                    msg.payload.addr.desc_user_addr,
396                    msg.payload.addr.used_user_addr);
397         break;
398 
399     case VHOST_USER_GET_VRING_BASE:
400         /* send back vring base to qemu */
401         msg.flags |= VHOST_USER_REPLY_MASK;
402         msg.size = sizeof(m.payload.state);
403         msg.payload.state.num = 0;
404         p = (uint8_t *) &msg;
405         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
406 
407         assert(msg.payload.state.index < s->queues * 2);
408         s->rings &= ~(0x1ULL << msg.payload.state.index);
409         g_cond_broadcast(&s->data_cond);
410         break;
411 
412     case VHOST_USER_SET_MEM_TABLE:
413         /* received the mem table */
414         memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
415         s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
416                                             G_N_ELEMENTS(s->fds));
417 
418         /* signal the test that it can continue */
419         g_cond_broadcast(&s->data_cond);
420         break;
421 
422     case VHOST_USER_SET_VRING_KICK:
423     case VHOST_USER_SET_VRING_CALL:
424         /* consume the fd */
425         qemu_chr_fe_get_msgfds(chr, &fd, 1);
426         /*
427          * This is a non-blocking eventfd.
428          * The receive function forces it to be blocking,
429          * so revert it back to non-blocking.
430          */
431         g_unix_set_fd_nonblocking(fd, true, &err);
432         g_assert_no_error(err);
433         break;
434 
435     case VHOST_USER_SET_LOG_BASE:
436         if (s->log_fd != -1) {
437             close(s->log_fd);
438             s->log_fd = -1;
439         }
440         qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
441         msg.flags |= VHOST_USER_REPLY_MASK;
442         msg.size = 0;
443         p = (uint8_t *) &msg;
444         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);
445 
446         g_cond_broadcast(&s->data_cond);
447         break;
448 
449     case VHOST_USER_SET_VRING_BASE:
450         assert(msg.payload.state.index < s->queues * 2);
451         s->rings |= 0x1ULL << msg.payload.state.index;
452         g_cond_broadcast(&s->data_cond);
453         break;
454 
455     case VHOST_USER_GET_QUEUE_NUM:
456         msg.flags |= VHOST_USER_REPLY_MASK;
457         msg.size = sizeof(m.payload.u64);
458         msg.payload.u64 = s->queues;
459         p = (uint8_t *) &msg;
460         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
461         break;
462 
463     case VHOST_USER_SET_VRING_ENABLE:
464         /*
465          * Another case we ignore as we don't need to respond. With a
466          * fully functioning vhost-user we would enable/disable the
467          * vring monitoring.
468          */
469         qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
470                    msg.payload.state.num ? "enabled" : "disabled");
471         break;
472 
473     default:
474         qos_printf("vhost-user: un-handled message: %d\n", msg.request);
475         break;
476     }
477 
478     g_mutex_unlock(&s->data_mutex);
479 }
480 
/*
 * Return the hugetlbfs mount point named by QTEST_HUGETLBFS_PATH, or
 * NULL if the variable is unset or the path is unusable (the latter
 * also fails the test).  The result is cached across calls; only
 * available on Linux.
 */
static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    /* already validated on a previous call */
    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        qos_printf("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    /* retry statfs if interrupted by a signal */
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        qos_printf("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    /* the directory must actually be a hugetlbfs mount */
    if (fs.f_type != HUGETLBFS_MAGIC) {
        qos_printf("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}
524 
/*
 * Allocate and initialise a TestServer: its own GMainContext/loop
 * (run on a helper thread so the chardev can operate), a private temp
 * directory for the vhost-user and migration sockets, and the
 * synchronisation primitives.  Freed with test_server_free().
 */
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    /* g_autofree: tmpfs is freed at scope exit; a copy is kept below */
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        /* NOTE(review): tmpfs is NULL here, so the %s prints "(null)" */
        g_test_message("g_dir_make_tmp on path (%s): %s", tmpfs,
                       err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;
    server->queues = 1;
    server->vu_ops = ops;

    return server;
}
560 
561 static void chr_event(void *opaque, QEMUChrEvent event)
562 {
563     TestServer *s = opaque;
564 
565     if (s->test_flags == TEST_FLAGS_END &&
566         event == CHR_EVENT_CLOSED) {
567         s->test_flags = TEST_FLAGS_OK;
568     }
569 }
570 
571 static void test_server_create_chr(TestServer *server, const gchar *opt)
572 {
573     g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
574                                                  server->socket_path, opt);
575     Chardev *chr;
576 
577     chr = qemu_chr_new(server->chr_name, chr_path, server->context);
578     g_assert(chr);
579 
580     qemu_chr_fe_init(&server->chr, chr, &error_abort);
581     qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
582                              chr_event, NULL, server, server->context, true);
583 }
584 
585 static void test_server_listen(TestServer *server)
586 {
587     test_server_create_chr(server, ",server=on,wait=off");
588 }
589 
/*
 * Tear down a TestServer created by test_server_new(): stop the loop
 * thread, drain pending sources, remove the sockets and temp dir,
 * release the chardev, close received fds, and free all allocations.
 */
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    /*
     * NOTE(review): this iterates the default context (NULL), while
     * the server's sources live on server->context — confirm intent.
     */
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    /* rmdir only succeeds once both sockets above are gone */
    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    /* close the memory-region fds received via SET_MEM_TABLE */
    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
632 
633 static void wait_for_log_fd(TestServer *s)
634 {
635     gint64 end_time;
636 
637     g_mutex_lock(&s->data_mutex);
638     end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
639     while (s->log_fd == -1) {
640         if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
641             /* timeout has passed */
642             g_assert(s->log_fd != -1);
643             break;
644         }
645     }
646 
647     g_mutex_unlock(&s->data_mutex);
648 }
649 
650 static void write_guest_mem(TestServer *s, uint32_t seed)
651 {
652     uint32_t *guest_mem;
653     int i, j;
654     size_t size;
655 
656     /* iterate all regions */
657     for (i = 0; i < s->fds_num; i++) {
658 
659         /* We'll write only the region statring at 0x0 */
660         if (s->memory.regions[i].guest_phys_addr != 0x0) {
661             continue;
662         }
663 
664         g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);
665 
666         size = s->memory.regions[i].memory_size +
667             s->memory.regions[i].mmap_offset;
668 
669         guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
670                          MAP_SHARED, s->fds[i], 0);
671 
672         g_assert(guest_mem != MAP_FAILED);
673         guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));
674 
675         for (j = 0; j < 256; j++) {
676             guest_mem[j] = seed + j;
677         }
678 
679         munmap(guest_mem, s->memory.regions[i].memory_size);
680         break;
681     }
682 }
683 
684 static guint64 get_log_size(TestServer *s)
685 {
686     guint64 log_size = 0;
687     int i;
688 
689     for (i = 0; i < s->memory.nregions; ++i) {
690         VhostUserMemoryRegion *reg = &s->memory.regions[i];
691         guint64 last = range_get_last(reg->guest_phys_addr,
692                                        reg->memory_size);
693         log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
694     }
695 
696     return log_size;
697 }
698 
/*
 * Custom GSource that watches a migration pair; its check() callback
 * asserts that src and dest never have rings running simultaneously.
 */
typedef struct TestMigrateSource {
    GSource source;     /* must be first: cast from GSource* */
    TestServer *src;
    TestServer *dest;
} TestMigrateSource;
704 
705 static gboolean
706 test_migrate_source_check(GSource *source)
707 {
708     TestMigrateSource *t = (TestMigrateSource *)source;
709     gboolean overlap = t->src->rings && t->dest->rings;
710 
711     g_assert(!overlap);
712 
713     return FALSE;
714 }
715 
/* Only .check is needed; the source never dispatches or prepares. */
GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};
719 
720 static void vhost_user_test_cleanup(void *s)
721 {
722     TestServer *server = s;
723 
724     qos_invalidate_command_line();
725     test_server_free(server);
726 }
727 
728 static void *vhost_user_test_setup(GString *cmd_line, void *arg)
729 {
730     TestServer *server = test_server_new("vhost-user-test", arg);
731     test_server_listen(server);
732 
733     append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
734     server->vu_ops->append_opts(server, cmd_line, "");
735 
736     g_test_queue_destroy(vhost_user_test_cleanup, server);
737 
738     return server;
739 }
740 
741 static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
742 {
743     TestServer *server = test_server_new("vhost-user-test", arg);
744     test_server_listen(server);
745 
746     append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
747     server->vu_ops->append_opts(server, cmd_line, "");
748 
749     g_test_queue_destroy(vhost_user_test_cleanup, server);
750 
751     return server;
752 }
753 
754 static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
755 {
756     TestServer *server = arg;
757 
758     if (!wait_for_fds(server)) {
759         return;
760     }
761 
762     read_guest_mem_server(global_qtest, server);
763 }
764 
/*
 * Migration test: start a destination QEMU with an identical command
 * line plus -incoming, throttle migration so the dirty log can be
 * inspected, dirty one page from the backend side, then let migration
 * finish and verify guest memory on the destination backend.
 */
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    /* 256M of guest memory => fixed expected dirty-log size */
    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    /* watch that src and dest rings never run concurrently */
    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    /* source stops, destination resumes: migration converged */
    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}
845 
846 static void wait_for_rings_started(TestServer *s, size_t count)
847 {
848     gint64 end_time;
849 
850     g_mutex_lock(&s->data_mutex);
851     end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
852     while (ctpop64(s->rings) != count) {
853         if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
854             /* timeout has passed */
855             g_assert_cmpint(ctpop64(s->rings), ==, count);
856             break;
857         }
858     }
859 
860     g_mutex_unlock(&s->data_mutex);
861 }
862 
/* Client-mode chardev that retries the connection every second. */
static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect=1");
}
867 
868 static gboolean
869 reconnect_cb(gpointer user_data)
870 {
871     TestServer *s = user_data;
872 
873     qemu_chr_fe_disconnect(&s->chr);
874 
875     return FALSE;
876 }
877 
878 static gpointer
879 connect_thread(gpointer data)
880 {
881     TestServer *s = data;
882 
883     /* wait for qemu to start before first try, to avoid extra warnings */
884     g_usleep(G_USEC_PER_SEC);
885     test_server_connect(s);
886 
887     return NULL;
888 }
889 
890 static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
891 {
892     TestServer *s = test_server_new("reconnect", arg);
893 
894     g_thread_new("connect", connect_thread, s);
895     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
896     s->vu_ops->append_opts(s, cmd_line, ",server=on");
897 
898     g_test_queue_destroy(vhost_user_test_cleanup, s);
899 
900     return s;
901 }
902 
903 static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
904 {
905     TestServer *s = arg;
906     GSource *src;
907 
908     if (!wait_for_fds(s)) {
909         return;
910     }
911 
912     wait_for_rings_started(s, 2);
913 
914     /* reconnect */
915     s->fds_num = 0;
916     s->rings = 0;
917     src = g_idle_source_new();
918     g_source_set_callback(src, reconnect_cb, s, NULL);
919     g_source_attach(src, s->context);
920     g_source_unref(src);
921     g_assert(wait_for_fds(s));
922     wait_for_rings_started(s, 2);
923 }
924 
925 static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
926 {
927     TestServer *s = test_server_new("connect-fail", arg);
928 
929     s->test_fail = true;
930 
931     g_thread_new("connect", connect_thread, s);
932     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
933     s->vu_ops->append_opts(s, cmd_line, ",server=on");
934 
935     g_test_queue_destroy(vhost_user_test_cleanup, s);
936 
937     return s;
938 }
939 
940 static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
941 {
942     TestServer *s = test_server_new("flags-mismatch", arg);
943 
944     s->test_flags = TEST_FLAGS_DISCONNECT;
945 
946     g_thread_new("connect", connect_thread, s);
947     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
948     s->vu_ops->append_opts(s, cmd_line, ",server=on");
949 
950     g_test_queue_destroy(vhost_user_test_cleanup, s);
951 
952     return s;
953 }
954 
955 static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
956 {
957     TestServer *s = arg;
958 
959     if (!wait_for_fds(s)) {
960         return;
961     }
962     wait_for_rings_started(s, 2);
963 }
964 
965 static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
966 {
967     TestServer *s = vhost_user_test_setup(cmd_line, arg);
968 
969     s->queues = 2;
970     g_string_append_printf(cmd_line,
971                            " -set netdev.hs0.queues=%d"
972                            " -global virtio-net-pci.vectors=%d",
973                            s->queues, s->queues * 2 + 2);
974 
975     return s;
976 }
977 
978 static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
979 {
980     TestServer *s = arg;
981 
982     wait_for_rings_started(s, s->queues * 2);
983 }
984 
985 static void vu_net_set_features(TestServer *s, CharBackend *chr,
986         VhostUserMsg *msg)
987 {
988     g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
989     if (s->test_flags == TEST_FLAGS_DISCONNECT) {
990         qemu_chr_fe_disconnect(chr);
991         s->test_flags = TEST_FLAGS_BAD;
992     }
993 }
994 
995 static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
996         VhostUserMsg *msg)
997 {
998     /* send back features to qemu */
999     msg->flags |= VHOST_USER_REPLY_MASK;
1000     msg->size = sizeof(m.payload.u64);
1001     msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
1002     msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
1003     if (s->queues > 1) {
1004         msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
1005     }
1006     qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
1007 }
1008 
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    .append_opts = append_vhost_net_opts,   /* chardev + netdev cmdline */

    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
1018 
/*
 * Register all vhost-user qgraph tests against virtio-net.  Each test
 * runs in a subprocess; `opts.before` selects the setup variant and
 * `opts.arg` supplies the device ops used by the mock backend.
 */
static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_net_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    /* memfd variant only where the host supports sealed memfds */
    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
libqos_init(register_vhost_user_test);
1063