xref: /qemu/tests/unit/test-block-iothread.c (revision f07a5674cf97b8473e5d06d7b1df9b51e97d553f)
1 /*
2  * Block tests for iothreads
3  *
4  * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/block_int-global-state.h"
28 #include "block/blockjob_int.h"
29 #include "system/block-backend.h"
30 #include "qapi/error.h"
31 #include "qobject/qdict.h"
32 #include "qemu/clang-tsa.h"
33 #include "qemu/main-loop.h"
34 #include "iothread.h"
35 
/*
 * Dummy .bdrv_co_preadv implementation for the "test" driver: accepts
 * any request and reports success without touching the qiov.
 */
static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    return 0;
}
43 
/*
 * Dummy .bdrv_co_pwritev implementation: accepts any request and
 * reports success without reading the qiov.
 */
static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             QEMUIOVector *qiov,
                                             BdrvRequestFlags flags)
{
    return 0;
}
51 
/* Dummy .bdrv_co_pdiscard implementation: every discard "succeeds". */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes)
{
    return 0;
}
57 
/* Dummy .bdrv_co_truncate implementation: every resize "succeeds". */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}
65 
/*
 * Dummy .bdrv_co_block_status implementation: reports the whole queried
 * range (*pnum = count) with status 0, i.e. not allocated in this layer.
 */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}
75 
/*
 * Minimal block driver used by all tests below.  All callbacks succeed
 * unconditionally; note that test_sync_op_block_status() temporarily
 * pokes at .bdrv_co_block_status through this (mutable) struct.
 */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_preadv,
    .bdrv_co_pwritev        = bdrv_test_co_pwritev,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};
86 
87 static void test_sync_op_pread(BdrvChild *c)
88 {
89     uint8_t buf[512];
90     int ret;
91 
92     /* Success */
93     ret = bdrv_pread(c, 0, sizeof(buf), buf, 0);
94     g_assert_cmpint(ret, ==, 0);
95 
96     /* Early error: Negative offset */
97     ret = bdrv_pread(c, -2, sizeof(buf), buf, 0);
98     g_assert_cmpint(ret, ==, -EIO);
99 }
100 
101 static void test_sync_op_pwrite(BdrvChild *c)
102 {
103     uint8_t buf[512] = { 0 };
104     int ret;
105 
106     /* Success */
107     ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0);
108     g_assert_cmpint(ret, ==, 0);
109 
110     /* Early error: Negative offset */
111     ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0);
112     g_assert_cmpint(ret, ==, -EIO);
113 }
114 
115 static void test_sync_op_blk_pread(BlockBackend *blk)
116 {
117     uint8_t buf[512];
118     int ret;
119 
120     /* Success */
121     ret = blk_pread(blk, 0, sizeof(buf), buf, 0);
122     g_assert_cmpint(ret, ==, 0);
123 
124     /* Early error: Negative offset */
125     ret = blk_pread(blk, -2, sizeof(buf), buf, 0);
126     g_assert_cmpint(ret, ==, -EIO);
127 }
128 
129 static void test_sync_op_blk_pwrite(BlockBackend *blk)
130 {
131     uint8_t buf[512] = { 0 };
132     int ret;
133 
134     /* Success */
135     ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0);
136     g_assert_cmpint(ret, ==, 0);
137 
138     /* Early error: Negative offset */
139     ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0);
140     g_assert_cmpint(ret, ==, -EIO);
141 }
142 
143 static void test_sync_op_blk_preadv(BlockBackend *blk)
144 {
145     uint8_t buf[512];
146     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
147     int ret;
148 
149     /* Success */
150     ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0);
151     g_assert_cmpint(ret, ==, 0);
152 
153     /* Early error: Negative offset */
154     ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0);
155     g_assert_cmpint(ret, ==, -EIO);
156 }
157 
158 static void test_sync_op_blk_pwritev(BlockBackend *blk)
159 {
160     uint8_t buf[512] = { 0 };
161     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
162     int ret;
163 
164     /* Success */
165     ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0);
166     g_assert_cmpint(ret, ==, 0);
167 
168     /* Early error: Negative offset */
169     ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0);
170     g_assert_cmpint(ret, ==, -EIO);
171 }
172 
173 static void test_sync_op_blk_preadv_part(BlockBackend *blk)
174 {
175     uint8_t buf[512];
176     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
177     int ret;
178 
179     /* Success */
180     ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0);
181     g_assert_cmpint(ret, ==, 0);
182 
183     /* Early error: Negative offset */
184     ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0);
185     g_assert_cmpint(ret, ==, -EIO);
186 }
187 
188 static void test_sync_op_blk_pwritev_part(BlockBackend *blk)
189 {
190     uint8_t buf[512] = { 0 };
191     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
192     int ret;
193 
194     /* Success */
195     ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0);
196     g_assert_cmpint(ret, ==, 0);
197 
198     /* Early error: Negative offset */
199     ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0);
200     g_assert_cmpint(ret, ==, -EIO);
201 }
202 
203 static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk)
204 {
205     uint8_t buf[512] = { 0 };
206     int ret;
207 
208     /* Late error: Not supported */
209     ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf);
210     g_assert_cmpint(ret, ==, -ENOTSUP);
211 
212     /* Early error: Negative offset */
213     ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf);
214     g_assert_cmpint(ret, ==, -EIO);
215 }
216 
217 static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk)
218 {
219     int ret;
220 
221     /* Success */
222     ret = blk_pwrite_zeroes(blk, 0, 512, 0);
223     g_assert_cmpint(ret, ==, 0);
224 
225     /* Early error: Negative offset */
226     ret = blk_pwrite_zeroes(blk, -2, 512, 0);
227     g_assert_cmpint(ret, ==, -EIO);
228 }
229 
230 static void test_sync_op_load_vmstate(BdrvChild *c)
231 {
232     uint8_t buf[512];
233     int ret;
234 
235     /* Error: Driver does not support snapshots */
236     ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
237     g_assert_cmpint(ret, ==, -ENOTSUP);
238 }
239 
240 static void test_sync_op_save_vmstate(BdrvChild *c)
241 {
242     uint8_t buf[512] = { 0 };
243     int ret;
244 
245     /* Error: Driver does not support snapshots */
246     ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
247     g_assert_cmpint(ret, ==, -ENOTSUP);
248 }
249 
250 static void test_sync_op_pdiscard(BdrvChild *c)
251 {
252     int ret;
253 
254     /* Normal success path */
255     c->bs->open_flags |= BDRV_O_UNMAP;
256     ret = bdrv_pdiscard(c, 0, 512);
257     g_assert_cmpint(ret, ==, 0);
258 
259     /* Early success: UNMAP not supported */
260     c->bs->open_flags &= ~BDRV_O_UNMAP;
261     ret = bdrv_pdiscard(c, 0, 512);
262     g_assert_cmpint(ret, ==, 0);
263 
264     /* Early error: Negative offset */
265     ret = bdrv_pdiscard(c, -2, 512);
266     g_assert_cmpint(ret, ==, -EIO);
267 }
268 
269 static void test_sync_op_blk_pdiscard(BlockBackend *blk)
270 {
271     int ret;
272 
273     /* Early success: UNMAP not supported */
274     ret = blk_pdiscard(blk, 0, 512);
275     g_assert_cmpint(ret, ==, 0);
276 
277     /* Early error: Negative offset */
278     ret = blk_pdiscard(blk, -2, 512);
279     g_assert_cmpint(ret, ==, -EIO);
280 }
281 
282 static void test_sync_op_truncate(BdrvChild *c)
283 {
284     int ret;
285 
286     /* Normal success path */
287     ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
288     g_assert_cmpint(ret, ==, 0);
289 
290     /* Early error: Negative offset */
291     ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
292     g_assert_cmpint(ret, ==, -EINVAL);
293 
294     /* Error: Read-only image */
295     c->bs->open_flags &= ~BDRV_O_RDWR;
296 
297     ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
298     g_assert_cmpint(ret, ==, -EACCES);
299 
300     c->bs->open_flags |= BDRV_O_RDWR;
301 }
302 
303 static void test_sync_op_blk_truncate(BlockBackend *blk)
304 {
305     int ret;
306 
307     /* Normal success path */
308     ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
309     g_assert_cmpint(ret, ==, 0);
310 
311     /* Early error: Negative offset */
312     ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL);
313     g_assert_cmpint(ret, ==, -EINVAL);
314 }
315 
316 /* Disable TSA to make bdrv_test.bdrv_co_block_status writable */
317 static void TSA_NO_TSA test_sync_op_block_status(BdrvChild *c)
318 {
319     int ret;
320     int64_t n;
321 
322     /* Normal success path */
323     ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
324     g_assert_cmpint(ret, ==, 0);
325 
326     /* Early success: No driver support */
327     bdrv_test.bdrv_co_block_status = NULL;
328     ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
329     g_assert_cmpint(ret, ==, 1);
330 
331     /* Early success: bytes = 0 */
332     ret = bdrv_is_allocated(c->bs, 0, 0, &n);
333     g_assert_cmpint(ret, ==, 0);
334 
335     /* Early success: Offset > image size*/
336     ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
337     g_assert_cmpint(ret, ==, 0);
338 }
339 
340 static void test_sync_op_flush(BdrvChild *c)
341 {
342     int ret;
343 
344     /* Normal success path */
345     ret = bdrv_flush(c->bs);
346     g_assert_cmpint(ret, ==, 0);
347 
348     /* Early success: Read-only image */
349     c->bs->open_flags &= ~BDRV_O_RDWR;
350 
351     ret = bdrv_flush(c->bs);
352     g_assert_cmpint(ret, ==, 0);
353 
354     c->bs->open_flags |= BDRV_O_RDWR;
355 }
356 
357 static void test_sync_op_blk_flush(BlockBackend *blk)
358 {
359     BlockDriverState *bs = blk_bs(blk);
360     int ret;
361 
362     /* Normal success path */
363     ret = blk_flush(blk);
364     g_assert_cmpint(ret, ==, 0);
365 
366     /* Early success: Read-only image */
367     bs->open_flags &= ~BDRV_O_RDWR;
368 
369     ret = blk_flush(blk);
370     g_assert_cmpint(ret, ==, 0);
371 
372     bs->open_flags |= BDRV_O_RDWR;
373 }
374 
375 static void test_sync_op_check(BdrvChild *c)
376 {
377     BdrvCheckResult result;
378     int ret;
379 
380     /* Error: Driver does not implement check */
381     ret = bdrv_check(c->bs, &result, 0);
382     g_assert_cmpint(ret, ==, -ENOTSUP);
383 }
384 
/*
 * bdrv_activate() on an already-active image is a no-op; this only
 * checks that the synchronous call completes from another AioContext.
 */
static void test_sync_op_activate(BdrvChild *c)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /* Early success: Image is not inactive */
    bdrv_activate(c->bs, NULL);
}
393 
394 
/*
 * One entry per synchronous operation under test.  Either callback may
 * be NULL when the operation has no node-level (fn) or backend-level
 * (blkfn) variant.
 */
typedef struct SyncOpTest {
    const char *name;            /* GTest path, e.g. "/sync-op/pread" */
    void (*fn)(BdrvChild *c);    /* exercises the bdrv_* entry point */
    void (*blkfn)(BlockBackend *blk); /* exercises the blk_* entry point */
} SyncOpTest;
400 
401 const SyncOpTest sync_op_tests[] = {
402     {
403         .name   = "/sync-op/pread",
404         .fn     = test_sync_op_pread,
405         .blkfn  = test_sync_op_blk_pread,
406     }, {
407         .name   = "/sync-op/pwrite",
408         .fn     = test_sync_op_pwrite,
409         .blkfn  = test_sync_op_blk_pwrite,
410     }, {
411         .name   = "/sync-op/preadv",
412         .fn     = NULL,
413         .blkfn  = test_sync_op_blk_preadv,
414     }, {
415         .name   = "/sync-op/pwritev",
416         .fn     = NULL,
417         .blkfn  = test_sync_op_blk_pwritev,
418     }, {
419         .name   = "/sync-op/preadv_part",
420         .fn     = NULL,
421         .blkfn  = test_sync_op_blk_preadv_part,
422     }, {
423         .name   = "/sync-op/pwritev_part",
424         .fn     = NULL,
425         .blkfn  = test_sync_op_blk_pwritev_part,
426     }, {
427         .name   = "/sync-op/pwrite_compressed",
428         .fn     = NULL,
429         .blkfn  = test_sync_op_blk_pwrite_compressed,
430     }, {
431         .name   = "/sync-op/pwrite_zeroes",
432         .fn     = NULL,
433         .blkfn  = test_sync_op_blk_pwrite_zeroes,
434     }, {
435         .name   = "/sync-op/load_vmstate",
436         .fn     = test_sync_op_load_vmstate,
437     }, {
438         .name   = "/sync-op/save_vmstate",
439         .fn     = test_sync_op_save_vmstate,
440     }, {
441         .name   = "/sync-op/pdiscard",
442         .fn     = test_sync_op_pdiscard,
443         .blkfn  = test_sync_op_blk_pdiscard,
444     }, {
445         .name   = "/sync-op/truncate",
446         .fn     = test_sync_op_truncate,
447         .blkfn  = test_sync_op_blk_truncate,
448     }, {
449         .name   = "/sync-op/block_status",
450         .fn     = test_sync_op_block_status,
451     }, {
452         .name   = "/sync-op/flush",
453         .fn     = test_sync_op_flush,
454         .blkfn  = test_sync_op_blk_flush,
455     }, {
456         .name   = "/sync-op/check",
457         .fn     = test_sync_op_check,
458     }, {
459         .name   = "/sync-op/activate",
460         .fn     = test_sync_op_activate,
461     },
462 };
463 
464 /* Test synchronous operations that run in a different iothread, so we have to
465  * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    GLOBAL_STATE_CODE();

    /* Build blk -> bs with a 64k test image in the main AioContext */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);

    /* Reading the parents list requires the graph read lock */
    bdrv_graph_rdlock_main_loop();
    c = QLIST_FIRST(&bs->parents);
    bdrv_graph_rdunlock_main_loop();

    /* Move the backend to the iothread, run the test callbacks there */
    blk_set_aio_context(blk, ctx, &error_abort);
    if (t->fn) {
        t->fn(c);
    }
    if (t->blkfn) {
        t->blkfn(blk);
    }
    /* Move back to the main context before tearing the nodes down */
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);
}
498 
/* Block job used by test_attach_blockjob. */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;  /* set by .complete to stop the run loop */
    int n;                 /* loop-iteration counter, polled by the test */
} TestBlockJob;
504 
/* .prepare callback: must always run in the main AioContext. */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}
510 
/*
 * Job main loop: spins until .complete sets should_complete, bumping n
 * each iteration so the test can observe progress, and asserting that
 * every iteration runs in the job's current AioContext (which the test
 * switches back and forth underneath us).
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        /* AioContext switches can only happen at pause points */
        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}
532 
/* .complete callback: tells test_job_run's loop to finish. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
538 
539 BlockJobDriver test_job_driver = {
540     .job_driver = {
541         .instance_size  = sizeof(TestBlockJob),
542         .free           = block_job_free,
543         .user_resume    = block_job_user_resume,
544         .run            = test_job_run,
545         .complete       = test_job_complete,
546         .prepare        = test_job_prepare,
547     },
548 };
549 
/*
 * Run a block job on a node and repeatedly move the node between the
 * main AioContext and an iothread while the job is live.  After each
 * switch, waiting for tjob->n to advance proves the job actually runs
 * in the new context.
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Wait until the job has made progress in the main context */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Switch to the iothread; the job must keep making progress */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Switch back to the main context */
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And once more to the iothread */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Complete the job while it lives in the iothread */
    WITH_JOB_LOCK_GUARD() {
        job_complete_sync_locked(&tjob->common.job, &error_abort);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);
}
600 
601 /*
602  * Test that changing the AioContext for one node in a tree (here through blk)
603  * changes all other nodes as well:
604  *
605  *  blk
606  *   |
607  *   |  bs_verify [blkverify]
608  *   |   /               \
609  *   |  /                 \
610  *  bs_a [bdrv_test]    bs_b [bdrv_test]
611  *
612  */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend.  We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext: the change must reach every node in the tree */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}
663 
664 /*
665  * Test that diamonds in the graph don't lead to endless recursion:
666  *
667  *              blk
668  *               |
669  *      bs_verify [blkverify]
670  *       /              \
671  *      /                \
672  *   bs_b [raw]         bs_c[raw]
673  *      \                /
674  *       \              /
675  *       bs_a [bdrv_test]
676  */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a, shared bottom node of the diamond */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c, both raw filters on top of bs_a */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext: must reach all four nodes exactly once */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}
741 
/*
 * Check that an AioContext change on either end of a mirror job drags
 * the job, the filter node and the other end along — and that a
 * BlockBackend that does not allow context changes blocks the switch
 * until explicitly permitted.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job = NULL;
    Error *local_err = NULL;

    /* Create src and target*/
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);

    WITH_JOB_LOCK_GUARD() {
        job = job_get_locked("job0");
    }
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src: target, filter and job must follow */
    bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target: same propagation, other direction */
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
    error_free_or_abort(&local_err);

    /* Nothing may have moved after the failed attempt */
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    /* Return everything to the main context before tearing down */
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}
812 
/*
 * Attach a new filter node on top of a node that already lives in an
 * iothread: the new node must pick up the iothread's AioContext, and a
 * later switch back to the main context must cover it, too.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    /* blk and bs start out in the iothread context */
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    /* Open a raw filter that references "base" as its file child */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* The freshly attached filter must inherit the iothread context */
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    /* Switching back must move the filter as well */
    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}
845 
/*
 * A BlockBackend keeps its iothread AioContext when its node is
 * removed; the node falls back to the main context, and re-inserting
 * it moves it into the backend's context again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    blk_remove_bs(blk);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    bdrv_unref(bs);
    blk_unref(blk);
}
876 
877 int main(int argc, char **argv)
878 {
879     int i;
880 
881     bdrv_init();
882     qemu_init_main_loop(&error_abort);
883 
884     g_test_init(&argc, &argv, NULL);
885 
886     for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
887         const SyncOpTest *t = &sync_op_tests[i];
888         g_test_add_data_func(t->name, t, test_sync_op);
889     }
890 
891     g_test_add_func("/attach/blockjob", test_attach_blockjob);
892     g_test_add_func("/attach/second_node", test_attach_second_node);
893     g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
894     g_test_add_func("/propagate/basic", test_propagate_basic);
895     g_test_add_func("/propagate/diamond", test_propagate_diamond);
896     g_test_add_func("/propagate/mirror", test_propagate_mirror);
897 
898     return g_test_run();
899 }
900