/*
 * Block node draining tests
 *
 * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "iothread.h"

static QemuEvent done_event;

typedef struct BDRVTestState {
    int drain_count;
    AioContext *bh_indirection_ctx;
    bool sleep_in_drain_begin;
} BDRVTestState;

static void coroutine_fn sleep_in_drain_begin(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
    bdrv_dec_in_flight(bs);
}

static void bdrv_test_drain_begin(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count++;
    if (s->sleep_in_drain_begin) {
        Coroutine *co = qemu_coroutine_create(sleep_in_drain_begin, bs);
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), co);
    }
}

static void bdrv_test_drain_end(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count--;
}

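/*
 * The test driver expects to be closed while the node is drained:
 * .bdrv_close asserts that at least one drain_begin has been seen
 * without a matching drain_end.
 */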
static void bdrv_test_close(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    g_assert_cmpint(s->drain_count, >, 0);
}

static void co_reenter_bh(void *opaque)
{
    aio_co_wake(opaque);
}

static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    BDRVTestState *s = bs->opaque;

    /* We want this request to stay until the polling loop in drain waits for
     * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
     * first and polls its result, too, but it shouldn't accidentally complete
     * this request yet. */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);

    if (s->bh_indirection_ctx) {
        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return 0;
}

static int bdrv_test_co_change_backing_file(BlockDriverState *bs,
                                            const char *backing_file,
                                            const char *backing_fmt)
{
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = sizeof(BDRVTestState),
    .supports_backing       = true,

    .bdrv_close             = bdrv_test_close,
    .bdrv_co_preadv         = bdrv_test_co_preadv,

    .bdrv_drain_begin       = bdrv_test_drain_begin,
    .bdrv_drain_end         = bdrv_test_drain_end,

    .bdrv_child_perm        = bdrv_default_perms,

    .bdrv_co_change_backing_file = bdrv_test_co_change_backing_file,
};

static void aio_ret_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
}

typedef struct CallInCoroutineData {
    void (*entry)(void);
    bool done;
} CallInCoroutineData;

static coroutine_fn void call_in_coroutine_entry(void *opaque)
{
    CallInCoroutineData *data = opaque;

    data->entry();
    data->done = true;
}

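/*
 * Run @entry in a freshly created coroutine and poll the main AioContext
 * until it has finished, so the same test body can be exercised both
 * inside and outside of coroutine context.
 */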
static void call_in_coroutine(void (*entry)(void))
{
    Coroutine *co;
    CallInCoroutineData data = {
        .entry = entry,
        .done = false,
    };

    co = qemu_coroutine_create(call_in_coroutine_entry, &data);
    qemu_coroutine_enter(co);
    while (!data.done) {
        aio_poll(qemu_get_aio_context(), true);
    }
}

enum drain_type {
    BDRV_DRAIN_ALL,
    BDRV_DRAIN,
    DRAIN_TYPE_MAX,
};

static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL:        bdrv_drain_all_begin(); break;
    case BDRV_DRAIN:            bdrv_drained_begin(bs); break;
    default:                    g_assert_not_reached();
    }
}

static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL:        bdrv_drain_all_end(); break;
    case BDRV_DRAIN:            bdrv_drained_end(bs); break;
    default:                    g_assert_not_reached();
    }
}

static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    do_drain_begin(drain_type, bs);
}

static BlockBackend * no_coroutine_fn test_setup(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    bdrv_set_backing_hd(bs, backing, &error_abort);

    bdrv_unref(backing);
    bdrv_unref(bs);

    return blk;
}

static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    do_drain_end(drain_type, bs);
}

/*
 * Locking the block graph would be a bit cumbersome here because this function
 * is called both in coroutine and non-coroutine context. We know this is a test
 * and nothing else is running, so don't bother with TSA.
 */
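/*
 * Check that the driver's drain_begin/drain_end callbacks are invoked exactly
 * once by the given drain type, both without and with an AIO request in
 * flight, and that the backing node is only drained when @recursive is set
 * (i.e. for bdrv_drain_all()).
 */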
static void coroutine_mixed_fn TSA_NO_TSA
test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
                   bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;
    BDRVTestState *s, *backing_s;
    BlockAIOCB *acb;
    int aio_ret;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    s = bs->opaque;
    backing_s = backing->opaque;

    /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* Now do the same while a request is pending */
    aio_ret = -EINPROGRESS;
    acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(aio_ret, ==, 0);
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);
}

static void test_drv_cb_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}

static void test_drv_cb_drain(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}

static void coroutine_fn test_drv_cb_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
}

static void test_drv_cb_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_all_entry);
    blk_unref(blk);
}

static void coroutine_fn test_drv_cb_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN, false);
}

static void test_drv_cb_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_entry);
    blk_unref(blk);
}

/*
 * Locking the block graph would be a bit cumbersome here because this function
 * is called both in coroutine and non-coroutine context. We know this is a test
 * and nothing else is running, so don't bother with TSA.
 */
static void coroutine_mixed_fn TSA_NO_TSA
test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
                    bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    do_drain_begin(drain_type, bs);

    if (drain_type == BDRV_DRAIN_ALL) {
        g_assert_cmpint(bs->quiesce_counter, ==, 2);
    } else {
        g_assert_cmpint(bs->quiesce_counter, ==, 1);
    }
    g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
}

static void test_quiesce_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}

static void test_quiesce_drain(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}

static void coroutine_fn test_quiesce_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
}

static void test_quiesce_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_all_entry);
    blk_unref(blk);
}

static void coroutine_fn test_quiesce_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN, false);
}

static void test_quiesce_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_entry);
    blk_unref(blk);
}

static void test_nested(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    enum drain_type outer, inner;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
        for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
            int backing_quiesce = (outer == BDRV_DRAIN_ALL) +
                                  (inner == BDRV_DRAIN_ALL);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);

            do_drain_begin(outer, bs);
            do_drain_begin(inner, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 2 + !!backing_quiesce);
            g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
            g_assert_cmpint(s->drain_count, ==, 1);
            g_assert_cmpint(backing_s->drain_count, ==, !!backing_quiesce);

            do_drain_end(inner, bs);
            do_drain_end(outer, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);
        }
    }

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

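/*
 * Add and delete nodes while bdrv_drain_all() is in effect: a node created
 * inside the drained section must start out drained, and deleting a drained
 * node must leave the drain state of the remaining nodes intact.
 */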
static void test_graph_change_drain_all(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b;
    BDRVTestState *a_s, *b_s;

    /* Create node A with a BlockBackend */
    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);

    /* Call bdrv_drain_all_begin() */
    bdrv_drain_all_begin();

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);

    /* Create node B with a BlockBackend */
    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Unref and finally delete node A */
    blk_unref(blk_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    bdrv_unref(bs_a);

    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* End the drained section */
    bdrv_drain_all_end();

    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);

    bdrv_unref(bs_b);
    blk_unref(blk_b);
}

struct test_iothread_data {
    BlockDriverState *bs;
    enum drain_type drain_type;
    int *aio_ret;
    bool co_done;
};

static void coroutine_fn test_iothread_drain_co_entry(void *opaque)
{
    struct test_iothread_data *data = opaque;

    do_drain_begin(data->drain_type, data->bs);
    g_assert_cmpint(*data->aio_ret, ==, 0);
    do_drain_end(data->drain_type, data->bs);

    data->co_done = true;
    aio_wait_kick();
}

static void test_iothread_aio_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
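    /* Wake up the main thread, which waits on done_event in
     * test_iothread_common() */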
    qemu_event_set(&done_event);
}

static void test_iothread_main_thread_bh(void *opaque)
{
    struct test_iothread_data *data = opaque;

    bdrv_flush(data->bs);
    bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
}

/*
 * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
 * The request involves a BH on iothread 2 before it can complete.
 *
 * @drain_thread = 0 means that do_drain_begin/end are called from the main
 * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
 * for this BDS cannot be called from iothread 2 because only the main thread
 * may do cross-AioContext polling.
 */
static void test_iothread_common(enum drain_type drain_type, int drain_thread)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    BDRVTestState *s;
    BlockAIOCB *acb;
    Coroutine *co;
    int aio_ret;
    struct test_iothread_data data;

    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    /* bdrv_drain_all() may only be called from the main loop thread */
    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
        goto out;
    }

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);
    blk_set_disable_request_queuing(blk, true);

    blk_set_aio_context(blk, ctx_a, &error_abort);

    s->bh_indirection_ctx = ctx_b;

    aio_ret = -EINPROGRESS;
    qemu_event_reset(&done_event);

    if (drain_thread == 0) {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
    } else {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    }
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    data = (struct test_iothread_data) {
        .bs = bs,
        .drain_type = drain_type,
        .aio_ret = &aio_ret,
    };

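    /* Case 0 drains from the main thread; case 1 drains from a coroutine
     * running in IOThread a. */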
    switch (drain_thread) {
    case 0:
        /*
         * Increment in_flight so that do_drain_begin() waits for
         * test_iothread_main_thread_bh(). This prevents the race between
         * test_iothread_main_thread_bh() in IOThread a and do_drain_begin() in
         * this thread. test_iothread_main_thread_bh() decrements in_flight.
         */
        bdrv_inc_in_flight(bs);
        aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);

        /* The request is running on the IOThread a. Draining its block device
         * will make sure that it has completed as far as the BDS is concerned,
         * but the drain in this thread can continue immediately after
         * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
         * later. */
        do_drain_begin(drain_type, bs);
        g_assert_cmpint(bs->in_flight, ==, 0);

        qemu_event_wait(&done_event);

        g_assert_cmpint(aio_ret, ==, 0);
        do_drain_end(drain_type, bs);
        break;
    case 1:
        co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);
        aio_co_enter(ctx_a, co);
        AIO_WAIT_WHILE_UNLOCKED(NULL, !data.co_done);
        break;
    default:
        g_assert_not_reached();
    }

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);

out:
    iothread_join(a);
    iothread_join(b);
}

static void test_iothread_drain_all(void)
{
    test_iothread_common(BDRV_DRAIN_ALL, 0);
    test_iothread_common(BDRV_DRAIN_ALL, 1);
}

static void test_iothread_drain(void)
{
    test_iothread_common(BDRV_DRAIN, 0);
    test_iothread_common(BDRV_DRAIN, 1);
}


typedef struct TestBlockJob {
    BlockJob common;
    BlockDriverState *bs;
    int run_ret;
    int prepare_ret;
    bool running;
    bool should_complete;
} TestBlockJob;

static int test_job_prepare(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
    return s->prepare_ret;
}

static void test_job_commit(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static void test_job_abort(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* We are running the actual job code past the pause point in
     * job_co_entry(). */
    s->running = true;

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that drain
         * has to wait for this activity to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    return s->run_ret;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
        .commit         = test_job_commit,
        .abort          = test_job_abort,
    },
};

enum test_job_result {
    TEST_JOB_SUCCESS,
    TEST_JOB_FAIL_RUN,
    TEST_JOB_FAIL_PREPARE,
};

enum test_job_drain_node {
    TEST_JOB_DRAIN_SRC,
    TEST_JOB_DRAIN_SRC_CHILD,
};

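/*
 * Create a block job on @src (with @target added via block_job_add_bdrv()),
 * then drain the source node (or its backing child, selected by @drain_node)
 * as well as the target node, and verify that the job is paused exactly for
 * the duration of each drained section. With @use_iothread the source
 * BlockBackend runs in an I/O thread; @result selects whether the job
 * succeeds or fails in .run or .prepare.
 */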
static void test_blockjob_common_drain_node(enum drain_type drain_type,
                                            bool use_iothread,
                                            enum test_job_result result,
                                            enum test_job_drain_node drain_node)
{
    BlockBackend *blk_src, *blk_target;
    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
    BlockJob *job;
    TestBlockJob *tjob;
    IOThread *iothread = NULL;
    int ret;

    src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                               &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
                                       BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
                                       BDRV_O_RDWR, &error_abort);

    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);

    blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_src, src_overlay, &error_abort);

    switch (drain_node) {
    case TEST_JOB_DRAIN_SRC:
        drain_bs = src;
        break;
    case TEST_JOB_DRAIN_SRC_CHILD:
        drain_bs = src_backing;
        break;
    default:
        g_assert_not_reached();
    }

    if (use_iothread) {
        AioContext *ctx;

        iothread = iothread_new();
        ctx = iothread_get_aio_context(iothread);
        blk_set_aio_context(blk_src, ctx, &error_abort);
    }

    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);
    blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_target, target, &error_abort);
    blk_set_allow_aio_context_change(blk_target, true);

    tjob = block_job_create("job0", &test_job_driver, NULL, src,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    tjob->bs = src;
    job = &tjob->common;

    bdrv_graph_wrlock();
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
    bdrv_graph_wrunlock();

    switch (result) {
    case TEST_JOB_SUCCESS:
        break;
    case TEST_JOB_FAIL_RUN:
        tjob->run_ret = -EIO;
        break;
    case TEST_JOB_FAIL_PREPARE:
        tjob->prepare_ret = -EIO;
        break;
    }

    job_start(&job->job);

    if (use_iothread) {
        /* job_co_entry() is run in the I/O thread, wait for the actual job
         * code to start (we don't want to catch the job in the pause point in
         * job_co_entry()).
         */
        while (!tjob->running) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(tjob->running);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, drain_bs);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, drain_bs);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, target);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, target);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    WITH_JOB_LOCK_GUARD() {
        ret = job_complete_sync_locked(&job->job, &error_abort);
    }
    g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

    if (use_iothread) {
        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
        assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
    }

    blk_unref(blk_src);
    blk_unref(blk_target);
    bdrv_unref(src_overlay);
    bdrv_unref(target);

    if (iothread) {
        iothread_join(iothread);
    }
}

static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
                                 enum test_job_result result)
{
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC);
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC_CHILD);
}

static void test_blockjob_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}


typedef struct BDRVTestTopState {
    BdrvChild *wait_child;
} BDRVTestTopState;

static void bdrv_test_top_close(BlockDriverState *bs)
{
    BdrvChild *c, *next_c;

    bdrv_graph_wrlock();
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_unref_child(bs, c);
    }
    bdrv_graph_wrunlock();
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_test_top_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVTestTopState *tts = bs->opaque;
    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
}

static BlockDriver bdrv_test_top_driver = {
    .format_name            = "test_top_driver",
    .instance_size          = sizeof(BDRVTestTopState),

    .bdrv_close             = bdrv_test_top_close,
    .bdrv_co_preadv         = bdrv_test_top_co_preadv,

    .bdrv_child_perm        = bdrv_default_perms,
};

typedef struct TestCoDeleteByDrainData {
    BlockBackend *blk;
    bool detach_instead_of_delete;
    bool done;
} TestCoDeleteByDrainData;

static void coroutine_fn test_co_delete_by_drain(void *opaque)
{
    TestCoDeleteByDrainData *dbdd = opaque;
    BlockBackend *blk = dbdd->blk;
    BlockDriverState *bs = blk_bs(blk);
    BDRVTestTopState *tts = bs->opaque;
    void *buffer = g_malloc(65536);
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);

    /* Pretend some internal write operation from parent to child.
     * Important: We have to read from the child, not from the parent!
     * Draining works by first propagating it all up the tree to the
     * root and then waiting for drainage from root to the leaves
     * (protocol nodes).
     * If we have a request waiting on the root, everything will be
     * drained before we go back down the tree, but we do not want that.
     * We want to be in the middle of draining when the following
     * request returns. */
    bdrv_graph_co_rdlock();
    bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
    bdrv_graph_co_rdunlock();

    g_assert_cmpint(bs->refcnt, ==, 1);

    if (!dbdd->detach_instead_of_delete) {
        blk_co_unref(blk);
    } else {
        BdrvChild *c, *next_c;
        bdrv_graph_co_rdlock();
        QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
            bdrv_graph_co_rdunlock();
            bdrv_co_unref_child(bs, c);
            bdrv_graph_co_rdlock();
        }
        bdrv_graph_co_rdunlock();
    }

    dbdd->done = true;
    g_free(buffer);
}

/**
 * Test what happens when some BDS has some children, you drain one of
 * them and this results in the BDS being deleted.
 *
 * If @detach_instead_of_delete is set, the BDS is not going to be
 * deleted but will only detach all of its children.
 */
static void do_test_delete_by_drain(bool detach_instead_of_delete,
                                    enum drain_type drain_type)
{
    BlockBackend *blk;
    BlockDriverState *bs, *child_bs, *null_bs;
    BDRVTestTopState *tts;
    TestCoDeleteByDrainData dbdd;
    Coroutine *co;

    bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
                              &error_abort);
    bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
    tts = bs->opaque;

    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_graph_wrlock();
    bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
                      BDRV_CHILD_DATA, &error_abort);
    bdrv_graph_wrunlock();

    /* This child will be the one to pass requests through to, and
     * it will stall until a drain occurs */
    child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
                                    &error_abort);
    child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
    /* Takes our reference to child_bs */
    bdrv_graph_wrlock();
    tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
                                        &child_of_bds,
                                        BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
                                        &error_abort);
    bdrv_graph_wrunlock();

    /* This child is just there to be deleted
     * (for detach_instead_of_delete == true) */
    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_graph_wrlock();
    bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
                      &error_abort);
    bdrv_graph_wrunlock();

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, bs, &error_abort);

    /* Referenced by blk now */
    bdrv_unref(bs);

    g_assert_cmpint(bs->refcnt, ==, 1);
    g_assert_cmpint(child_bs->refcnt, ==, 1);
    g_assert_cmpint(null_bs->refcnt, ==, 1);


    dbdd = (TestCoDeleteByDrainData){
        .blk = blk,
        .detach_instead_of_delete = detach_instead_of_delete,
        .done = false,
    };
    co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
    qemu_coroutine_enter(co);

    /* Drain the child while the read operation is still pending.
     * This should result in the operation finishing and
     * test_co_delete_by_drain() resuming. Thus, @bs will be deleted
     * and the coroutine will exit while this drain operation is still
     * in progress. */
    switch (drain_type) {
    case BDRV_DRAIN:
        bdrv_ref(child_bs);
        bdrv_drain(child_bs);
        bdrv_unref(child_bs);
        break;
    case BDRV_DRAIN_ALL:
        bdrv_drain_all_begin();
        bdrv_drain_all_end();
        break;
    default:
        g_assert_not_reached();
    }

    while (!dbdd.done) {
        aio_poll(qemu_get_aio_context(), true);
    }

    if (detach_instead_of_delete) {
        /* Here, the reference has not passed over to the coroutine,
         * so we have to delete the BB ourselves */
        blk_unref(blk);
    }
}

static void test_delete_by_drain(void)
{
    do_test_delete_by_drain(false, BDRV_DRAIN);
}

static void test_detach_by_drain_all(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
}

static void test_detach_by_drain(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN);
}


struct detach_by_parent_data {
    BlockDriverState *parent_b;
    BdrvChild *child_b;
    BlockDriverState *c;
    BdrvChild *child_c;
    bool by_parent_cb;
    bool detach_on_drain;
};
static struct detach_by_parent_data detach_by_parent_data;

static void no_coroutine_fn detach_indirect_bh(void *opaque)
{
    struct detach_by_parent_data *data = opaque;

    bdrv_dec_in_flight(data->child_b->bs);

    bdrv_graph_wrlock();
    bdrv_unref_child(data->parent_b, data->child_b);

    bdrv_ref(data->c);
    data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
                                      &child_of_bds, BDRV_CHILD_DATA,
                                      &error_abort);
    bdrv_graph_wrunlock();
}

static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
{
    struct detach_by_parent_data *data = &detach_by_parent_data;

    g_assert_cmpint(ret, ==, 0);
    if (data->by_parent_cb) {
        bdrv_inc_in_flight(data->child_b->bs);
        aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                                detach_indirect_bh, &detach_by_parent_data);
    }
}

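/*
 * .drained_begin callback for detach_by_driver_cb_class: when
 * detach_on_drain is set, schedule detach_indirect_bh() (which replaces
 * parent-b's child B with C) exactly once, then forward the notification
 * to child_of_bds.drained_begin().
 */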
Giuseppe Esposito static void GRAPH_RDLOCK detach_by_driver_cb_drained_begin(BdrvChild *child) 117957320ca9SKevin Wolf { 1180617f3a96SKevin Wolf struct detach_by_parent_data *data = &detach_by_parent_data; 1181617f3a96SKevin Wolf 1182617f3a96SKevin Wolf if (!data->detach_on_drain) { 1183617f3a96SKevin Wolf return; 1184617f3a96SKevin Wolf } 1185617f3a96SKevin Wolf data->detach_on_drain = false; 1186617f3a96SKevin Wolf 1187617f3a96SKevin Wolf bdrv_inc_in_flight(data->child_b->bs); 118857320ca9SKevin Wolf aio_bh_schedule_oneshot(qemu_get_current_aio_context(), 118957320ca9SKevin Wolf detach_indirect_bh, &detach_by_parent_data); 1190a16be3cdSMax Reitz child_of_bds.drained_begin(child); 119157320ca9SKevin Wolf } 119257320ca9SKevin Wolf 1193bd86fb99SMax Reitz static BdrvChildClass detach_by_driver_cb_class; 119457320ca9SKevin Wolf 1195231281abSKevin Wolf /* 1196231281abSKevin Wolf * Initial graph: 1197231281abSKevin Wolf * 1198231281abSKevin Wolf * PA PB 1199231281abSKevin Wolf * \ / \ 1200231281abSKevin Wolf * A B C 1201231281abSKevin Wolf * 120257320ca9SKevin Wolf * by_parent_cb == true: Test that parent callbacks don't poll 120357320ca9SKevin Wolf * 120457320ca9SKevin Wolf * PA has a pending write request whose callback changes the child nodes of 120557320ca9SKevin Wolf * PB: It removes B and adds C instead. The subtree of PB is drained, which 120657320ca9SKevin Wolf * will indirectly drain the write request, too. 120757320ca9SKevin Wolf * 120857320ca9SKevin Wolf * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll 120957320ca9SKevin Wolf * 1210bd86fb99SMax Reitz * PA's BdrvChildClass has a .drained_begin callback that schedules a BH 121157320ca9SKevin Wolf * that does the same graph change. If bdrv_drain_invoke() calls it, the 121257320ca9SKevin Wolf * state is messed up, but if it is only polled in the single 121357320ca9SKevin Wolf * BDRV_POLL_WHILE() at the end of the drain, this should work fine. 
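 *
 * In both cases, the expected graph after the drain is:
 *
 *    PA    PB
 *      \  /  \
 *       A     C
 *
 * i.e. B has been detached from PB and C attached in its place (this is
 * what the assertions on parent_b->children after the drain check).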
1214231281abSKevin Wolf */ 1215d05ab380SEmanuele Giuseppe Esposito static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) 1216231281abSKevin Wolf { 1217231281abSKevin Wolf BlockBackend *blk; 1218231281abSKevin Wolf BlockDriverState *parent_a, *parent_b, *a, *b, *c; 1219231281abSKevin Wolf BdrvChild *child_a, *child_b; 1220231281abSKevin Wolf BlockAIOCB *acb; 1221231281abSKevin Wolf 1222405d8fe0SVladimir Sementsov-Ogievskiy QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0); 1223231281abSKevin Wolf 122457320ca9SKevin Wolf if (!by_parent_cb) { 1225a16be3cdSMax Reitz detach_by_driver_cb_class = child_of_bds; 1226bd86fb99SMax Reitz detach_by_driver_cb_class.drained_begin = 122757320ca9SKevin Wolf detach_by_driver_cb_drained_begin; 1228617f3a96SKevin Wolf detach_by_driver_cb_class.drained_end = NULL; 1229617f3a96SKevin Wolf detach_by_driver_cb_class.drained_poll = NULL; 123057320ca9SKevin Wolf } 123157320ca9SKevin Wolf 1232617f3a96SKevin Wolf detach_by_parent_data = (struct detach_by_parent_data) { 1233617f3a96SKevin Wolf .detach_on_drain = false, 1234617f3a96SKevin Wolf }; 1235617f3a96SKevin Wolf 1236231281abSKevin Wolf /* Create all involved nodes */ 1237231281abSKevin Wolf parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR, 1238231281abSKevin Wolf &error_abort); 1239231281abSKevin Wolf parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0, 1240231281abSKevin Wolf &error_abort); 1241231281abSKevin Wolf 1242231281abSKevin Wolf a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort); 1243231281abSKevin Wolf b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort); 1244231281abSKevin Wolf c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort); 1245231281abSKevin Wolf 1246231281abSKevin Wolf /* blk is a BB for parent-a */ 1247d861ab3aSKevin Wolf blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); 1248231281abSKevin Wolf blk_insert_bs(blk, parent_a, &error_abort); 1249231281abSKevin Wolf bdrv_unref(parent_a); 1250231281abSKevin Wolf 125157320ca9SKevin Wolf /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver 125257320ca9SKevin Wolf * callback must not return immediately. */ 125357320ca9SKevin Wolf if (!by_parent_cb) { 125457320ca9SKevin Wolf BDRVTestState *s = parent_a->opaque; 125557320ca9SKevin Wolf s->sleep_in_drain_begin = true; 125657320ca9SKevin Wolf } 125757320ca9SKevin Wolf 1258231281abSKevin Wolf /* Set child relationships */ 1259231281abSKevin Wolf bdrv_ref(b); 1260231281abSKevin Wolf bdrv_ref(a); 12616bc30f19SStefan Hajnoczi bdrv_graph_wrlock(); 1262a16be3cdSMax Reitz child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds, 1263a16be3cdSMax Reitz BDRV_CHILD_DATA, &error_abort); 126425191e5fSMax Reitz child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds, 126525191e5fSMax Reitz BDRV_CHILD_COW, &error_abort); 1266231281abSKevin Wolf 1267231281abSKevin Wolf bdrv_ref(a); 126857320ca9SKevin Wolf bdrv_attach_child(parent_a, a, "PA-A", 1269a16be3cdSMax Reitz by_parent_cb ? 
&child_of_bds : &detach_by_driver_cb_class, 1270a16be3cdSMax Reitz BDRV_CHILD_DATA, &error_abort); 12716bc30f19SStefan Hajnoczi bdrv_graph_wrunlock(); 1272231281abSKevin Wolf 1273231281abSKevin Wolf g_assert_cmpint(parent_a->refcnt, ==, 1); 1274231281abSKevin Wolf g_assert_cmpint(parent_b->refcnt, ==, 1); 1275231281abSKevin Wolf g_assert_cmpint(a->refcnt, ==, 3); 1276231281abSKevin Wolf g_assert_cmpint(b->refcnt, ==, 2); 1277231281abSKevin Wolf g_assert_cmpint(c->refcnt, ==, 1); 1278231281abSKevin Wolf 1279231281abSKevin Wolf g_assert(QLIST_FIRST(&parent_b->children) == child_a); 1280231281abSKevin Wolf g_assert(QLIST_NEXT(child_a, next) == child_b); 1281231281abSKevin Wolf g_assert(QLIST_NEXT(child_b, next) == NULL); 1282231281abSKevin Wolf 1283231281abSKevin Wolf /* Start the evil write request */ 128457320ca9SKevin Wolf detach_by_parent_data = (struct detach_by_parent_data) { 1285231281abSKevin Wolf .parent_b = parent_b, 1286231281abSKevin Wolf .child_b = child_b, 1287231281abSKevin Wolf .c = c, 128857320ca9SKevin Wolf .by_parent_cb = by_parent_cb, 1289617f3a96SKevin Wolf .detach_on_drain = true, 1290231281abSKevin Wolf }; 129157320ca9SKevin Wolf acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL); 1292231281abSKevin Wolf g_assert(acb != NULL); 1293231281abSKevin Wolf 1294231281abSKevin Wolf /* Drain and check the expected result */ 1295299403aeSKevin Wolf bdrv_drained_begin(parent_b); 1296299403aeSKevin Wolf bdrv_drained_begin(a); 1297299403aeSKevin Wolf bdrv_drained_begin(b); 1298299403aeSKevin Wolf bdrv_drained_begin(c); 1299231281abSKevin Wolf 130057320ca9SKevin Wolf g_assert(detach_by_parent_data.child_c != NULL); 1301231281abSKevin Wolf 1302231281abSKevin Wolf g_assert_cmpint(parent_a->refcnt, ==, 1); 1303231281abSKevin Wolf g_assert_cmpint(parent_b->refcnt, ==, 1); 1304231281abSKevin Wolf g_assert_cmpint(a->refcnt, ==, 3); 1305231281abSKevin Wolf g_assert_cmpint(b->refcnt, ==, 1); 1306231281abSKevin Wolf g_assert_cmpint(c->refcnt, ==, 2); 1307231281abSKevin Wolf 130857320ca9SKevin Wolf g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c); 130957320ca9SKevin Wolf g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a); 1310231281abSKevin Wolf g_assert(QLIST_NEXT(child_a, next) == NULL); 1311231281abSKevin Wolf 1312231281abSKevin Wolf g_assert_cmpint(parent_a->quiesce_counter, ==, 1); 1313299403aeSKevin Wolf g_assert_cmpint(parent_b->quiesce_counter, ==, 3); 1314231281abSKevin Wolf g_assert_cmpint(a->quiesce_counter, ==, 1); 1315299403aeSKevin Wolf g_assert_cmpint(b->quiesce_counter, ==, 1); 1316231281abSKevin Wolf g_assert_cmpint(c->quiesce_counter, ==, 1); 1317231281abSKevin Wolf 1318299403aeSKevin Wolf bdrv_drained_end(parent_b); 1319299403aeSKevin Wolf bdrv_drained_end(a); 1320299403aeSKevin Wolf bdrv_drained_end(b); 1321299403aeSKevin Wolf bdrv_drained_end(c); 1322231281abSKevin Wolf 1323231281abSKevin Wolf bdrv_unref(parent_b); 1324231281abSKevin Wolf blk_unref(blk); 1325231281abSKevin Wolf 1326231281abSKevin Wolf g_assert_cmpint(a->refcnt, ==, 1); 1327231281abSKevin Wolf g_assert_cmpint(b->refcnt, ==, 1); 1328231281abSKevin Wolf g_assert_cmpint(c->refcnt, ==, 1); 1329231281abSKevin Wolf bdrv_unref(a); 1330231281abSKevin Wolf bdrv_unref(b); 1331231281abSKevin Wolf bdrv_unref(c); 1332231281abSKevin Wolf } 1333231281abSKevin Wolf 133457320ca9SKevin Wolf static void test_detach_by_parent_cb(void) 133557320ca9SKevin Wolf { 133657320ca9SKevin Wolf test_detach_indirect(true); 133757320ca9SKevin Wolf } 133857320ca9SKevin Wolf 
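/*
 * Variant of test_detach_indirect() where the graph change is triggered
 * from the BdrvChildClass .drained_begin callback rather than from the
 * request completion callback (by_parent_cb == false).
 */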
133957320ca9SKevin Wolf static void test_detach_by_driver_cb(void) 134057320ca9SKevin Wolf { 134157320ca9SKevin Wolf test_detach_indirect(false); 134257320ca9SKevin Wolf } 1343231281abSKevin Wolf 1344b994c5bcSKevin Wolf static void test_append_to_drained(void) 1345b994c5bcSKevin Wolf { 1346b994c5bcSKevin Wolf BlockBackend *blk; 1347b994c5bcSKevin Wolf BlockDriverState *base, *overlay; 1348b994c5bcSKevin Wolf BDRVTestState *base_s, *overlay_s; 1349b994c5bcSKevin Wolf 1350d861ab3aSKevin Wolf blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); 1351b994c5bcSKevin Wolf base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); 1352b994c5bcSKevin Wolf base_s = base->opaque; 1353b994c5bcSKevin Wolf blk_insert_bs(blk, base, &error_abort); 1354b994c5bcSKevin Wolf 1355b994c5bcSKevin Wolf overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR, 1356b994c5bcSKevin Wolf &error_abort); 1357b994c5bcSKevin Wolf overlay_s = overlay->opaque; 1358b994c5bcSKevin Wolf 1359b994c5bcSKevin Wolf do_drain_begin(BDRV_DRAIN, base); 1360b994c5bcSKevin Wolf g_assert_cmpint(base->quiesce_counter, ==, 1); 1361b994c5bcSKevin Wolf g_assert_cmpint(base_s->drain_count, ==, 1); 1362b994c5bcSKevin Wolf g_assert_cmpint(base->in_flight, ==, 0); 1363b994c5bcSKevin Wolf 1364b994c5bcSKevin Wolf bdrv_append(overlay, base, &error_abort); 1365487b9187SKevin Wolf 1366b994c5bcSKevin Wolf g_assert_cmpint(base->in_flight, ==, 0); 1367b994c5bcSKevin Wolf g_assert_cmpint(overlay->in_flight, ==, 0); 1368b994c5bcSKevin Wolf 1369b994c5bcSKevin Wolf g_assert_cmpint(base->quiesce_counter, ==, 1); 1370b994c5bcSKevin Wolf g_assert_cmpint(base_s->drain_count, ==, 1); 1371b994c5bcSKevin Wolf g_assert_cmpint(overlay->quiesce_counter, ==, 1); 1372b994c5bcSKevin Wolf g_assert_cmpint(overlay_s->drain_count, ==, 1); 1373b994c5bcSKevin Wolf 1374b994c5bcSKevin Wolf do_drain_end(BDRV_DRAIN, base); 1375b994c5bcSKevin Wolf 1376b994c5bcSKevin Wolf g_assert_cmpint(base->quiesce_counter, ==, 0); 1377b994c5bcSKevin Wolf g_assert_cmpint(base_s->drain_count, ==, 0); 1378b994c5bcSKevin Wolf g_assert_cmpint(overlay->quiesce_counter, ==, 0); 1379b994c5bcSKevin Wolf g_assert_cmpint(overlay_s->drain_count, ==, 0); 1380b994c5bcSKevin Wolf 1381ae9d4417SVladimir Sementsov-Ogievskiy bdrv_unref(overlay); 1382b994c5bcSKevin Wolf bdrv_unref(base); 1383b994c5bcSKevin Wolf blk_unref(blk); 1384b994c5bcSKevin Wolf } 1385b994c5bcSKevin Wolf 1386247d2737SKevin Wolf static void test_set_aio_context(void) 1387247d2737SKevin Wolf { 1388247d2737SKevin Wolf BlockDriverState *bs; 1389247d2737SKevin Wolf IOThread *a = iothread_new(); 1390247d2737SKevin Wolf IOThread *b = iothread_new(); 1391247d2737SKevin Wolf AioContext *ctx_a = iothread_get_aio_context(a); 1392247d2737SKevin Wolf AioContext *ctx_b = iothread_get_aio_context(b); 1393247d2737SKevin Wolf 1394247d2737SKevin Wolf bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, 1395247d2737SKevin Wolf &error_abort); 1396247d2737SKevin Wolf 1397247d2737SKevin Wolf bdrv_drained_begin(bs); 1398142e6907SEmanuele Giuseppe Esposito bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); 1399247d2737SKevin Wolf bdrv_drained_end(bs); 1400247d2737SKevin Wolf 1401247d2737SKevin Wolf bdrv_drained_begin(bs); 1402142e6907SEmanuele Giuseppe Esposito bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); 1403142e6907SEmanuele Giuseppe Esposito bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); 1404247d2737SKevin Wolf bdrv_drained_end(bs); 1405247d2737SKevin Wolf 
1406247d2737SKevin Wolf bdrv_unref(bs); 1407247d2737SKevin Wolf iothread_join(a); 1408247d2737SKevin Wolf iothread_join(b); 1409247d2737SKevin Wolf } 1410247d2737SKevin Wolf 14118e442810SMax Reitz 14128e442810SMax Reitz typedef struct TestDropBackingBlockJob { 14138e442810SMax Reitz BlockJob common; 14148e442810SMax Reitz bool should_complete; 14158e442810SMax Reitz bool *did_complete; 14162afdc790SMax Reitz BlockDriverState *detach_also; 14171b177bbeSVladimir Sementsov-Ogievskiy BlockDriverState *bs; 14188e442810SMax Reitz } TestDropBackingBlockJob; 14198e442810SMax Reitz 14208e442810SMax Reitz static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp) 14218e442810SMax Reitz { 14228e442810SMax Reitz TestDropBackingBlockJob *s = 14238e442810SMax Reitz container_of(job, TestDropBackingBlockJob, common.job); 14248e442810SMax Reitz 14258e442810SMax Reitz while (!s->should_complete) { 14268e442810SMax Reitz job_sleep_ns(job, 0); 14278e442810SMax Reitz } 14288e442810SMax Reitz 14298e442810SMax Reitz return 0; 14308e442810SMax Reitz } 14318e442810SMax Reitz 14328e442810SMax Reitz static void test_drop_backing_job_commit(Job *job) 14338e442810SMax Reitz { 14348e442810SMax Reitz TestDropBackingBlockJob *s = 14358e442810SMax Reitz container_of(job, TestDropBackingBlockJob, common.job); 14368e442810SMax Reitz 14371b177bbeSVladimir Sementsov-Ogievskiy bdrv_set_backing_hd(s->bs, NULL, &error_abort); 14382afdc790SMax Reitz bdrv_set_backing_hd(s->detach_also, NULL, &error_abort); 14398e442810SMax Reitz 14408e442810SMax Reitz *s->did_complete = true; 14418e442810SMax Reitz } 14428e442810SMax Reitz 14438e442810SMax Reitz static const BlockJobDriver test_drop_backing_job_driver = { 14448e442810SMax Reitz .job_driver = { 14458e442810SMax Reitz .instance_size = sizeof(TestDropBackingBlockJob), 14468e442810SMax Reitz .free = block_job_free, 14478e442810SMax Reitz .user_resume = block_job_user_resume, 14488e442810SMax Reitz .run = test_drop_backing_job_run, 14498e442810SMax Reitz .commit = test_drop_backing_job_commit, 14508e442810SMax Reitz } 14518e442810SMax Reitz }; 14528e442810SMax Reitz 14538e442810SMax Reitz /** 14548e442810SMax Reitz * Creates a child node with three parent nodes on it, and then runs a 14558e442810SMax Reitz * block job on the final one, parent-node-2. 14568e442810SMax Reitz * 14578e442810SMax Reitz * The job is then asked to complete before a section where the child 14588e442810SMax Reitz * is drained. 14598e442810SMax Reitz * 14608e442810SMax Reitz * Ending this section will undrain the child's parents, first 14618e442810SMax Reitz * parent-node-2, then parent-node-1, then parent-node-0 -- the parent 14628e442810SMax Reitz * list is in reverse order of how they were added. Ending the drain 14638e442810SMax Reitz * on parent-node-2 will resume the job, thus completing it and 14648e442810SMax Reitz * scheduling job_exit(). 14658e442810SMax Reitz * 14668e442810SMax Reitz * Ending the drain on parent-node-1 will poll the AioContext, which 14678e442810SMax Reitz * lets job_exit() and thus test_drop_backing_job_commit() run. That 14682afdc790SMax Reitz * function first removes the child as parent-node-2's backing file. 14698e442810SMax Reitz * 14708e442810SMax Reitz * In old (and buggy) implementations, there are two problems with 14718e442810SMax Reitz * that: 14728e442810SMax Reitz * (A) bdrv_drain_invoke() polls for every node that leaves the 14738e442810SMax Reitz * drained section. 
This means that job_exit() is scheduled 14748e442810SMax Reitz * before the child has left the drained section. Its 14758e442810SMax Reitz * quiesce_counter is therefore still 1 when it is removed from 14768e442810SMax Reitz * parent-node-2. 14778e442810SMax Reitz * 14788e442810SMax Reitz * (B) bdrv_replace_child_noperm() calls drained_end() on the old 14798e442810SMax Reitz * child's parents as many times as the child is quiesced. This 14808e442810SMax Reitz * means it will call drained_end() on parent-node-2 once. 14818e442810SMax Reitz * Because parent-node-2 is no longer quiesced at this point, this 14828e442810SMax Reitz * will fail. 14838e442810SMax Reitz * 14848e442810SMax Reitz * bdrv_replace_child_noperm() therefore must call drained_end() on 14858e442810SMax Reitz * the parent only if it really is still drained because the child is 14868e442810SMax Reitz * drained. 14872afdc790SMax Reitz * 14882afdc790SMax Reitz * If removing child from parent-node-2 was successful (as it should 14892afdc790SMax Reitz * be), test_drop_backing_job_commit() will then also remove the child 14902afdc790SMax Reitz * from parent-node-0. 14912afdc790SMax Reitz * 14922afdc790SMax Reitz * With an old version of our drain infrastructure ((A) above), that 14932afdc790SMax Reitz * resulted in the following flow: 14942afdc790SMax Reitz * 14952afdc790SMax Reitz * 1. child attempts to leave its drained section. The call recurses 14962afdc790SMax Reitz * to its parents. 14972afdc790SMax Reitz * 14982afdc790SMax Reitz * 2. parent-node-2 leaves the drained section. Polling in 14992afdc790SMax Reitz * bdrv_drain_invoke() will schedule job_exit(). 15002afdc790SMax Reitz * 15012afdc790SMax Reitz * 3. parent-node-1 leaves the drained section. Polling in 15022afdc790SMax Reitz * bdrv_drain_invoke() will run job_exit(), thus disconnecting 15032afdc790SMax Reitz * parent-node-0 from the child node. 15042afdc790SMax Reitz * 15052afdc790SMax Reitz * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to 15062afdc790SMax Reitz * iterate over the parents. Thus, it now accesses the BdrvChild 15072afdc790SMax Reitz * object that used to connect parent-node-0 and the child node. 15082afdc790SMax Reitz * However, that object no longer exists, so it accesses a dangling 15092afdc790SMax Reitz * pointer. 15102afdc790SMax Reitz * 15112afdc790SMax Reitz * The solution is to only poll once when running a bdrv_drained_end() 15122afdc790SMax Reitz * operation, specifically at the end when all drained_end() 15132afdc790SMax Reitz * operations for all involved nodes have been scheduled. 15142afdc790SMax Reitz * Note that this also solves (A) above, thus hiding (B). 
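 *
 * Graph used by the test below (every edge is a backing link; the block
 * job runs on parent-node-2, and detach_also is parent-node-0):
 *
 *   parent-node-0   parent-node-1   parent-node-2
 *               \         |         /
 *                v        v        v
 *                     child-node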
15158e442810SMax Reitz */ 15168e442810SMax Reitz static void test_blockjob_commit_by_drained_end(void) 15178e442810SMax Reitz { 15188e442810SMax Reitz BlockDriverState *bs_child, *bs_parents[3]; 15198e442810SMax Reitz TestDropBackingBlockJob *job; 15208e442810SMax Reitz bool job_has_completed = false; 15218e442810SMax Reitz int i; 15228e442810SMax Reitz 15238e442810SMax Reitz bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR, 15248e442810SMax Reitz &error_abort); 15258e442810SMax Reitz 15268e442810SMax Reitz for (i = 0; i < 3; i++) { 15278e442810SMax Reitz char name[32]; 15288e442810SMax Reitz snprintf(name, sizeof(name), "parent-node-%i", i); 15298e442810SMax Reitz bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR, 15308e442810SMax Reitz &error_abort); 15318e442810SMax Reitz bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort); 15328e442810SMax Reitz } 15338e442810SMax Reitz 15348e442810SMax Reitz job = block_job_create("job", &test_drop_backing_job_driver, NULL, 15358e442810SMax Reitz bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL, 15368e442810SMax Reitz &error_abort); 15371b177bbeSVladimir Sementsov-Ogievskiy job->bs = bs_parents[2]; 15388e442810SMax Reitz 15392afdc790SMax Reitz job->detach_also = bs_parents[0]; 15408e442810SMax Reitz job->did_complete = &job_has_completed; 15418e442810SMax Reitz 15428e442810SMax Reitz job_start(&job->common.job); 15438e442810SMax Reitz 15448e442810SMax Reitz job->should_complete = true; 15458e442810SMax Reitz bdrv_drained_begin(bs_child); 15468e442810SMax Reitz g_assert(!job_has_completed); 15478e442810SMax Reitz bdrv_drained_end(bs_child); 15485e8ac217SKevin Wolf aio_poll(qemu_get_aio_context(), false); 15498e442810SMax Reitz g_assert(job_has_completed); 15508e442810SMax Reitz 15518e442810SMax Reitz bdrv_unref(bs_parents[0]); 15528e442810SMax Reitz bdrv_unref(bs_parents[1]); 15538e442810SMax Reitz bdrv_unref(bs_parents[2]); 15548e442810SMax Reitz bdrv_unref(bs_child); 15558e442810SMax Reitz } 15568e442810SMax Reitz 15579746b35cSMax Reitz 15589746b35cSMax Reitz typedef struct TestSimpleBlockJob { 15599746b35cSMax Reitz BlockJob common; 15609746b35cSMax Reitz bool should_complete; 15619746b35cSMax Reitz bool *did_complete; 15629746b35cSMax Reitz } TestSimpleBlockJob; 15639746b35cSMax Reitz 15649746b35cSMax Reitz static int coroutine_fn test_simple_job_run(Job *job, Error **errp) 15659746b35cSMax Reitz { 15669746b35cSMax Reitz TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); 15679746b35cSMax Reitz 15689746b35cSMax Reitz while (!s->should_complete) { 15699746b35cSMax Reitz job_sleep_ns(job, 0); 15709746b35cSMax Reitz } 15719746b35cSMax Reitz 15729746b35cSMax Reitz return 0; 15739746b35cSMax Reitz } 15749746b35cSMax Reitz 15759746b35cSMax Reitz static void test_simple_job_clean(Job *job) 15769746b35cSMax Reitz { 15779746b35cSMax Reitz TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); 15789746b35cSMax Reitz *s->did_complete = true; 15799746b35cSMax Reitz } 15809746b35cSMax Reitz 15819746b35cSMax Reitz static const BlockJobDriver test_simple_job_driver = { 15829746b35cSMax Reitz .job_driver = { 15839746b35cSMax Reitz .instance_size = sizeof(TestSimpleBlockJob), 15849746b35cSMax Reitz .free = block_job_free, 15859746b35cSMax Reitz .user_resume = block_job_user_resume, 15869746b35cSMax Reitz .run = test_simple_job_run, 15879746b35cSMax Reitz .clean = test_simple_job_clean, 15889746b35cSMax Reitz }, 15899746b35cSMax Reitz }; 15909746b35cSMax Reitz 15919746b35cSMax 
Reitz static int drop_intermediate_poll_update_filename(BdrvChild *child, 15929746b35cSMax Reitz BlockDriverState *new_base, 15939746b35cSMax Reitz const char *filename, 15949746b35cSMax Reitz Error **errp) 15959746b35cSMax Reitz { 15969746b35cSMax Reitz /* 15979746b35cSMax Reitz * We are free to poll here, which may change the block graph, if 15989746b35cSMax Reitz * it is not drained. 15999746b35cSMax Reitz */ 16009746b35cSMax Reitz 16019746b35cSMax Reitz /* If the job is not drained: Complete it, schedule job_exit() */ 16029746b35cSMax Reitz aio_poll(qemu_get_current_aio_context(), false); 16039746b35cSMax Reitz /* If the job is not drained: Run job_exit(), finish the job */ 16049746b35cSMax Reitz aio_poll(qemu_get_current_aio_context(), false); 16059746b35cSMax Reitz 16069746b35cSMax Reitz return 0; 16079746b35cSMax Reitz } 16089746b35cSMax Reitz 16099746b35cSMax Reitz /** 16109746b35cSMax Reitz * Test a poll in the midst of bdrv_drop_intermediate(). 16119746b35cSMax Reitz * 1612bd86fb99SMax Reitz * bdrv_drop_intermediate() calls BdrvChildClass.update_filename(), 16139746b35cSMax Reitz * which can yield or poll. This may lead to graph changes, unless 16149746b35cSMax Reitz * the whole subtree in question is drained. 16159746b35cSMax Reitz * 16169746b35cSMax Reitz * We test this on the following graph: 16179746b35cSMax Reitz * 16189746b35cSMax Reitz * Job 16199746b35cSMax Reitz * 16209746b35cSMax Reitz * | 16219746b35cSMax Reitz * job-node 16229746b35cSMax Reitz * | 16239746b35cSMax Reitz * v 16249746b35cSMax Reitz * 16259746b35cSMax Reitz * job-node 16269746b35cSMax Reitz * 16279746b35cSMax Reitz * | 16289746b35cSMax Reitz * backing 16299746b35cSMax Reitz * | 16309746b35cSMax Reitz * v 16319746b35cSMax Reitz * 16329746b35cSMax Reitz * node-2 --chain--> node-1 --chain--> node-0 16339746b35cSMax Reitz * 16349746b35cSMax Reitz * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0). 16359746b35cSMax Reitz * 16369746b35cSMax Reitz * This first updates node-2's backing filename by invoking 16379746b35cSMax Reitz * drop_intermediate_poll_update_filename(), which polls twice. This 16389746b35cSMax Reitz * causes the job to finish, which in turn causes the job-node to be 16399746b35cSMax Reitz * deleted. 16409746b35cSMax Reitz * 16419746b35cSMax Reitz * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it 16429746b35cSMax Reitz * already has a pointer to the BdrvChild edge between job-node and 16439746b35cSMax Reitz * node-1. When it tries to handle that edge, we probably get a 16449746b35cSMax Reitz * segmentation fault because the object no longer exists. 16459746b35cSMax Reitz * 16469746b35cSMax Reitz * 16479746b35cSMax Reitz * The solution is for bdrv_drop_intermediate() to drain top's 16489746b35cSMax Reitz * subtree. This prevents graph changes from happening just because 1649bd86fb99SMax Reitz * BdrvChildClass.update_filename() yields or polls. Thus, the block 16509746b35cSMax Reitz * job is paused during that drained section and must finish before or 16519746b35cSMax Reitz * after. 16529746b35cSMax Reitz * 16539746b35cSMax Reitz * (In addition, bdrv_replace_child() must keep the job paused.) 
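 *
 * The test below therefore expects bdrv_drop_intermediate() to return 0
 * and the job to have completed by the time the main loop has been
 * polled after the call.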
16549746b35cSMax Reitz */ 16559746b35cSMax Reitz static void test_drop_intermediate_poll(void) 16569746b35cSMax Reitz { 1657bd86fb99SMax Reitz static BdrvChildClass chain_child_class; 16589746b35cSMax Reitz BlockDriverState *chain[3]; 16599746b35cSMax Reitz TestSimpleBlockJob *job; 16609746b35cSMax Reitz BlockDriverState *job_node; 16619746b35cSMax Reitz bool job_has_completed = false; 16629746b35cSMax Reitz int i; 16639746b35cSMax Reitz int ret; 16649746b35cSMax Reitz 166525191e5fSMax Reitz chain_child_class = child_of_bds; 1666bd86fb99SMax Reitz chain_child_class.update_filename = drop_intermediate_poll_update_filename; 16679746b35cSMax Reitz 16689746b35cSMax Reitz for (i = 0; i < 3; i++) { 16699746b35cSMax Reitz char name[32]; 16709746b35cSMax Reitz snprintf(name, 32, "node-%i", i); 16719746b35cSMax Reitz 16729746b35cSMax Reitz chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort); 16739746b35cSMax Reitz } 16749746b35cSMax Reitz 16759746b35cSMax Reitz job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR, 16769746b35cSMax Reitz &error_abort); 16779746b35cSMax Reitz bdrv_set_backing_hd(job_node, chain[1], &error_abort); 16789746b35cSMax Reitz 16799746b35cSMax Reitz /* 16809746b35cSMax Reitz * Establish the chain last, so the chain links are the first 16819746b35cSMax Reitz * elements in the BDS.parents lists 16829746b35cSMax Reitz */ 16836bc30f19SStefan Hajnoczi bdrv_graph_wrlock(); 16849746b35cSMax Reitz for (i = 0; i < 3; i++) { 16859746b35cSMax Reitz if (i) { 16869746b35cSMax Reitz /* Takes the reference to chain[i - 1] */ 16875bb04747SVladimir Sementsov-Ogievskiy bdrv_attach_child(chain[i], chain[i - 1], "chain", 16885bb04747SVladimir Sementsov-Ogievskiy &chain_child_class, BDRV_CHILD_COW, &error_abort); 16899746b35cSMax Reitz } 16909746b35cSMax Reitz } 16916bc30f19SStefan Hajnoczi bdrv_graph_wrunlock(); 16929746b35cSMax Reitz 16939746b35cSMax Reitz job = block_job_create("job", &test_simple_job_driver, NULL, job_node, 16949746b35cSMax Reitz 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); 16959746b35cSMax Reitz 16969746b35cSMax Reitz /* The job has a reference now */ 16979746b35cSMax Reitz bdrv_unref(job_node); 16989746b35cSMax Reitz 16999746b35cSMax Reitz job->did_complete = &job_has_completed; 17009746b35cSMax Reitz 17019746b35cSMax Reitz job_start(&job->common.job); 17029746b35cSMax Reitz job->should_complete = true; 17039746b35cSMax Reitz 17049746b35cSMax Reitz g_assert(!job_has_completed); 17059746b35cSMax Reitz ret = bdrv_drop_intermediate(chain[1], chain[0], NULL); 17065e8ac217SKevin Wolf aio_poll(qemu_get_aio_context(), false); 17079746b35cSMax Reitz g_assert(ret == 0); 17089746b35cSMax Reitz g_assert(job_has_completed); 17099746b35cSMax Reitz 17109746b35cSMax Reitz bdrv_unref(chain[2]); 17119746b35cSMax Reitz } 17129746b35cSMax Reitz 17130513f984SMax Reitz 17140513f984SMax Reitz typedef struct BDRVReplaceTestState { 171523987471SKevin Wolf bool setup_completed; 17160513f984SMax Reitz bool was_drained; 17170513f984SMax Reitz bool was_undrained; 17180513f984SMax Reitz bool has_read; 17190513f984SMax Reitz 17200513f984SMax Reitz int drain_count; 17210513f984SMax Reitz 17220513f984SMax Reitz bool yield_before_read; 17230513f984SMax Reitz Coroutine *io_co; 17240513f984SMax Reitz Coroutine *drain_co; 17250513f984SMax Reitz } BDRVReplaceTestState; 17260513f984SMax Reitz 17270513f984SMax Reitz static void bdrv_replace_test_close(BlockDriverState *bs) 17280513f984SMax Reitz { 17290513f984SMax Reitz } 17300513f984SMax Reitz 17310513f984SMax Reitz /** 
17320513f984SMax Reitz * If @bs has a backing file: 17330513f984SMax Reitz * Yield if .yield_before_read is true (and wait for drain_begin to 17340513f984SMax Reitz * wake us up). 17350513f984SMax Reitz * Forward the read to bs->backing. Set .has_read to true. 17360513f984SMax Reitz * If drain_begin has woken us, wake it in turn. 17370513f984SMax Reitz * 17380513f984SMax Reitz * Otherwise: 17390513f984SMax Reitz * Set .has_read to true and return success. 17400513f984SMax Reitz */ 1741b9b10c35SKevin Wolf static int coroutine_fn GRAPH_RDLOCK 1742b9b10c35SKevin Wolf bdrv_replace_test_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, 1743b9b10c35SKevin Wolf QEMUIOVector *qiov, BdrvRequestFlags flags) 17440513f984SMax Reitz { 17450513f984SMax Reitz BDRVReplaceTestState *s = bs->opaque; 17460513f984SMax Reitz 17470513f984SMax Reitz if (bs->backing) { 17480513f984SMax Reitz int ret; 17490513f984SMax Reitz 17500513f984SMax Reitz g_assert(!s->drain_count); 17510513f984SMax Reitz 17520513f984SMax Reitz s->io_co = qemu_coroutine_self(); 17530513f984SMax Reitz if (s->yield_before_read) { 17540513f984SMax Reitz s->yield_before_read = false; 17550513f984SMax Reitz qemu_coroutine_yield(); 17560513f984SMax Reitz } 17570513f984SMax Reitz s->io_co = NULL; 17580513f984SMax Reitz 1759fae2681aSVladimir Sementsov-Ogievskiy ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0); 17600513f984SMax Reitz s->has_read = true; 17610513f984SMax Reitz 17620513f984SMax Reitz /* Wake up drain_co if it runs */ 17630513f984SMax Reitz if (s->drain_co) { 17640513f984SMax Reitz aio_co_wake(s->drain_co); 17650513f984SMax Reitz } 17660513f984SMax Reitz 17670513f984SMax Reitz return ret; 17680513f984SMax Reitz } 17690513f984SMax Reitz 17700513f984SMax Reitz s->has_read = true; 17710513f984SMax Reitz return 0; 17720513f984SMax Reitz } 17730513f984SMax Reitz 17747bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_drain_co(void *opaque) 17757bce1c29SKevin Wolf { 17767bce1c29SKevin Wolf BlockDriverState *bs = opaque; 17777bce1c29SKevin Wolf BDRVReplaceTestState *s = bs->opaque; 17787bce1c29SKevin Wolf 17797bce1c29SKevin Wolf /* Keep waking io_co up until it is done */ 17807bce1c29SKevin Wolf while (s->io_co) { 17817bce1c29SKevin Wolf aio_co_wake(s->io_co); 17827bce1c29SKevin Wolf s->io_co = NULL; 17837bce1c29SKevin Wolf qemu_coroutine_yield(); 17847bce1c29SKevin Wolf } 17857bce1c29SKevin Wolf s->drain_co = NULL; 17867bce1c29SKevin Wolf bdrv_dec_in_flight(bs); 17877bce1c29SKevin Wolf } 17887bce1c29SKevin Wolf 17890513f984SMax Reitz /** 17900513f984SMax Reitz * If .drain_count is 0, wake up .io_co if there is one; and set 17910513f984SMax Reitz * .was_drained. 17920513f984SMax Reitz * Increment .drain_count. 
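 *
 * Both this and bdrv_replace_test_drain_end() below do nothing until
 * .setup_completed has been set, i.e. until do_test_replace_child_mid_drain()
 * has finished attaching the child node to its parent.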
17930513f984SMax Reitz */ 17945e8ac217SKevin Wolf static void bdrv_replace_test_drain_begin(BlockDriverState *bs) 17950513f984SMax Reitz { 17960513f984SMax Reitz BDRVReplaceTestState *s = bs->opaque; 17970513f984SMax Reitz 179823987471SKevin Wolf if (!s->setup_completed) { 179923987471SKevin Wolf return; 180023987471SKevin Wolf } 180123987471SKevin Wolf 18020513f984SMax Reitz if (!s->drain_count) { 18037bce1c29SKevin Wolf s->drain_co = qemu_coroutine_create(bdrv_replace_test_drain_co, bs); 18047bce1c29SKevin Wolf bdrv_inc_in_flight(bs); 18057bce1c29SKevin Wolf aio_co_enter(bdrv_get_aio_context(bs), s->drain_co); 18060513f984SMax Reitz s->was_drained = true; 18070513f984SMax Reitz } 18080513f984SMax Reitz s->drain_count++; 18090513f984SMax Reitz } 18100513f984SMax Reitz 18117bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_read_entry(void *opaque) 18127bce1c29SKevin Wolf { 18137bce1c29SKevin Wolf BlockDriverState *bs = opaque; 18147bce1c29SKevin Wolf char data; 18157bce1c29SKevin Wolf QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1); 18167bce1c29SKevin Wolf int ret; 18177bce1c29SKevin Wolf 18187bce1c29SKevin Wolf /* Queue a read request post-drain */ 1819b9b10c35SKevin Wolf bdrv_graph_co_rdlock(); 18207bce1c29SKevin Wolf ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0); 1821b9b10c35SKevin Wolf bdrv_graph_co_rdunlock(); 1822b9b10c35SKevin Wolf 18237bce1c29SKevin Wolf g_assert(ret >= 0); 18247bce1c29SKevin Wolf bdrv_dec_in_flight(bs); 18257bce1c29SKevin Wolf } 18267bce1c29SKevin Wolf 18270513f984SMax Reitz /** 18280513f984SMax Reitz * Reduce .drain_count, set .was_undrained once it reaches 0. 18290513f984SMax Reitz * If .drain_count reaches 0 and the node has a backing file, issue a 18300513f984SMax Reitz * read request. 18310513f984SMax Reitz */ 18325e8ac217SKevin Wolf static void bdrv_replace_test_drain_end(BlockDriverState *bs) 18330513f984SMax Reitz { 18340513f984SMax Reitz BDRVReplaceTestState *s = bs->opaque; 18350513f984SMax Reitz 1836004915a9SKevin Wolf GRAPH_RDLOCK_GUARD_MAINLOOP(); 1837004915a9SKevin Wolf 183823987471SKevin Wolf if (!s->setup_completed) { 183923987471SKevin Wolf return; 184023987471SKevin Wolf } 184123987471SKevin Wolf 18420513f984SMax Reitz g_assert(s->drain_count > 0); 18430513f984SMax Reitz if (!--s->drain_count) { 18440513f984SMax Reitz s->was_undrained = true; 18450513f984SMax Reitz 18460513f984SMax Reitz if (bs->backing) { 18477bce1c29SKevin Wolf Coroutine *co = qemu_coroutine_create(bdrv_replace_test_read_entry, 18487bce1c29SKevin Wolf bs); 18497bce1c29SKevin Wolf bdrv_inc_in_flight(bs); 18507bce1c29SKevin Wolf aio_co_enter(bdrv_get_aio_context(bs), co); 18510513f984SMax Reitz } 18520513f984SMax Reitz } 18530513f984SMax Reitz } 18540513f984SMax Reitz 18550513f984SMax Reitz static BlockDriver bdrv_replace_test = { 18560513f984SMax Reitz .format_name = "replace_test", 18570513f984SMax Reitz .instance_size = sizeof(BDRVReplaceTestState), 18589ebfc111SVladimir Sementsov-Ogievskiy .supports_backing = true, 18590513f984SMax Reitz 18600513f984SMax Reitz .bdrv_close = bdrv_replace_test_close, 18610513f984SMax Reitz .bdrv_co_preadv = bdrv_replace_test_co_preadv, 18620513f984SMax Reitz 18635e8ac217SKevin Wolf .bdrv_drain_begin = bdrv_replace_test_drain_begin, 18645e8ac217SKevin Wolf .bdrv_drain_end = bdrv_replace_test_drain_end, 18650513f984SMax Reitz 186669dca43dSMax Reitz .bdrv_child_perm = bdrv_default_perms, 18670513f984SMax Reitz }; 18680513f984SMax Reitz 18690513f984SMax Reitz static void coroutine_fn 
test_replace_child_mid_drain_read_co(void *opaque) 18700513f984SMax Reitz { 18710513f984SMax Reitz int ret; 18720513f984SMax Reitz char data; 18730513f984SMax Reitz 18740513f984SMax Reitz ret = blk_co_pread(opaque, 0, 1, &data, 0); 18750513f984SMax Reitz g_assert(ret >= 0); 18760513f984SMax Reitz } 18770513f984SMax Reitz 18780513f984SMax Reitz /** 18790513f984SMax Reitz * We test two things: 18800513f984SMax Reitz * (1) bdrv_replace_child_noperm() must not undrain the parent if both 18810513f984SMax Reitz * children are drained. 18820513f984SMax Reitz * (2) bdrv_replace_child_noperm() must never flush I/O requests to a 18830513f984SMax Reitz * drained child. If the old child is drained, it must flush I/O 18840513f984SMax Reitz * requests after the new one has been attached. If the new child 18850513f984SMax Reitz * is drained, it must flush I/O requests before the old one is 18860513f984SMax Reitz * detached. 18870513f984SMax Reitz * 18880513f984SMax Reitz * To do so, we create one parent node and two child nodes; then 18890513f984SMax Reitz * attach one of the children (old_child_bs) to the parent, then 18900513f984SMax Reitz * drain both old_child_bs and new_child_bs according to 18910513f984SMax Reitz * old_drain_count and new_drain_count, respectively, and finally 18920513f984SMax Reitz * we invoke bdrv_replace_node() to replace old_child_bs by 18930513f984SMax Reitz * new_child_bs. 18940513f984SMax Reitz * 18950513f984SMax Reitz * The test block driver we use here (bdrv_replace_test) has a read 18960513f984SMax Reitz * function that: 18970513f984SMax Reitz * - For the parent node, can optionally yield, and then forwards the 18980513f984SMax Reitz * read to bdrv_preadv(), 18990513f984SMax Reitz * - For the child node, just returns immediately. 19000513f984SMax Reitz * 19010513f984SMax Reitz * If the read yields, the drain_begin function will wake it up. 19020513f984SMax Reitz * 19030513f984SMax Reitz * The drain_end function issues a read on the parent once it is fully 19040513f984SMax Reitz * undrained (which simulates requests starting to come in again). 
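 *
 * Expected effect of bdrv_replace_node() on the parent for each
 * combination of drain counts (as checked by the assertions further
 * down):
 *
 *   old_drain_count  new_drain_count   was_drained   was_undrained
 *          0                0              yes            yes
 *          0                1              yes            no
 *          1                0              no             yes
 *          1                1              no             no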
19050513f984SMax Reitz */ 19060513f984SMax Reitz static void do_test_replace_child_mid_drain(int old_drain_count, 19070513f984SMax Reitz int new_drain_count) 19080513f984SMax Reitz { 19090513f984SMax Reitz BlockBackend *parent_blk; 19100513f984SMax Reitz BlockDriverState *parent_bs; 19110513f984SMax Reitz BlockDriverState *old_child_bs, *new_child_bs; 19120513f984SMax Reitz BDRVReplaceTestState *parent_s; 19130513f984SMax Reitz BDRVReplaceTestState *old_child_s, *new_child_s; 19140513f984SMax Reitz Coroutine *io_co; 19150513f984SMax Reitz int i; 19160513f984SMax Reitz 19170513f984SMax Reitz parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0, 19180513f984SMax Reitz &error_abort); 19190513f984SMax Reitz parent_s = parent_bs->opaque; 19200513f984SMax Reitz 19210513f984SMax Reitz parent_blk = blk_new(qemu_get_aio_context(), 19220513f984SMax Reitz BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL); 19230513f984SMax Reitz blk_insert_bs(parent_blk, parent_bs, &error_abort); 19240513f984SMax Reitz 19250513f984SMax Reitz old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0, 19260513f984SMax Reitz &error_abort); 19270513f984SMax Reitz new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0, 19280513f984SMax Reitz &error_abort); 19290513f984SMax Reitz old_child_s = old_child_bs->opaque; 19300513f984SMax Reitz new_child_s = new_child_bs->opaque; 19310513f984SMax Reitz 19320513f984SMax Reitz /* So that we can read something */ 19330513f984SMax Reitz parent_bs->total_sectors = 1; 19340513f984SMax Reitz old_child_bs->total_sectors = 1; 19350513f984SMax Reitz new_child_bs->total_sectors = 1; 19360513f984SMax Reitz 19370513f984SMax Reitz bdrv_ref(old_child_bs); 19386bc30f19SStefan Hajnoczi bdrv_graph_wrlock(); 19395bb04747SVladimir Sementsov-Ogievskiy bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, 19405bb04747SVladimir Sementsov-Ogievskiy BDRV_CHILD_COW, &error_abort); 19416bc30f19SStefan Hajnoczi bdrv_graph_wrunlock(); 194223987471SKevin Wolf parent_s->setup_completed = true; 19430513f984SMax Reitz 19440513f984SMax Reitz for (i = 0; i < old_drain_count; i++) { 19450513f984SMax Reitz bdrv_drained_begin(old_child_bs); 19460513f984SMax Reitz } 19470513f984SMax Reitz for (i = 0; i < new_drain_count; i++) { 19480513f984SMax Reitz bdrv_drained_begin(new_child_bs); 19490513f984SMax Reitz } 19500513f984SMax Reitz 19510513f984SMax Reitz if (!old_drain_count) { 19520513f984SMax Reitz /* 19530513f984SMax Reitz * Start a read operation that will yield, so it will not 19540513f984SMax Reitz * complete before the node is drained. 
19550513f984SMax Reitz */ 19560513f984SMax Reitz parent_s->yield_before_read = true; 19570513f984SMax Reitz io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co, 19580513f984SMax Reitz parent_blk); 19590513f984SMax Reitz qemu_coroutine_enter(io_co); 19600513f984SMax Reitz } 19610513f984SMax Reitz 19620513f984SMax Reitz /* If we have started a read operation, it should have yielded */ 19630513f984SMax Reitz g_assert(!parent_s->has_read); 19640513f984SMax Reitz 19650513f984SMax Reitz /* Reset drained status so we can see what bdrv_replace_node() does */ 19660513f984SMax Reitz parent_s->was_drained = false; 19670513f984SMax Reitz parent_s->was_undrained = false; 19680513f984SMax Reitz 19690513f984SMax Reitz g_assert(parent_bs->quiesce_counter == old_drain_count); 1970ccd6a379SKevin Wolf bdrv_drained_begin(old_child_bs); 1971ccd6a379SKevin Wolf bdrv_drained_begin(new_child_bs); 19726bc30f19SStefan Hajnoczi bdrv_graph_wrlock(); 19730513f984SMax Reitz bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); 19746bc30f19SStefan Hajnoczi bdrv_graph_wrunlock(); 1975ccd6a379SKevin Wolf bdrv_drained_end(new_child_bs); 1976ccd6a379SKevin Wolf bdrv_drained_end(old_child_bs); 19770513f984SMax Reitz g_assert(parent_bs->quiesce_counter == new_drain_count); 19780513f984SMax Reitz 19790513f984SMax Reitz if (!old_drain_count && !new_drain_count) { 19800513f984SMax Reitz /* 19810513f984SMax Reitz * From undrained to undrained drains and undrains the parent, 19820513f984SMax Reitz * because bdrv_replace_node() contains a drained section for 19830513f984SMax Reitz * @old_child_bs. 19840513f984SMax Reitz */ 19850513f984SMax Reitz g_assert(parent_s->was_drained && parent_s->was_undrained); 19860513f984SMax Reitz } else if (!old_drain_count && new_drain_count) { 19870513f984SMax Reitz /* 19880513f984SMax Reitz * From undrained to drained should drain the parent and keep 19890513f984SMax Reitz * it that way. 19900513f984SMax Reitz */ 19910513f984SMax Reitz g_assert(parent_s->was_drained && !parent_s->was_undrained); 19920513f984SMax Reitz } else if (old_drain_count && !new_drain_count) { 19930513f984SMax Reitz /* 19940513f984SMax Reitz * From drained to undrained should undrain the parent and 19950513f984SMax Reitz * keep it that way. 19960513f984SMax Reitz */ 19970513f984SMax Reitz g_assert(!parent_s->was_drained && parent_s->was_undrained); 19980513f984SMax Reitz } else /* if (old_drain_count && new_drain_count) */ { 19990513f984SMax Reitz /* 20000513f984SMax Reitz * From drained to drained must not undrain the parent at any 20010513f984SMax Reitz * point 20020513f984SMax Reitz */ 20030513f984SMax Reitz g_assert(!parent_s->was_drained && !parent_s->was_undrained); 20040513f984SMax Reitz } 20050513f984SMax Reitz 20060513f984SMax Reitz if (!old_drain_count || !new_drain_count) { 20070513f984SMax Reitz /* 20080513f984SMax Reitz * If !old_drain_count, we have started a read request before 20090513f984SMax Reitz * bdrv_replace_node(). If !new_drain_count, the parent must 20100513f984SMax Reitz * have been undrained at some point, and 20110513f984SMax Reitz * bdrv_replace_test_co_drain_end() starts a read request 20120513f984SMax Reitz * then. 20130513f984SMax Reitz */ 20140513f984SMax Reitz g_assert(parent_s->has_read); 20150513f984SMax Reitz } else { 20160513f984SMax Reitz /* 20170513f984SMax Reitz * If the parent was never undrained, there is no way to start 20180513f984SMax Reitz * a read request. 
20190513f984SMax Reitz */ 20200513f984SMax Reitz g_assert(!parent_s->has_read); 20210513f984SMax Reitz } 20220513f984SMax Reitz 20230513f984SMax Reitz /* A drained child must have not received any request */ 20240513f984SMax Reitz g_assert(!(old_drain_count && old_child_s->has_read)); 20250513f984SMax Reitz g_assert(!(new_drain_count && new_child_s->has_read)); 20260513f984SMax Reitz 20270513f984SMax Reitz for (i = 0; i < new_drain_count; i++) { 20280513f984SMax Reitz bdrv_drained_end(new_child_bs); 20290513f984SMax Reitz } 20300513f984SMax Reitz for (i = 0; i < old_drain_count; i++) { 20310513f984SMax Reitz bdrv_drained_end(old_child_bs); 20320513f984SMax Reitz } 20330513f984SMax Reitz 20340513f984SMax Reitz /* 20350513f984SMax Reitz * By now, bdrv_replace_test_co_drain_end() must have been called 20360513f984SMax Reitz * at some point while the new child was attached to the parent. 20370513f984SMax Reitz */ 20380513f984SMax Reitz g_assert(parent_s->has_read); 20390513f984SMax Reitz g_assert(new_child_s->has_read); 20400513f984SMax Reitz 20410513f984SMax Reitz blk_unref(parent_blk); 20420513f984SMax Reitz bdrv_unref(parent_bs); 20430513f984SMax Reitz bdrv_unref(old_child_bs); 20440513f984SMax Reitz bdrv_unref(new_child_bs); 20450513f984SMax Reitz } 20460513f984SMax Reitz 20470513f984SMax Reitz static void test_replace_child_mid_drain(void) 20480513f984SMax Reitz { 20490513f984SMax Reitz int old_drain_count, new_drain_count; 20500513f984SMax Reitz 20510513f984SMax Reitz for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) { 20520513f984SMax Reitz for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) { 20530513f984SMax Reitz do_test_replace_child_mid_drain(old_drain_count, new_drain_count); 20540513f984SMax Reitz } 20550513f984SMax Reitz } 20560513f984SMax Reitz } 20570513f984SMax Reitz 2058881cfd17SKevin Wolf int main(int argc, char **argv) 2059881cfd17SKevin Wolf { 2060bb675689SKevin Wolf int ret; 2061bb675689SKevin Wolf 2062881cfd17SKevin Wolf bdrv_init(); 2063881cfd17SKevin Wolf qemu_init_main_loop(&error_abort); 2064881cfd17SKevin Wolf 2065881cfd17SKevin Wolf g_test_init(&argc, &argv, NULL); 2066bb675689SKevin Wolf qemu_event_init(&done_event, false); 2067881cfd17SKevin Wolf 2068881cfd17SKevin Wolf g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all); 206986e1c840SKevin Wolf g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain); 2070881cfd17SKevin Wolf 20716d0252f2SKevin Wolf g_test_add_func("/bdrv-drain/driver-cb/co/drain_all", 20726d0252f2SKevin Wolf test_drv_cb_co_drain_all); 20730582eb10SKevin Wolf g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain); 20740582eb10SKevin Wolf 207589a6ceabSKevin Wolf g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all); 207689a6ceabSKevin Wolf g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain); 207789a6ceabSKevin Wolf 20786d0252f2SKevin Wolf g_test_add_func("/bdrv-drain/quiesce/co/drain_all", 20796d0252f2SKevin Wolf test_quiesce_co_drain_all); 20800582eb10SKevin Wolf g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain); 20810582eb10SKevin Wolf 20826c429a6aSKevin Wolf g_test_add_func("/bdrv-drain/nested", test_nested); 208319f7a7e5SKevin Wolf 208419f7a7e5SKevin Wolf g_test_add_func("/bdrv-drain/graph-change/drain_all", 208519f7a7e5SKevin Wolf test_graph_change_drain_all); 20866c429a6aSKevin Wolf 2087bb675689SKevin Wolf g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all); 2088bb675689SKevin Wolf 
g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain); 2089bb675689SKevin Wolf 20907253220dSKevin Wolf g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all); 20917253220dSKevin Wolf g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain); 20927253220dSKevin Wolf 2093d49725afSKevin Wolf g_test_add_func("/bdrv-drain/blockjob/error/drain_all", 2094d49725afSKevin Wolf test_blockjob_error_drain_all); 2095d49725afSKevin Wolf g_test_add_func("/bdrv-drain/blockjob/error/drain", 2096d49725afSKevin Wolf test_blockjob_error_drain); 2097d49725afSKevin Wolf 2098f62c1729SKevin Wolf g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all", 2099f62c1729SKevin Wolf test_blockjob_iothread_drain_all); 2100f62c1729SKevin Wolf g_test_add_func("/bdrv-drain/blockjob/iothread/drain", 2101f62c1729SKevin Wolf test_blockjob_iothread_drain); 2102f62c1729SKevin Wolf 2103d49725afSKevin Wolf g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all", 2104d49725afSKevin Wolf test_blockjob_iothread_error_drain_all); 2105d49725afSKevin Wolf g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain", 2106d49725afSKevin Wolf test_blockjob_iothread_error_drain); 2107d49725afSKevin Wolf 2108ebd31837SKevin Wolf g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain); 210919f7a7e5SKevin Wolf g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all); 2110ebd31837SKevin Wolf g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); 2111231281abSKevin Wolf g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb); 211257320ca9SKevin Wolf g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb); 21134c8158e3SMax Reitz 2114b994c5bcSKevin Wolf g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained); 2115b994c5bcSKevin Wolf 2116247d2737SKevin Wolf g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context); 2117247d2737SKevin Wolf 21188e442810SMax Reitz g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end", 21198e442810SMax Reitz test_blockjob_commit_by_drained_end); 21208e442810SMax Reitz 21219746b35cSMax Reitz g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll", 21229746b35cSMax Reitz test_drop_intermediate_poll); 21239746b35cSMax Reitz 21240513f984SMax Reitz g_test_add_func("/bdrv-drain/replace_child/mid-drain", 21250513f984SMax Reitz test_replace_child_mid_drain); 21260513f984SMax Reitz 2127bb675689SKevin Wolf ret = g_test_run(); 2128bb675689SKevin Wolf qemu_event_destroy(&done_event); 2129bb675689SKevin Wolf return ret; 2130881cfd17SKevin Wolf } 2131