/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
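/* Lifecycle of a MirrorOp, as implemented below: mirror_iteration()
 * allocates the op and issues the read from the source,
 * mirror_read_complete() chains the write to the target, and
 * mirror_iteration_done() recycles the granularity-sized chunks into
 * s->buf_free and clears the corresponding bits in s->in_flight_bitmap.
 */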
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}
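/* Both completion callbacks below funnel errors through
 * mirror_error_action(): the failed sectors are re-marked dirty so they
 * are retried later, and on BLOCK_ERROR_ACTION_REPORT the first error is
 * latched in s->ret.
 */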
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
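         * (Free chunks are returned to s->buf_free by
         * mirror_iteration_done() as in-flight operations complete.)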
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
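    /* Three cases, depending on the block status of the source range:
     * allocated data is copied with a read/write pair, known-zero ranges
     * are written out with bdrv_aio_write_zeroes() (possibly unmapping),
     * and ranges reported as neither data nor zero are discarded on the
     * target.
     */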
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
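        /* bdrv_swap() exchanges the contents of the two BDS structs, so
         * every existing user of to_replace transparently moves over to
         * the mirror target.
         */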
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
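     * (The cow_bitmap allocated below marks target clusters that have been
     * copied at least once; only clusters whose bit is still clear get the
     * round-to-cluster treatment in mirror_iteration().)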
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
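            /* The job is ready but idle: when nothing is in flight and
             * nothing is dirty, sleep a whole slice rather than busy-poll.
             */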
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY,
                   bdrv_get_device_name(job->bs));
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = check_to_replace_node(s->replaces, &local_err);
        if (!s->to_replace) {
            error_propagate(errp, local_err);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
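    /* 'none' mirrors only writes that arrive from now on; 'top' also
     * pre-copies the sectors allocated in bs itself, above its backing
     * file; 'full' pre-copies everything allocated anywhere in the chain.
     */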
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}