/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

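    /* Wall-clock timestamp (QEMU_CLOCK_REALTIME, in ns) of the last pause
     * point; mirror_throttle() compares against it so that the job yields
     * at least once per SLICE_TIME. */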
    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += op->bytes;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

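/* Completion callbacks for the asynchronous target writes and source reads.
 * They run outside the job coroutine, hence the explicit AioContext
 * acquire/release; on failure the chunk is re-marked dirty so that a later
 * iteration retries it. */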
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

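/* A worked example for mirror_cow_align() below, with hypothetical numbers:
 * granularity 64 KiB, target cluster 2 MiB, request offset 1 MiB and
 * bytes 64 KiB, with neighbouring chunks not yet copied. The request is
 * widened to offset 0 and bytes 2 MiB, and the return value is
 * 2 MiB - (1 MiB + 64 KiB) = 960 KiB, the amount by which the tail moved. */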
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return by how much the adjusted tail exceeds the original one. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    unsigned int align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    assert(*bytes < INT_MAX);
    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

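/* mirror_wait_for_io() parks the job coroutine; mirror_iteration_done()
 * re-enters it through qemu_coroutine_enter(s->common.co) as soon as an
 * in-flight operation completes. */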
/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}

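/* Zeroing and discarding need no buffer chunks from s->buf_free: the op's
 * qiov stays zeroed, so mirror_iteration_done() frees nothing, but the op
 * still participates in the in_flight/bytes_in_flight accounting. */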
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a no-op. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

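/* One pass of the copy loop: pick the next dirty chunk from the bitmap
 * iterator, extend it with consecutive dirty chunks up to buf_size, then
 * copy, zero or discard the range depending on its block status. Returns
 * the rate-limiting delay to apply before the next iteration. */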
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap,
                                   next_offset >> BDRV_SECTOR_BITS)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset >> BDRV_SECTOR_BITS,
                                   nb_chunks * sectors_per_chunk);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int64_t ret;
        int io_sectors;
        unsigned int io_bytes;
        int64_t io_bytes_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_get_block_status_above(source, NULL,
                                          offset >> BDRV_SECTOR_BITS,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        io_bytes = io_sectors * BDRV_SECTOR_SIZE;
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            unsigned int target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
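            /* Only zero or discard when the cluster-aligned range matches
             * the queried range exactly; otherwise the operation would touch
             * target bytes outside the range known to be zero/discardable,
             * so fall back to a plain copy. */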
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
        }
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

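/* s->buf is one contiguous allocation of buf_size bytes; mirror_free_init()
 * above carves it into granularity-sized chunks, each of which doubles as a
 * MirrorBuffer free-list node until it is handed to a read qiov. */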
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;
    int64_t count;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* Loop on the sectors and initialize the dirty bitmap.  */
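    /* Only ranges that are allocated somewhere in bs's backing chain above
     * @base need copying; bdrv_is_allocated_above() below reports exactly
     * that, and everything else is left clean in the bitmap. */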
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, &count);
        if (ret < 0) {
            return ret;
        }

        /* TODO: Relax this once bdrv_is_allocated_above and dirty
         * bitmaps no longer require sector alignment. */
        assert(QEMU_IS_ALIGNED(count, BDRV_SECTOR_SIZE));
        n = count >> BDRV_SECTOR_BITS;
        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
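    /* Main copy loop: mirror dirty chunks until source and target converge,
     * report BLOCK_JOB_READY once they do, then keep them in sync until the
     * job is cancelled or block-job-complete requests completion. */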
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty bytes remaining and
         * s->bytes_in_flight is the number of bytes currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset + s->bytes_in_flight + cnt;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while the mirror
             * job runs, so pause it now.  Before deciding whether to switch
             * to the target, check one last time if I/O has come in the
             * meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source. */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into the permission system. The current
         * definition of GRAPH_MOD would require requesting it for the
         * parents; they might not even be BlockDriverStates, however, so a
         * BdrvChild can't address them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

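/* The remaining bdrv_mirror_top_* callbacks below likewise just forward the
 * request to bs->backing, the real image underneath the filter. */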
static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

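/* Net effect of the child_perm callback above: the filter claims
 * BLK_PERM_WRITE on its backing child only while some parent needs to write
 * through it, and shares everything, so it constrains the rest of the graph
 * as little as possible. */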
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_co_get_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);
    /* Granularity must be large enough for sector-based dirty bitmap */
    assert(granularity >= BDRV_SECTOR_SIZE);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

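    /* With the defaults above that is MAX_IN_FLIGHT (16) * MAX_IO_BYTES
     * (1 MiB) = 16 MiB; s->buf_size is additionally rounded up to the
     * granularity further down. */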
11514ef85a9cSKevin Wolf     /* In the case of active commit, add dummy driver to provide consistent
11524ef85a9cSKevin Wolf      * reads on the top, while disabling it in the intermediate nodes, and make
11534ef85a9cSKevin Wolf      * the backing chain writable. */
11546cdbceb1SKevin Wolf     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
11556cdbceb1SKevin Wolf                                          BDRV_O_RDWR, errp);
11564ef85a9cSKevin Wolf     if (mirror_top_bs == NULL) {
1157893f7ebaSPaolo Bonzini         return;
1158893f7ebaSPaolo Bonzini     }
1159d3c8c674SKevin Wolf     if (!filter_node_name) {
1160d3c8c674SKevin Wolf         mirror_top_bs->implicit = true;
1161d3c8c674SKevin Wolf     }
11624ef85a9cSKevin Wolf     mirror_top_bs->total_sectors = bs->total_sectors;
116319dd29e8SFam Zheng     bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
1164893f7ebaSPaolo Bonzini 
11654ef85a9cSKevin Wolf     /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
11667a25fcd0SMax Reitz      * it alive until block_job_create() succeeds even if bs has no parent. */
11674ef85a9cSKevin Wolf     bdrv_ref(mirror_top_bs);
11684ef85a9cSKevin Wolf     bdrv_drained_begin(bs);
1169b2c2832cSKevin Wolf     bdrv_append(mirror_top_bs, bs, &local_err);
11704ef85a9cSKevin Wolf     bdrv_drained_end(bs);
11714ef85a9cSKevin Wolf 
1172b2c2832cSKevin Wolf     if (local_err) {
1173b2c2832cSKevin Wolf         bdrv_unref(mirror_top_bs);
1174b2c2832cSKevin Wolf         error_propagate(errp, local_err);
1175b2c2832cSKevin Wolf         return;
1176b2c2832cSKevin Wolf     }
1177b2c2832cSKevin Wolf 
11784ef85a9cSKevin Wolf     /* Make sure that the source is not resized while the job is running */
11794ef85a9cSKevin Wolf     s = block_job_create(job_id, driver, mirror_top_bs,
11804ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ,
11814ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
11824ef85a9cSKevin Wolf                          BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
11834ef85a9cSKevin Wolf                          creation_flags, cb, opaque, errp);
11844ef85a9cSKevin Wolf     if (!s) {
11854ef85a9cSKevin Wolf         goto fail;
11864ef85a9cSKevin Wolf     }
11877a25fcd0SMax Reitz     /* The block job now has a reference to this node */
11887a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
11897a25fcd0SMax Reitz 
11904ef85a9cSKevin Wolf     s->source = bs;
11914ef85a9cSKevin Wolf     s->mirror_top_bs = mirror_top_bs;
11924ef85a9cSKevin Wolf 
11934ef85a9cSKevin Wolf     /* No resize for the target either; while the mirror is still running, a
11944ef85a9cSKevin Wolf      * consistent read isn't necessarily possible. We could possibly allow
11954ef85a9cSKevin Wolf      * writes and graph modifications, though it would likely defeat the
11964ef85a9cSKevin Wolf      * purpose of a mirror, so leave them blocked for now.
11974ef85a9cSKevin Wolf      *
11984ef85a9cSKevin Wolf      * In the case of active commit, things look a bit different, though,
11994ef85a9cSKevin Wolf      * because the target is an already populated backing file in active use.
12004ef85a9cSKevin Wolf      * We can allow anything except resize there. */
12014ef85a9cSKevin Wolf     target_is_backing = bdrv_chain_contains(bs, target);
12024ef85a9cSKevin Wolf     target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
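    /* Illustrative reading of the masks built below (not original
     * commentary): for a plain drive-mirror to a fresh target image,
     * target_is_backing is false and, since that path typically does not use
     * MIRROR_LEAVE_BACKING_CHAIN, target_graph_mod is true; blk_new() then
     * requests WRITE | RESIZE | GRAPH_MOD on the target and shares only
     * WRITE_UNCHANGED. For active commit, where the target sits in the
     * source's backing chain, the shared mask additionally allows
     * CONSISTENT_READ, WRITE and GRAPH_MOD because the backing file remains
     * in active use. */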
12034ef85a9cSKevin Wolf     s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
12044ef85a9cSKevin Wolf                         (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
12054ef85a9cSKevin Wolf                         BLK_PERM_WRITE_UNCHANGED |
12064ef85a9cSKevin Wolf                         (target_is_backing ? BLK_PERM_CONSISTENT_READ |
12074ef85a9cSKevin Wolf                                              BLK_PERM_WRITE |
12084ef85a9cSKevin Wolf                                              BLK_PERM_GRAPH_MOD : 0));
1209d7086422SKevin Wolf     ret = blk_insert_bs(s->target, target, errp);
1210d7086422SKevin Wolf     if (ret < 0) {
12114ef85a9cSKevin Wolf         goto fail;
1212d7086422SKevin Wolf     }
1213045a2f82SFam Zheng     if (is_mirror) {
1214045a2f82SFam Zheng         /* XXX: Mirror target could be a NBD server of target QEMU in the case
1215045a2f82SFam Zheng          * of non-shared block migration. To allow migration completion, we
1216045a2f82SFam Zheng          * have to allow "inactivate" of the target BB. When that happens, we
1217045a2f82SFam Zheng          * know the job is drained, and the vcpus are stopped, so no write
1218045a2f82SFam Zheng          * operation will be performed. Block layer already has assertions to
1219045a2f82SFam Zheng          * ensure that. */
1220045a2f82SFam Zheng         blk_set_force_allow_inactivate(s->target);
1221045a2f82SFam Zheng     }
1222e253f4b8SKevin Wolf 
122309158f00SBenoît Canet     s->replaces = g_strdup(replaces);
1224b952b558SPaolo Bonzini     s->on_source_error = on_source_error;
1225b952b558SPaolo Bonzini     s->on_target_error = on_target_error;
122603544a6eSFam Zheng     s->is_none_mode = is_none_mode;
1227274fcceeSMax Reitz     s->backing_mode = backing_mode;
12285bc361b8SFam Zheng     s->base = base;
1229eee13dfeSPaolo Bonzini     s->granularity = granularity;
123048ac0a4dSWen Congyang     s->buf_size = ROUND_UP(buf_size, granularity);
12310fc9f8eaSFam Zheng     s->unmap = unmap;
1232b49f7eadSWen Congyang     if (auto_complete) {
1233b49f7eadSWen Congyang         s->should_complete = true;
1234b49f7eadSWen Congyang     }
1235b812f671SPaolo Bonzini 
12360db6e54aSFam Zheng     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1237b8afb520SFam Zheng     if (!s->dirty_bitmap) {
123888f9d1b3SKevin Wolf         goto fail;
1239b8afb520SFam Zheng     }
124010f3cd15SAlberto Garcia 
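    /* Worked example (illustrative): with a 10 GiB source and 64 KiB
     * granularity, the bitmap created above tracks 10 GiB / 64 KiB == 163840
     * chunks. In the 'none' sync mode it starts out clean and is only
     * dirtied by guest writes; the full and top modes populate it up front
     * before the copy loop starts. */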
12414ef85a9cSKevin Wolf     /* Required permissions are already taken with blk_new() */
124276d554e2SKevin Wolf     block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
124376d554e2SKevin Wolf                        &error_abort);
124476d554e2SKevin Wolf 
1245f3ede4b0SAlberto Garcia     /* In commit_active_start() all intermediate nodes disappear, so
1246f3ede4b0SAlberto Garcia      * any jobs in them must be blocked */
12474ef85a9cSKevin Wolf     if (target_is_backing) {
1248f3ede4b0SAlberto Garcia         BlockDriverState *iter;
1249f3ede4b0SAlberto Garcia         for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
12504ef85a9cSKevin Wolf             /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
12514ef85a9cSKevin Wolf              * ourselves at s->base (if writes are blocked for a node, they are
12524ef85a9cSKevin Wolf              * also blocked for its backing file). The other options would be a
12534ef85a9cSKevin Wolf              * second filter driver above s->base (== target). */
12544ef85a9cSKevin Wolf             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
12554ef85a9cSKevin Wolf                                      BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
12564ef85a9cSKevin Wolf                                      errp);
12574ef85a9cSKevin Wolf             if (ret < 0) {
12584ef85a9cSKevin Wolf                 goto fail;
12594ef85a9cSKevin Wolf             }
1260f3ede4b0SAlberto Garcia         }
1261f3ede4b0SAlberto Garcia     }
126210f3cd15SAlberto Garcia 
12635ccac6f1SJohn Snow     trace_mirror_start(bs, s, opaque);
12645ccac6f1SJohn Snow     block_job_start(&s->common);
12654ef85a9cSKevin Wolf     return;
12664ef85a9cSKevin Wolf 
12674ef85a9cSKevin Wolf fail:
12684ef85a9cSKevin Wolf     if (s) {
12697a25fcd0SMax Reitz         /* Make sure this BDS does not go away until we have completed the graph
12707a25fcd0SMax Reitz          * changes below */
12717a25fcd0SMax Reitz         bdrv_ref(mirror_top_bs);
12727a25fcd0SMax Reitz 
12734ef85a9cSKevin Wolf         g_free(s->replaces);
12744ef85a9cSKevin Wolf         blk_unref(s->target);
127505b0d8e3SPaolo Bonzini         block_job_early_fail(&s->common);
12764ef85a9cSKevin Wolf     }
12774ef85a9cSKevin Wolf 
1278c1cef672SFam Zheng     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
1279c1cef672SFam Zheng                             &error_abort);
12805fe31c25SKevin Wolf     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
12817a25fcd0SMax Reitz 
12827a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
1283893f7ebaSPaolo Bonzini }
128403544a6eSFam Zheng 
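/* Illustrative QMP usage (a sketch, not part of this file; argument values
 * are hypothetical): mirror_start() below backs the drive-mirror command,
 * e.g.
 *
 *   { "execute": "drive-mirror",
 *     "arguments": { "device": "drive0",
 *                    "target": "/tmp/mirror.qcow2",
 *                    "format": "qcow2",
 *                    "sync": "full" } }
 *
 * With "sync": "top", base below becomes backing_bs(bs), so only data
 * allocated above the backing chain is copied. */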
128571aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs,
128671aa9867SAlberto Garcia                   BlockDriverState *target, const char *replaces,
12875fba6c0eSJohn Snow                   int64_t speed, uint32_t granularity, int64_t buf_size,
1288274fcceeSMax Reitz                   MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1289274fcceeSMax Reitz                   BlockdevOnError on_source_error,
129003544a6eSFam Zheng                   BlockdevOnError on_target_error,
12916cdbceb1SKevin Wolf                   bool unmap, const char *filter_node_name, Error **errp)
129203544a6eSFam Zheng {
129303544a6eSFam Zheng     bool is_none_mode;
129403544a6eSFam Zheng     BlockDriverState *base;
129503544a6eSFam Zheng 
12964b80ab2bSJohn Snow     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
12974b80ab2bSJohn Snow         error_setg(errp, "Sync mode 'incremental' not supported");
1298d58d8453SJohn Snow         return;
1299d58d8453SJohn Snow     }
130003544a6eSFam Zheng     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
1301760e0063SKevin Wolf     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
130247970dfbSJohn Snow     mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
1303274fcceeSMax Reitz                      speed, granularity, buf_size, backing_mode,
130451ccfa2dSFam Zheng                      on_source_error, on_target_error, unmap, NULL, NULL,
13056cdbceb1SKevin Wolf                      &mirror_job_driver, is_none_mode, base, false,
1306045a2f82SFam Zheng                      filter_node_name, true, errp);
130703544a6eSFam Zheng }
130803544a6eSFam Zheng 
1309fd62c609SAlberto Garcia void commit_active_start(const char *job_id, BlockDriverState *bs,
131047970dfbSJohn Snow                          BlockDriverState *base, int creation_flags,
131147970dfbSJohn Snow                          int64_t speed, BlockdevOnError on_error,
13120db832f4SKevin Wolf                          const char *filter_node_name,
131378bbd910SFam Zheng                          BlockCompletionFunc *cb, void *opaque,
131478bbd910SFam Zheng                          bool auto_complete, Error **errp)
131503544a6eSFam Zheng {
13164da83585SJeff Cody     int orig_base_flags;
1317cc67f4d1SJeff Cody     Error *local_err = NULL;
13184da83585SJeff Cody 
13194da83585SJeff Cody     orig_base_flags = bdrv_get_flags(base);
13204da83585SJeff Cody 
132120a63d2cSFam Zheng     if (bdrv_reopen(base, bs->open_flags, errp)) {
132220a63d2cSFam Zheng         return;
132320a63d2cSFam Zheng     }
13244da83585SJeff Cody 
132547970dfbSJohn Snow     mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
132671aa9867SAlberto Garcia                      MIRROR_LEAVE_BACKING_CHAIN,
132751ccfa2dSFam Zheng                      on_error, on_error, true, cb, opaque,
13286cdbceb1SKevin Wolf                      &commit_active_job_driver, false, base, auto_complete,
1329045a2f82SFam Zheng                      filter_node_name, false, &local_err);
13300fb6395cSMarkus Armbruster     if (local_err) {
1331cc67f4d1SJeff Cody         error_propagate(errp, local_err);
13324da83585SJeff Cody         goto error_restore_flags;
13334da83585SJeff Cody     }
13344da83585SJeff Cody 
13354da83585SJeff Cody     return;
13364da83585SJeff Cody 
13374da83585SJeff Cody error_restore_flags:
13384da83585SJeff Cody     /* ignore error and errp for bdrv_reopen, because we want to propagate
13394da83585SJeff Cody      * the original error */
13404da83585SJeff Cody     bdrv_reopen(base, orig_base_flags, NULL);
13414da83585SJeff Cody     return;
134203544a6eSFam Zheng }
1343
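/* Illustrative QMP usage (a sketch, not part of this file; argument values
 * are hypothetical): committing the active layer with block-commit ends up
 * in commit_active_start() above, e.g.
 *
 *   { "execute": "block-commit",
 *     "arguments": { "device": "drive0" } }
 *
 * Leaving 'top' unset selects the active image, and the commit then runs as
 * a mirror job from the active layer into its backing file. */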