/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

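    /* The fields below track in-flight asynchronous operations: one bit
     * per granularity-sized chunk in in_flight_bitmap, plus the list of
     * MirrorOps itself so that conflicting requests can wait on them. */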
    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

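            /* For illustration: with a 64 KiB granularity, an op covering
             * bytes [96 KiB, 160 KiB) occupies chunks 1 and 2, i.e.
             * op_start_chunk = 1 and op_nb_chunks = 2. */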
            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
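
/* Worked example (illustrative): with a 64 KiB granularity, a 128 KiB
 * target cluster and a sufficiently large max_iov, a request for
 * [64 KiB, 128 KiB) whose neighbouring chunks are not yet copied is
 * widened to the whole cluster [0, 128 KiB); the returned tail shift is
 * 0 + 128 KiB - (64 KiB + 64 KiB) = 0. */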
static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

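    /* Publish the op before entering the coroutine, so that overlapping
     * requests arriving in the meantime can find it in ops_in_flight and
     * wait on it. */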
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_read()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them.
     */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched.
     */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
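        /* Zeroes and discards that the target can perform efficiently
         * are accounted as zero bytes, so they are not rate-limited. */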
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

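        /* All zeroing requests have been issued; let them drain before
         * the allocation scan below starts populating the dirty bitmap. */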
        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part: loop over the image and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
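    /* Main loop: each pass either starts more copying work, flushes and
     * transitions to READY once source and target are in sync, or sleeps
     * as dictated by the rate limit. */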
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD.
         */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job is neither paused nor cancelled, we can't be sure that it
     * won't issue more requests. We make an exception if we've reached this
     * point from one of our own drain sections, to avoid a deadlock waiting
     * for ourselves.
     */
    if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
};
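
/* Write path for active mirroring: a guest request is replicated to the
 * target as it happens. Only the granularity-aligned core of the request
 * is written through and cleared in the dirty bitmap; dirty unaligned
 * head and tail padding is left to the background copy. For example,
 * with a 64 KiB granularity, a write to [10 KiB, 100 KiB) whose first
 * chunk is dirty is shrunk to start at 64 KiB. */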
1190dbdf699cSVladimir Sementsov-Ogievskiy size_t qiov_offset = 0;
1191dbdf699cSVladimir Sementsov-Ogievskiy int64_t bitmap_offset, bitmap_end;
1192d06107adSMax Reitz
1193dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
1194dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
1195dbdf699cSVladimir Sementsov-Ogievskiy {
1196dbdf699cSVladimir Sementsov-Ogievskiy /*
1197dbdf699cSVladimir Sementsov-Ogievskiy * Dirty unaligned padding: ignore it.
1198dbdf699cSVladimir Sementsov-Ogievskiy *
1199dbdf699cSVladimir Sementsov-Ogievskiy * Reasoning:
1200dbdf699cSVladimir Sementsov-Ogievskiy * 1. If we copy it, we can't reset the corresponding bit in the
1201dbdf699cSVladimir Sementsov-Ogievskiy *    dirty_bitmap, as there may be some "dirty" bytes still not
1202dbdf699cSVladimir Sementsov-Ogievskiy *    copied.
1203dbdf699cSVladimir Sementsov-Ogievskiy * 2. It is already dirty, so skipping it does not diverge mirror
1204dbdf699cSVladimir Sementsov-Ogievskiy *    progress.
1205dbdf699cSVladimir Sementsov-Ogievskiy *
1206dbdf699cSVladimir Sementsov-Ogievskiy * Note that, because of this, a guest write may make no contribution
1207dbdf699cSVladimir Sementsov-Ogievskiy * to mirror convergence; but that's not bad, as we still have the
1208dbdf699cSVladimir Sementsov-Ogievskiy * background mirroring process. If, under some bad circumstances
1209dbdf699cSVladimir Sementsov-Ogievskiy * (high guest I/O load), the background process starves, we will not
1210dbdf699cSVladimir Sementsov-Ogievskiy * converge anyway, even if each write contributed, as the guest is
1211dbdf699cSVladimir Sementsov-Ogievskiy * not guaranteed to rewrite the whole disk.
1212dbdf699cSVladimir Sementsov-Ogievskiy */
1213dbdf699cSVladimir Sementsov-Ogievskiy qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
1214dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= qiov_offset) {
1215dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */
1216dbdf699cSVladimir Sementsov-Ogievskiy return;
1217dbdf699cSVladimir Sementsov-Ogievskiy }
1218dbdf699cSVladimir Sementsov-Ogievskiy offset += qiov_offset;
1219dbdf699cSVladimir Sementsov-Ogievskiy bytes -= qiov_offset;
1220dbdf699cSVladimir Sementsov-Ogievskiy }
1221dbdf699cSVladimir Sementsov-Ogievskiy
1222dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
1223dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
1224dbdf699cSVladimir Sementsov-Ogievskiy {
1225dbdf699cSVladimir Sementsov-Ogievskiy uint64_t tail = (offset + bytes) % job->granularity;
1226dbdf699cSVladimir Sementsov-Ogievskiy
1227dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= tail) {
1228dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */
1229dbdf699cSVladimir Sementsov-Ogievskiy return;
1230dbdf699cSVladimir Sementsov-Ogievskiy }
1231dbdf699cSVladimir Sementsov-Ogievskiy bytes -= tail;
1232dbdf699cSVladimir Sementsov-Ogievskiy }
1233dbdf699cSVladimir Sementsov-Ogievskiy
1234dbdf699cSVladimir Sementsov-Ogievskiy /*
1235dbdf699cSVladimir Sementsov-Ogievskiy * Tails are either clean or shrunk, so for bitmap resetting
1236dbdf699cSVladimir Sementsov-Ogievskiy * we safely align the range down.
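 *
 * A worked example with hypothetical numbers (ALIGN_UP/ALIGN_DOWN and %
 * abbreviate the QEMU_ALIGN_UP/QEMU_ALIGN_DOWN/modulo operations above,
 * all with job->granularity): for granularity = 64 KiB (65536), a
 * request with offset = 100000 and bytes = 200000 whose head and tail
 * both overlap dirty chunks is clipped as follows:
 *
 *   qiov_offset = ALIGN_UP(100000) - 100000 = 131072 - 100000 = 31072
 *   offset      = 131072, bytes = 200000 - 31072 = 168928
 *   tail        = (131072 + 168928) % 65536 = 300000 % 65536 = 37856
 *   bytes      -= 37856, leaving the range [131072, 262144)
 *
 * Both bounds are now multiples of 65536, so the reset below covers
 * exactly the two fully rewritten chunks.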
1237dbdf699cSVladimir Sementsov-Ogievskiy */
1238dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
1239dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
1240dbdf699cSVladimir Sementsov-Ogievskiy if (bitmap_offset < bitmap_end) {
1241dbdf699cSVladimir Sementsov-Ogievskiy bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
1242dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset);
1243dbdf699cSVladimir Sementsov-Ogievskiy }
1244d06107adSMax Reitz
12455c511ac3SVladimir Sementsov-Ogievskiy job_progress_increase_remaining(&job->common.job, bytes);
1246d06107adSMax Reitz
1247d06107adSMax Reitz switch (method) {
1248d06107adSMax Reitz case MIRROR_METHOD_COPY:
1249dbdf699cSVladimir Sementsov-Ogievskiy ret = blk_co_pwritev_part(job->target, offset, bytes,
1250dbdf699cSVladimir Sementsov-Ogievskiy qiov, qiov_offset, flags);
1251d06107adSMax Reitz break;
1252d06107adSMax Reitz
1253d06107adSMax Reitz case MIRROR_METHOD_ZERO:
1254d06107adSMax Reitz assert(!qiov);
12555c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
1256d06107adSMax Reitz break;
1257d06107adSMax Reitz
1258d06107adSMax Reitz case MIRROR_METHOD_DISCARD:
1259d06107adSMax Reitz assert(!qiov);
12605c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pdiscard(job->target, offset, bytes);
1261d06107adSMax Reitz break;
1262d06107adSMax Reitz
1263d06107adSMax Reitz default:
1264d06107adSMax Reitz abort();
1265d06107adSMax Reitz }
1266d06107adSMax Reitz
1267d06107adSMax Reitz if (ret >= 0) {
12685c511ac3SVladimir Sementsov-Ogievskiy job_progress_update(&job->common.job, bytes);
1269d06107adSMax Reitz } else {
1270d06107adSMax Reitz BlockErrorAction action;
1271d06107adSMax Reitz
1272dbdf699cSVladimir Sementsov-Ogievskiy /*
1273dbdf699cSVladimir Sementsov-Ogievskiy * We failed, so we should mark the whole area dirty, aligned up.
1274dbdf699cSVladimir Sementsov-Ogievskiy * Note that we don't care about shrunk tails, if any: they were
1275dbdf699cSVladimir Sementsov-Ogievskiy * dirty at function start, and they must still be dirty, as we've
1276dbdf699cSVladimir Sementsov-Ogievskiy * locked the region for the in-flight op.
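 *
 * Continuing the hypothetical example above: if the target write for
 * [131072, 262144) fails, we re-dirty
 * ALIGN_DOWN(131072) .. ALIGN_UP(262144) = [131072, 262144).
 * Had the head chunk been clean (offset still 100000), the aligned-down
 * start would be 65536, re-dirtying the partially written head chunk
 * as well.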
1277dbdf699cSVladimir Sementsov-Ogievskiy */ 1278dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity); 1279dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity); 1280dbdf699cSVladimir Sementsov-Ogievskiy bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset, 1281dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset); 1282d06107adSMax Reitz job->actively_synced = false; 1283d06107adSMax Reitz 1284d06107adSMax Reitz action = mirror_error_action(job, false, -ret); 1285d06107adSMax Reitz if (action == BLOCK_ERROR_ACTION_REPORT) { 1286d06107adSMax Reitz if (!job->ret) { 1287d06107adSMax Reitz job->ret = ret; 1288d06107adSMax Reitz } 1289d06107adSMax Reitz } 1290d06107adSMax Reitz } 1291d06107adSMax Reitz } 1292d06107adSMax Reitz 1293d06107adSMax Reitz static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, 1294d06107adSMax Reitz uint64_t offset, 1295d06107adSMax Reitz uint64_t bytes) 1296d06107adSMax Reitz { 1297d06107adSMax Reitz MirrorOp *op; 1298d06107adSMax Reitz uint64_t start_chunk = offset / s->granularity; 1299d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); 1300d06107adSMax Reitz 1301d06107adSMax Reitz op = g_new(MirrorOp, 1); 1302d06107adSMax Reitz *op = (MirrorOp){ 1303d06107adSMax Reitz .s = s, 1304d06107adSMax Reitz .offset = offset, 1305d06107adSMax Reitz .bytes = bytes, 1306d06107adSMax Reitz .is_active_write = true, 1307d06107adSMax Reitz }; 1308d06107adSMax Reitz qemu_co_queue_init(&op->waiting_requests); 1309d06107adSMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); 1310d06107adSMax Reitz 1311d06107adSMax Reitz s->in_active_write_counter++; 1312d06107adSMax Reitz 1313d06107adSMax Reitz mirror_wait_on_conflicts(op, s, offset, bytes); 1314d06107adSMax Reitz 1315d06107adSMax Reitz bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1316d06107adSMax Reitz 1317d06107adSMax Reitz return op; 1318d06107adSMax Reitz } 1319d06107adSMax Reitz 1320d06107adSMax Reitz static void coroutine_fn active_write_settle(MirrorOp *op) 1321d06107adSMax Reitz { 1322d06107adSMax Reitz uint64_t start_chunk = op->offset / op->s->granularity; 1323d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes, 1324d06107adSMax Reitz op->s->granularity); 1325d06107adSMax Reitz 1326d06107adSMax Reitz if (!--op->s->in_active_write_counter && op->s->actively_synced) { 1327d06107adSMax Reitz BdrvChild *source = op->s->mirror_top_bs->backing; 1328d06107adSMax Reitz 1329d06107adSMax Reitz if (QLIST_FIRST(&source->bs->parents) == source && 1330d06107adSMax Reitz QLIST_NEXT(source, next_parent) == NULL) 1331d06107adSMax Reitz { 1332d06107adSMax Reitz /* Assert that we are back in sync once all active write 1333d06107adSMax Reitz * operations are settled. 1334d06107adSMax Reitz * Note that we can only assert this if the mirror node 1335d06107adSMax Reitz * is the source node's only parent. 
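 * (For example, a second parent, such as an NBD export attached
 * directly to the source node, could submit writes that bypass the
 * mirror_top filter; they would dirty the bitmap without being
 * replicated by the active-write path, so the dirty count could
 * legitimately be non-zero.)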
*/ 1336d06107adSMax Reitz assert(!bdrv_get_dirty_count(op->s->dirty_bitmap)); 1337d06107adSMax Reitz } 1338d06107adSMax Reitz } 1339d06107adSMax Reitz bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1340d06107adSMax Reitz QTAILQ_REMOVE(&op->s->ops_in_flight, op, next); 1341d06107adSMax Reitz qemu_co_queue_restart_all(&op->waiting_requests); 1342d06107adSMax Reitz g_free(op); 1343d06107adSMax Reitz } 1344d06107adSMax Reitz 13454ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, 13464ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 13474ef85a9cSKevin Wolf { 13484ef85a9cSKevin Wolf return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); 13494ef85a9cSKevin Wolf } 13504ef85a9cSKevin Wolf 1351d06107adSMax Reitz static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, 1352d06107adSMax Reitz MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, 1353d06107adSMax Reitz int flags) 1354d06107adSMax Reitz { 1355d06107adSMax Reitz MirrorOp *op = NULL; 1356d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1357d06107adSMax Reitz int ret = 0; 1358d06107adSMax Reitz bool copy_to_target; 1359d06107adSMax Reitz 1360d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1361d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1362d06107adSMax Reitz 1363d06107adSMax Reitz if (copy_to_target) { 1364d06107adSMax Reitz op = active_write_prepare(s->job, offset, bytes); 1365d06107adSMax Reitz } 1366d06107adSMax Reitz 1367d06107adSMax Reitz switch (method) { 1368d06107adSMax Reitz case MIRROR_METHOD_COPY: 1369d06107adSMax Reitz ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); 1370d06107adSMax Reitz break; 1371d06107adSMax Reitz 1372d06107adSMax Reitz case MIRROR_METHOD_ZERO: 1373d06107adSMax Reitz ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); 1374d06107adSMax Reitz break; 1375d06107adSMax Reitz 1376d06107adSMax Reitz case MIRROR_METHOD_DISCARD: 13770b9fd3f4SFam Zheng ret = bdrv_co_pdiscard(bs->backing, offset, bytes); 1378d06107adSMax Reitz break; 1379d06107adSMax Reitz 1380d06107adSMax Reitz default: 1381d06107adSMax Reitz abort(); 1382d06107adSMax Reitz } 1383d06107adSMax Reitz 1384d06107adSMax Reitz if (ret < 0) { 1385d06107adSMax Reitz goto out; 1386d06107adSMax Reitz } 1387d06107adSMax Reitz 1388d06107adSMax Reitz if (copy_to_target) { 1389d06107adSMax Reitz do_sync_target_write(s->job, method, offset, bytes, qiov, flags); 1390d06107adSMax Reitz } 1391d06107adSMax Reitz 1392d06107adSMax Reitz out: 1393d06107adSMax Reitz if (copy_to_target) { 1394d06107adSMax Reitz active_write_settle(op); 1395d06107adSMax Reitz } 1396d06107adSMax Reitz return ret; 1397d06107adSMax Reitz } 1398d06107adSMax Reitz 13994ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, 14004ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 14014ef85a9cSKevin Wolf { 1402d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1403d06107adSMax Reitz QEMUIOVector bounce_qiov; 1404d06107adSMax Reitz void *bounce_buf; 1405d06107adSMax Reitz int ret = 0; 1406d06107adSMax Reitz bool copy_to_target; 1407d06107adSMax Reitz 1408d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1409d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1410d06107adSMax Reitz 1411d06107adSMax Reitz if (copy_to_target) { 1412d06107adSMax Reitz /* The guest might concurrently modify the data to 
write; but 1413d06107adSMax Reitz * the data on source and destination must match, so we have 1414d06107adSMax Reitz * to use a bounce buffer if we are going to write to the 1415d06107adSMax Reitz * target now. */ 1416d06107adSMax Reitz bounce_buf = qemu_blockalign(bs, bytes); 1417d06107adSMax Reitz iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes); 1418d06107adSMax Reitz 1419d06107adSMax Reitz qemu_iovec_init(&bounce_qiov, 1); 1420d06107adSMax Reitz qemu_iovec_add(&bounce_qiov, bounce_buf, bytes); 1421d06107adSMax Reitz qiov = &bounce_qiov; 1422d06107adSMax Reitz } 1423d06107adSMax Reitz 1424d06107adSMax Reitz ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov, 1425d06107adSMax Reitz flags); 1426d06107adSMax Reitz 1427d06107adSMax Reitz if (copy_to_target) { 1428d06107adSMax Reitz qemu_iovec_destroy(&bounce_qiov); 1429d06107adSMax Reitz qemu_vfree(bounce_buf); 1430d06107adSMax Reitz } 1431d06107adSMax Reitz 1432d06107adSMax Reitz return ret; 14334ef85a9cSKevin Wolf } 14344ef85a9cSKevin Wolf 14354ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) 14364ef85a9cSKevin Wolf { 1437ce960aa9SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) { 1438ce960aa9SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_append in mirror_start_job */ 1439ce960aa9SVladimir Sementsov-Ogievskiy return 0; 1440ce960aa9SVladimir Sementsov-Ogievskiy } 14414ef85a9cSKevin Wolf return bdrv_co_flush(bs->backing->bs); 14424ef85a9cSKevin Wolf } 14434ef85a9cSKevin Wolf 14444ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, 1445f5a5ca79SManos Pitsidianakis int64_t offset, int bytes, BdrvRequestFlags flags) 14464ef85a9cSKevin Wolf { 1447d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL, 1448d06107adSMax Reitz flags); 14494ef85a9cSKevin Wolf } 14504ef85a9cSKevin Wolf 14514ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, 1452f5a5ca79SManos Pitsidianakis int64_t offset, int bytes) 14534ef85a9cSKevin Wolf { 1454d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes, 1455d06107adSMax Reitz NULL, 0); 14564ef85a9cSKevin Wolf } 14574ef85a9cSKevin Wolf 1458998b3a1eSMax Reitz static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs) 1459fd4a6493SKevin Wolf { 146018775ff3SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) { 146118775ff3SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_attach_child in 146218775ff3SVladimir Sementsov-Ogievskiy * bdrv_set_backing_hd */ 146318775ff3SVladimir Sementsov-Ogievskiy return; 146418775ff3SVladimir Sementsov-Ogievskiy } 1465fd4a6493SKevin Wolf pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), 1466fd4a6493SKevin Wolf bs->backing->bs->filename); 1467fd4a6493SKevin Wolf } 1468fd4a6493SKevin Wolf 14694ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c, 14704ef85a9cSKevin Wolf const BdrvChildRole *role, 1471e0995dc3SKevin Wolf BlockReopenQueue *reopen_queue, 14724ef85a9cSKevin Wolf uint64_t perm, uint64_t shared, 14734ef85a9cSKevin Wolf uint64_t *nperm, uint64_t *nshared) 14744ef85a9cSKevin Wolf { 1475f94dc3b4SMax Reitz MirrorBDSOpaque *s = bs->opaque; 1476f94dc3b4SMax Reitz 1477f94dc3b4SMax Reitz if (s->stop) { 1478f94dc3b4SMax Reitz /* 1479f94dc3b4SMax Reitz * If the job is to be stopped, we do not need to forward 1480f94dc3b4SMax Reitz * anything to the real image. 
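 * (Requesting *nperm = 0 while sharing BLK_PERM_ALL means this node
 * needs nothing from its child and tolerates whatever other parents
 * do, which is what allows the filter node to be dropped from the
 * graph.)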
1481f94dc3b4SMax Reitz */ 1482f94dc3b4SMax Reitz *nperm = 0; 1483f94dc3b4SMax Reitz *nshared = BLK_PERM_ALL; 1484f94dc3b4SMax Reitz return; 1485f94dc3b4SMax Reitz } 1486f94dc3b4SMax Reitz 14874ef85a9cSKevin Wolf /* Must be able to forward guest writes to the real image */ 14884ef85a9cSKevin Wolf *nperm = 0; 14894ef85a9cSKevin Wolf if (perm & BLK_PERM_WRITE) { 14904ef85a9cSKevin Wolf *nperm |= BLK_PERM_WRITE; 14914ef85a9cSKevin Wolf } 14924ef85a9cSKevin Wolf 14934ef85a9cSKevin Wolf *nshared = BLK_PERM_ALL; 14944ef85a9cSKevin Wolf } 14954ef85a9cSKevin Wolf 14964ef85a9cSKevin Wolf /* Dummy node that provides consistent read to its users without requiring it 14974ef85a9cSKevin Wolf * from its backing file and that allows writes on the backing file chain. */ 14984ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = { 14994ef85a9cSKevin Wolf .format_name = "mirror_top", 15004ef85a9cSKevin Wolf .bdrv_co_preadv = bdrv_mirror_top_preadv, 15014ef85a9cSKevin Wolf .bdrv_co_pwritev = bdrv_mirror_top_pwritev, 15024ef85a9cSKevin Wolf .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes, 15034ef85a9cSKevin Wolf .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard, 15044ef85a9cSKevin Wolf .bdrv_co_flush = bdrv_mirror_top_flush, 15053e4d0e72SEric Blake .bdrv_co_block_status = bdrv_co_block_status_from_backing, 1506fd4a6493SKevin Wolf .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename, 15074ef85a9cSKevin Wolf .bdrv_child_perm = bdrv_mirror_top_child_perm, 15084ef85a9cSKevin Wolf }; 15094ef85a9cSKevin Wolf 1510cc19f177SVladimir Sementsov-Ogievskiy static BlockJob *mirror_start_job( 1511cc19f177SVladimir Sementsov-Ogievskiy const char *job_id, BlockDriverState *bs, 151247970dfbSJohn Snow int creation_flags, BlockDriverState *target, 151347970dfbSJohn Snow const char *replaces, int64_t speed, 151447970dfbSJohn Snow uint32_t granularity, int64_t buf_size, 1515274fcceeSMax Reitz BlockMirrorBackingMode backing_mode, 1516cdf3bc93SMax Reitz bool zero_target, 151703544a6eSFam Zheng BlockdevOnError on_source_error, 1518b952b558SPaolo Bonzini BlockdevOnError on_target_error, 15190fc9f8eaSFam Zheng bool unmap, 1520097310b5SMarkus Armbruster BlockCompletionFunc *cb, 152151ccfa2dSFam Zheng void *opaque, 152203544a6eSFam Zheng const BlockJobDriver *driver, 1523b49f7eadSWen Congyang bool is_none_mode, BlockDriverState *base, 152451ccfa2dSFam Zheng bool auto_complete, const char *filter_node_name, 1525481debaaSMax Reitz bool is_mirror, MirrorCopyMode copy_mode, 152651ccfa2dSFam Zheng Error **errp) 1527893f7ebaSPaolo Bonzini { 1528893f7ebaSPaolo Bonzini MirrorBlockJob *s; 1529429076e8SMax Reitz MirrorBDSOpaque *bs_opaque; 15304ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs; 15314ef85a9cSKevin Wolf bool target_graph_mod; 15324ef85a9cSKevin Wolf bool target_is_backing; 1533b2c2832cSKevin Wolf Error *local_err = NULL; 1534d7086422SKevin Wolf int ret; 1535893f7ebaSPaolo Bonzini 1536eee13dfeSPaolo Bonzini if (granularity == 0) { 1537341ebc2fSJohn Snow granularity = bdrv_get_default_bitmap_granularity(target); 1538eee13dfeSPaolo Bonzini } 1539eee13dfeSPaolo Bonzini 154031826642SEric Blake assert(is_power_of_2(granularity)); 1541eee13dfeSPaolo Bonzini 154248ac0a4dSWen Congyang if (buf_size < 0) { 154348ac0a4dSWen Congyang error_setg(errp, "Invalid parameter 'buf-size'"); 1544cc19f177SVladimir Sementsov-Ogievskiy return NULL; 154548ac0a4dSWen Congyang } 154648ac0a4dSWen Congyang 154748ac0a4dSWen Congyang if (buf_size == 0) { 154848ac0a4dSWen Congyang buf_size = DEFAULT_MIRROR_BUF_SIZE; 154948ac0a4dSWen Congyang } 
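/*
 * Worked example of the defaults, with hypothetical values: a granularity
 * of 0 was just replaced by the target's default bitmap granularity
 * (typically the image's cluster size, or 64 KiB), and a buf_size of 0
 * becomes DEFAULT_MIRROR_BUF_SIZE = MAX_IN_FLIGHT * MAX_IO_BYTES =
 * 16 * 1 MiB = 16 MiB; it is rounded up to a multiple of the granularity
 * when stored in s->buf_size further down.
 */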
15505bc361b8SFam Zheng 155186fae10cSKevin Wolf if (bs == target) { 155286fae10cSKevin Wolf error_setg(errp, "Can't mirror node into itself"); 1553cc19f177SVladimir Sementsov-Ogievskiy return NULL; 155486fae10cSKevin Wolf } 155586fae10cSKevin Wolf 15564ef85a9cSKevin Wolf /* In the case of active commit, add dummy driver to provide consistent 15574ef85a9cSKevin Wolf * reads on the top, while disabling it in the intermediate nodes, and make 15584ef85a9cSKevin Wolf * the backing chain writable. */ 15596cdbceb1SKevin Wolf mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, 15606cdbceb1SKevin Wolf BDRV_O_RDWR, errp); 15614ef85a9cSKevin Wolf if (mirror_top_bs == NULL) { 1562cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1563893f7ebaSPaolo Bonzini } 1564d3c8c674SKevin Wolf if (!filter_node_name) { 1565d3c8c674SKevin Wolf mirror_top_bs->implicit = true; 1566d3c8c674SKevin Wolf } 1567e5182c1cSMax Reitz 1568e5182c1cSMax Reitz /* So that we can always drop this node */ 1569e5182c1cSMax Reitz mirror_top_bs->never_freeze = true; 1570e5182c1cSMax Reitz 15714ef85a9cSKevin Wolf mirror_top_bs->total_sectors = bs->total_sectors; 1572228345bfSMax Reitz mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED; 157380f5c33fSKevin Wolf mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | 157480f5c33fSKevin Wolf BDRV_REQ_NO_FALLBACK; 1575429076e8SMax Reitz bs_opaque = g_new0(MirrorBDSOpaque, 1); 1576429076e8SMax Reitz mirror_top_bs->opaque = bs_opaque; 1577893f7ebaSPaolo Bonzini 15784ef85a9cSKevin Wolf /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep 15797a25fcd0SMax Reitz * it alive until block_job_create() succeeds even if bs has no parent. */ 15804ef85a9cSKevin Wolf bdrv_ref(mirror_top_bs); 15814ef85a9cSKevin Wolf bdrv_drained_begin(bs); 1582b2c2832cSKevin Wolf bdrv_append(mirror_top_bs, bs, &local_err); 15834ef85a9cSKevin Wolf bdrv_drained_end(bs); 15844ef85a9cSKevin Wolf 1585b2c2832cSKevin Wolf if (local_err) { 1586b2c2832cSKevin Wolf bdrv_unref(mirror_top_bs); 1587b2c2832cSKevin Wolf error_propagate(errp, local_err); 1588cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1589b2c2832cSKevin Wolf } 1590b2c2832cSKevin Wolf 15914ef85a9cSKevin Wolf /* Make sure that the source is not resized while the job is running */ 159275859b94SJohn Snow s = block_job_create(job_id, driver, NULL, mirror_top_bs, 15934ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ, 15944ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | 15954ef85a9cSKevin Wolf BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed, 15964ef85a9cSKevin Wolf creation_flags, cb, opaque, errp); 15974ef85a9cSKevin Wolf if (!s) { 15984ef85a9cSKevin Wolf goto fail; 15994ef85a9cSKevin Wolf } 1600429076e8SMax Reitz bs_opaque->job = s; 1601429076e8SMax Reitz 16027a25fcd0SMax Reitz /* The block job now has a reference to this node */ 16037a25fcd0SMax Reitz bdrv_unref(mirror_top_bs); 16047a25fcd0SMax Reitz 16054ef85a9cSKevin Wolf s->mirror_top_bs = mirror_top_bs; 16064ef85a9cSKevin Wolf 16074ef85a9cSKevin Wolf /* No resize for the target either; while the mirror is still running, a 16084ef85a9cSKevin Wolf * consistent read isn't necessarily possible. We could possibly allow 16094ef85a9cSKevin Wolf * writes and graph modifications, though it would likely defeat the 16104ef85a9cSKevin Wolf * purpose of a mirror, so leave them blocked for now. 
16114ef85a9cSKevin Wolf * 16124ef85a9cSKevin Wolf * In the case of active commit, things look a bit different, though, 16134ef85a9cSKevin Wolf * because the target is an already populated backing file in active use. 16144ef85a9cSKevin Wolf * We can allow anything except resize there.*/ 16154ef85a9cSKevin Wolf target_is_backing = bdrv_chain_contains(bs, target); 16164ef85a9cSKevin Wolf target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN); 1617d861ab3aSKevin Wolf s->target = blk_new(s->common.job.aio_context, 1618d861ab3aSKevin Wolf BLK_PERM_WRITE | BLK_PERM_RESIZE | 16194ef85a9cSKevin Wolf (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0), 16204ef85a9cSKevin Wolf BLK_PERM_WRITE_UNCHANGED | 16214ef85a9cSKevin Wolf (target_is_backing ? BLK_PERM_CONSISTENT_READ | 16224ef85a9cSKevin Wolf BLK_PERM_WRITE | 16234ef85a9cSKevin Wolf BLK_PERM_GRAPH_MOD : 0)); 1624d7086422SKevin Wolf ret = blk_insert_bs(s->target, target, errp); 1625d7086422SKevin Wolf if (ret < 0) { 16264ef85a9cSKevin Wolf goto fail; 1627d7086422SKevin Wolf } 1628045a2f82SFam Zheng if (is_mirror) { 1629045a2f82SFam Zheng /* XXX: Mirror target could be a NBD server of target QEMU in the case 1630045a2f82SFam Zheng * of non-shared block migration. To allow migration completion, we 1631045a2f82SFam Zheng * have to allow "inactivate" of the target BB. When that happens, we 1632045a2f82SFam Zheng * know the job is drained, and the vcpus are stopped, so no write 1633045a2f82SFam Zheng * operation will be performed. Block layer already has assertions to 1634045a2f82SFam Zheng * ensure that. */ 1635045a2f82SFam Zheng blk_set_force_allow_inactivate(s->target); 1636045a2f82SFam Zheng } 16379ff7f0dfSKevin Wolf blk_set_allow_aio_context_change(s->target, true); 1638cf312932SKevin Wolf blk_set_disable_request_queuing(s->target, true); 1639e253f4b8SKevin Wolf 164009158f00SBenoît Canet s->replaces = g_strdup(replaces); 1641b952b558SPaolo Bonzini s->on_source_error = on_source_error; 1642b952b558SPaolo Bonzini s->on_target_error = on_target_error; 164303544a6eSFam Zheng s->is_none_mode = is_none_mode; 1644274fcceeSMax Reitz s->backing_mode = backing_mode; 1645cdf3bc93SMax Reitz s->zero_target = zero_target; 1646481debaaSMax Reitz s->copy_mode = copy_mode; 16475bc361b8SFam Zheng s->base = base; 1648eee13dfeSPaolo Bonzini s->granularity = granularity; 164948ac0a4dSWen Congyang s->buf_size = ROUND_UP(buf_size, granularity); 16500fc9f8eaSFam Zheng s->unmap = unmap; 1651b49f7eadSWen Congyang if (auto_complete) { 1652b49f7eadSWen Congyang s->should_complete = true; 1653b49f7eadSWen Congyang } 1654b812f671SPaolo Bonzini 16550db6e54aSFam Zheng s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); 1656b8afb520SFam Zheng if (!s->dirty_bitmap) { 165788f9d1b3SKevin Wolf goto fail; 1658b8afb520SFam Zheng } 1659dbdf699cSVladimir Sementsov-Ogievskiy if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) { 1660dbdf699cSVladimir Sementsov-Ogievskiy bdrv_disable_dirty_bitmap(s->dirty_bitmap); 1661dbdf699cSVladimir Sementsov-Ogievskiy } 166210f3cd15SAlberto Garcia 166367b24427SAlberto Garcia ret = block_job_add_bdrv(&s->common, "source", bs, 0, 166467b24427SAlberto Garcia BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | 166567b24427SAlberto Garcia BLK_PERM_CONSISTENT_READ, 166667b24427SAlberto Garcia errp); 166767b24427SAlberto Garcia if (ret < 0) { 166867b24427SAlberto Garcia goto fail; 166967b24427SAlberto Garcia } 167067b24427SAlberto Garcia 16714ef85a9cSKevin Wolf /* Required permissions are already taken with blk_new() */ 167276d554e2SKevin Wolf 
block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL, 167376d554e2SKevin Wolf &error_abort); 167476d554e2SKevin Wolf 1675f3ede4b0SAlberto Garcia /* In commit_active_start() all intermediate nodes disappear, so 1676f3ede4b0SAlberto Garcia * any jobs in them must be blocked */ 16774ef85a9cSKevin Wolf if (target_is_backing) { 1678f3ede4b0SAlberto Garcia BlockDriverState *iter; 1679f3ede4b0SAlberto Garcia for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) { 16804ef85a9cSKevin Wolf /* XXX BLK_PERM_WRITE needs to be allowed so we don't block 16814ef85a9cSKevin Wolf * ourselves at s->base (if writes are blocked for a node, they are 16824ef85a9cSKevin Wolf * also blocked for its backing file). The other options would be a 16834ef85a9cSKevin Wolf * second filter driver above s->base (== target). */ 16844ef85a9cSKevin Wolf ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, 16854ef85a9cSKevin Wolf BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE, 16864ef85a9cSKevin Wolf errp); 16874ef85a9cSKevin Wolf if (ret < 0) { 16884ef85a9cSKevin Wolf goto fail; 16894ef85a9cSKevin Wolf } 1690f3ede4b0SAlberto Garcia } 1691ef53dc09SAlberto Garcia 1692ef53dc09SAlberto Garcia if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) { 1693ef53dc09SAlberto Garcia goto fail; 1694ef53dc09SAlberto Garcia } 1695f3ede4b0SAlberto Garcia } 169610f3cd15SAlberto Garcia 169712aa4082SMax Reitz QTAILQ_INIT(&s->ops_in_flight); 169812aa4082SMax Reitz 16995ccac6f1SJohn Snow trace_mirror_start(bs, s, opaque); 1700da01ff7fSKevin Wolf job_start(&s->common.job); 1701cc19f177SVladimir Sementsov-Ogievskiy 1702cc19f177SVladimir Sementsov-Ogievskiy return &s->common; 17034ef85a9cSKevin Wolf 17044ef85a9cSKevin Wolf fail: 17054ef85a9cSKevin Wolf if (s) { 17067a25fcd0SMax Reitz /* Make sure this BDS does not go away until we have completed the graph 17077a25fcd0SMax Reitz * changes below */ 17087a25fcd0SMax Reitz bdrv_ref(mirror_top_bs); 17097a25fcd0SMax Reitz 17104ef85a9cSKevin Wolf g_free(s->replaces); 17114ef85a9cSKevin Wolf blk_unref(s->target); 1712429076e8SMax Reitz bs_opaque->job = NULL; 1713e917e2cbSAlberto Garcia if (s->dirty_bitmap) { 17145deb6cbdSVladimir Sementsov-Ogievskiy bdrv_release_dirty_bitmap(s->dirty_bitmap); 1715e917e2cbSAlberto Garcia } 17164ad35181SKevin Wolf job_early_fail(&s->common.job); 17174ef85a9cSKevin Wolf } 17184ef85a9cSKevin Wolf 1719f94dc3b4SMax Reitz bs_opaque->stop = true; 1720f94dc3b4SMax Reitz bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, 1721c1cef672SFam Zheng &error_abort); 17225fe31c25SKevin Wolf bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort); 17237a25fcd0SMax Reitz 17247a25fcd0SMax Reitz bdrv_unref(mirror_top_bs); 1725cc19f177SVladimir Sementsov-Ogievskiy 1726cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1727893f7ebaSPaolo Bonzini } 172803544a6eSFam Zheng 172971aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs, 173071aa9867SAlberto Garcia BlockDriverState *target, const char *replaces, 1731a1999b33SJohn Snow int creation_flags, int64_t speed, 1732a1999b33SJohn Snow uint32_t granularity, int64_t buf_size, 1733274fcceeSMax Reitz MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, 1734cdf3bc93SMax Reitz bool zero_target, 1735274fcceeSMax Reitz BlockdevOnError on_source_error, 173603544a6eSFam Zheng BlockdevOnError on_target_error, 1737481debaaSMax Reitz bool unmap, const char *filter_node_name, 1738481debaaSMax Reitz MirrorCopyMode copy_mode, Error **errp) 173903544a6eSFam 
Zheng { 174003544a6eSFam Zheng bool is_none_mode; 174103544a6eSFam Zheng BlockDriverState *base; 174203544a6eSFam Zheng 1743c8b56501SJohn Snow if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) || 1744c8b56501SJohn Snow (mode == MIRROR_SYNC_MODE_BITMAP)) { 1745c8b56501SJohn Snow error_setg(errp, "Sync mode '%s' not supported", 1746c8b56501SJohn Snow MirrorSyncMode_str(mode)); 1747d58d8453SJohn Snow return; 1748d58d8453SJohn Snow } 174903544a6eSFam Zheng is_none_mode = mode == MIRROR_SYNC_MODE_NONE; 1750760e0063SKevin Wolf base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL; 1751a1999b33SJohn Snow mirror_start_job(job_id, bs, creation_flags, target, replaces, 1752cdf3bc93SMax Reitz speed, granularity, buf_size, backing_mode, zero_target, 175351ccfa2dSFam Zheng on_source_error, on_target_error, unmap, NULL, NULL, 17546cdbceb1SKevin Wolf &mirror_job_driver, is_none_mode, base, false, 1755481debaaSMax Reitz filter_node_name, true, copy_mode, errp); 175603544a6eSFam Zheng } 175703544a6eSFam Zheng 1758cc19f177SVladimir Sementsov-Ogievskiy BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, 175947970dfbSJohn Snow BlockDriverState *base, int creation_flags, 176047970dfbSJohn Snow int64_t speed, BlockdevOnError on_error, 17610db832f4SKevin Wolf const char *filter_node_name, 176278bbd910SFam Zheng BlockCompletionFunc *cb, void *opaque, 176378bbd910SFam Zheng bool auto_complete, Error **errp) 176403544a6eSFam Zheng { 17651ba79388SAlberto Garcia bool base_read_only; 1766cc67f4d1SJeff Cody Error *local_err = NULL; 1767cc19f177SVladimir Sementsov-Ogievskiy BlockJob *ret; 17684da83585SJeff Cody 17691ba79388SAlberto Garcia base_read_only = bdrv_is_read_only(base); 17704da83585SJeff Cody 17711ba79388SAlberto Garcia if (base_read_only) { 17721ba79388SAlberto Garcia if (bdrv_reopen_set_read_only(base, false, errp) < 0) { 1773cc19f177SVladimir Sementsov-Ogievskiy return NULL; 177420a63d2cSFam Zheng } 17751ba79388SAlberto Garcia } 17764da83585SJeff Cody 1777cc19f177SVladimir Sementsov-Ogievskiy ret = mirror_start_job( 1778cc19f177SVladimir Sementsov-Ogievskiy job_id, bs, creation_flags, base, NULL, speed, 0, 0, 1779cdf3bc93SMax Reitz MIRROR_LEAVE_BACKING_CHAIN, false, 178051ccfa2dSFam Zheng on_error, on_error, true, cb, opaque, 17816cdbceb1SKevin Wolf &commit_active_job_driver, false, base, auto_complete, 1782481debaaSMax Reitz filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND, 1783481debaaSMax Reitz &local_err); 17840fb6395cSMarkus Armbruster if (local_err) { 1785cc67f4d1SJeff Cody error_propagate(errp, local_err); 17864da83585SJeff Cody goto error_restore_flags; 17874da83585SJeff Cody } 17884da83585SJeff Cody 1789cc19f177SVladimir Sementsov-Ogievskiy return ret; 17904da83585SJeff Cody 17914da83585SJeff Cody error_restore_flags: 17924da83585SJeff Cody /* ignore error and errp for bdrv_reopen, because we want to propagate 17934da83585SJeff Cody * the original error */ 17941ba79388SAlberto Garcia if (base_read_only) { 17951ba79388SAlberto Garcia bdrv_reopen_set_read_only(base, true, NULL); 17961ba79388SAlberto Garcia } 1797cc19f177SVladimir Sementsov-Ogievskiy return NULL; 179803544a6eSFam Zheng } 1799
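/*
 * Illustrative appendix (not part of mirror.c): a standalone sketch of the
 * granularity arithmetic that do_sync_target_write() and
 * active_write_prepare() rely on, using local stand-ins for the
 * QEMU_ALIGN_UP/QEMU_ALIGN_DOWN/DIV_ROUND_UP macros from qemu/osdep.h.
 * All numbers are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
#define ALIGN_UP(n, m) (ALIGN_DOWN((n) + (m) - 1, (m)))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    uint64_t granularity = 65536;              /* one 64 KiB chunk */
    uint64_t offset = 100000, bytes = 200000;  /* hypothetical guest write */

    /* Head clipping, as in do_sync_target_write() when the head is dirty */
    uint64_t qiov_offset = ALIGN_UP(offset, granularity) - offset;
    offset += qiov_offset;
    bytes -= qiov_offset;

    /* Tail clipping when the last byte lands in a dirty chunk */
    bytes -= (offset + bytes) % granularity;

    /* Chunk indices, as in active_write_prepare() */
    uint64_t start_chunk = offset / granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, granularity);

    /* Prints: aligned range [131072, 262144), chunks 2..4 */
    printf("aligned range [%" PRIu64 ", %" PRIu64 "), chunks %" PRIu64
           "..%" PRIu64 "\n", offset, offset + bytes, start_chunk, end_chunk);
    return 0;
}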