/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;
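
/*
 * State of a single mirror (or active commit) block job: configuration
 * chosen at creation time, plus the dirty/CoW bitmaps, buffer free list
 * and in-flight accounting that the copy loop updates as it runs.
 */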
typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
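
/* Wait until no in-flight operation's chunk range overlaps
 * [offset, offset + bytes) any more.  @self may be NULL (as when called
 * from mirror_iteration()); the waiting_for_op links let an operation
 * that is already (indirectly) waiting for us be skipped instead of
 * producing a deadlock. */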
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                /*
                 * If the operation is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case).
                 */
                if (op->waiting_for_op) {
                    continue;
                }

                self->waiting_for_op = op;
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                self->waiting_for_op = NULL;
                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}
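
/* Completion path for target writes: on failure, re-dirty the range so
 * that a later iteration retries it, and record the error in s->ret if
 * the error policy says the job should fail. */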
static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}
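
/* A worked example of the rounding below (illustrative numbers only):
 * with a 64 KiB granularity, a 1 MiB target cluster size, and a buffer
 * limit large enough not to apply, a request for [64 KiB, 128 KiB)
 * whose surrounding chunks are not yet set in cow_bitmap is widened to
 * [0, 1 MiB); the returned tail adjustment is then
 * 0 + 1 MiB - (64 KiB + 64 KiB) = 896 KiB. */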
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_in_flight &&
            op->is_active_write == active)
        {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}
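
/* The zero and discard variants never read the source; they only issue
 * the corresponding request on the target, so they need no buffer
 * chunks and complete directly through mirror_write_complete(). */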
static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_read()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}
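
/* Submit copy, zero or discard operations for the next run of dirty
 * chunks, starting at the dirty bitmap iterator's current position.
 * Returns the rate-limit delay in nanoseconds for the caller to sleep,
 * or 0 if an error has been recorded in s->ret. */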
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}
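
/* Carve the preallocated transfer buffer into granularity-sized chunks
 * and thread them onto the free list; the loop below relies on
 * buf_size being a multiple of the granularity. */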
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}
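
/* Let the main loop run: if more than one job slice has elapsed since
 * the last pause, yield with a zero-length sleep (so that, e.g.,
 * bdrv_drain_all() can make progress); otherwise just take a pause
 * point. */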
static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
                                      &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
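
/* The main job coroutine: set up the buffers and the initial dirty
 * bitmap, then loop copying dirty chunks until source and target are
 * in sync and the job completes or is cancelled.  Returns 0 on success
 * or a negative errno value on failure. */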
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    if (!job->paused) {
        job_enter(job);
    }
}
 */
115409158f00SBenoît Canet error_setg(&s->replace_blocker,
115509158f00SBenoît Canet "block device is in use by block-job-complete");
115609158f00SBenoît Canet bdrv_op_block_all(s->to_replace, s->replace_blocker);
115709158f00SBenoît Canet bdrv_ref(s->to_replace);
11585a7e7a0bSStefan Hajnoczi
11595a7e7a0bSStefan Hajnoczi aio_context_release(replace_aio_context);
116009158f00SBenoît Canet }
116109158f00SBenoît Canet
1162d63ffd87SPaolo Bonzini s->should_complete = true;
116300769414SMax Reitz
116400769414SMax Reitz /* If the job is paused, it will be re-entered when it is resumed */
116500769414SMax Reitz if (!job->paused) {
11663d70ff53SKevin Wolf job_enter(job);
1167d63ffd87SPaolo Bonzini }
116800769414SMax Reitz }
1169d63ffd87SPaolo Bonzini
1170537c3d4fSStefan Hajnoczi static void coroutine_fn mirror_pause(Job *job)
1171565ac01fSStefan Hajnoczi {
1172da01ff7fSKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
1173565ac01fSStefan Hajnoczi
1174bae8196dSPaolo Bonzini mirror_wait_for_all_io(s);
1175565ac01fSStefan Hajnoczi }
1176565ac01fSStefan Hajnoczi
117789bd0305SKevin Wolf static bool mirror_drained_poll(BlockJob *job)
117889bd0305SKevin Wolf {
117989bd0305SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
11805e771752SSergio Lopez
11815e771752SSergio Lopez /* If the job is neither paused nor cancelled, we can't be sure that it
11825e771752SSergio Lopez * won't issue more requests. We make an exception if we've reached this
11835e771752SSergio Lopez * point from one of our own drain sections, to avoid a deadlock waiting
11845e771752SSergio Lopez * for ourselves.
11855e771752SSergio Lopez */
11865e771752SSergio Lopez if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
11875e771752SSergio Lopez return true;
11885e771752SSergio Lopez }
11895e771752SSergio Lopez
119089bd0305SKevin Wolf return !!s->in_flight;
119189bd0305SKevin Wolf }
119289bd0305SKevin Wolf
11939c785cd7SVladimir Sementsov-Ogievskiy static void mirror_cancel(Job *job, bool force)
1194521ff8b7SVladimir Sementsov-Ogievskiy {
1195521ff8b7SVladimir Sementsov-Ogievskiy MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
1196521ff8b7SVladimir Sementsov-Ogievskiy BlockDriverState *target = blk_bs(s->target);
1197521ff8b7SVladimir Sementsov-Ogievskiy
11989c785cd7SVladimir Sementsov-Ogievskiy if (force || !job_is_ready(job)) {
1199521ff8b7SVladimir Sementsov-Ogievskiy bdrv_cancel_in_flight(target);
1200521ff8b7SVladimir Sementsov-Ogievskiy }
12019c785cd7SVladimir Sementsov-Ogievskiy }
1202521ff8b7SVladimir Sementsov-Ogievskiy
12033fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = {
120433e9e9bdSKevin Wolf .job_driver = {
1205893f7ebaSPaolo Bonzini .instance_size = sizeof(MirrorBlockJob),
12068e4c8700SKevin Wolf .job_type = JOB_TYPE_MIRROR,
120780fa2c75SKevin Wolf .free = block_job_free,
1208b15de828SKevin Wolf .user_resume = block_job_user_resume,
1209f67432a2SJohn Snow .run = mirror_run,
1210737efc1eSJohn Snow .prepare = mirror_prepare,
1211737efc1eSJohn Snow .abort = mirror_abort,
1212565ac01fSStefan Hajnoczi .pause = mirror_pause,
1213da01ff7fSKevin Wolf .complete = mirror_complete,
1214521ff8b7SVladimir Sementsov-Ogievskiy .cancel = mirror_cancel,
12153453d972SKevin Wolf },
121689bd0305SKevin Wolf .drained_poll = mirror_drained_poll,
1217893f7ebaSPaolo Bonzini };
1218893f7ebaSPaolo Bonzini
121903544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = {
122033e9e9bdSKevin Wolf .job_driver = {
122103544a6eSFam Zheng .instance_size = sizeof(MirrorBlockJob),
12228e4c8700SKevin Wolf .job_type = JOB_TYPE_COMMIT,
122380fa2c75SKevin Wolf .free = block_job_free,
1224b15de828SKevin Wolf .user_resume = block_job_user_resume,
1225f67432a2SJohn Snow .run = mirror_run,
1226737efc1eSJohn Snow .prepare = mirror_prepare,
1227737efc1eSJohn Snow .abort = mirror_abort,
1228565ac01fSStefan Hajnoczi .pause = mirror_pause,
1229da01ff7fSKevin Wolf .complete = mirror_complete,
12303453d972SKevin Wolf },
123189bd0305SKevin Wolf .drained_poll = mirror_drained_poll,
123203544a6eSFam Zheng };
123303544a6eSFam Zheng
1234537c3d4fSStefan Hajnoczi static void coroutine_fn
1235537c3d4fSStefan Hajnoczi do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
1236d06107adSMax Reitz uint64_t offset, uint64_t bytes,
1237d06107adSMax Reitz QEMUIOVector *qiov, int flags)
1238d06107adSMax Reitz {
1239d06107adSMax Reitz int ret;
1240dbdf699cSVladimir Sementsov-Ogievskiy size_t qiov_offset = 0;
1241dbdf699cSVladimir Sementsov-Ogievskiy int64_t bitmap_offset, bitmap_end;
1242d06107adSMax Reitz
1243dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
1244dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
1245dbdf699cSVladimir Sementsov-Ogievskiy {
1246dbdf699cSVladimir Sementsov-Ogievskiy /*
1247dbdf699cSVladimir Sementsov-Ogievskiy * Dirty unaligned padding: ignore it.
1248dbdf699cSVladimir Sementsov-Ogievskiy *
1249dbdf699cSVladimir Sementsov-Ogievskiy * Reasoning:
1250dbdf699cSVladimir Sementsov-Ogievskiy * 1. If we copy it, we can't reset the corresponding bit in the
1251dbdf699cSVladimir Sementsov-Ogievskiy *    dirty_bitmap, as there may be some "dirty" bytes that have
1252dbdf699cSVladimir Sementsov-Ogievskiy *    not been copied yet.
1253dbdf699cSVladimir Sementsov-Ogievskiy * 2. It's already dirty, so skipping it does not make mirror
1254dbdf699cSVladimir Sementsov-Ogievskiy *    progress diverge.
1255dbdf699cSVladimir Sementsov-Ogievskiy *
1256dbdf699cSVladimir Sementsov-Ogievskiy * Note that because of this, a guest write may make no contribution
1257dbdf699cSVladimir Sementsov-Ogievskiy * to mirror convergence; that's fine, as we have the background
1258dbdf699cSVladimir Sementsov-Ogievskiy * mirroring process. If under some bad circumstances (high guest
1259dbdf699cSVladimir Sementsov-Ogievskiy * I/O load) the background process starves, we will not converge
1260dbdf699cSVladimir Sementsov-Ogievskiy * anyway, even if each write contributed, as the guest is not
1261dbdf699cSVladimir Sementsov-Ogievskiy * guaranteed to rewrite the whole disk.
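 * For example (illustrative numbers): with a 64 KiB granularity, a
 * guest write to [60 KiB, 70 KiB) whose first chunk is already dirty
 * is shrunk here to [64 KiB, 70 KiB); the 4 KiB of dirty padding is
 * left to the background copy.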
1262dbdf699cSVladimir Sementsov-Ogievskiy */
1263dbdf699cSVladimir Sementsov-Ogievskiy qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
1264dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= qiov_offset) {
1265dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */
1266dbdf699cSVladimir Sementsov-Ogievskiy return;
1267dbdf699cSVladimir Sementsov-Ogievskiy }
1268dbdf699cSVladimir Sementsov-Ogievskiy offset += qiov_offset;
1269dbdf699cSVladimir Sementsov-Ogievskiy bytes -= qiov_offset;
1270dbdf699cSVladimir Sementsov-Ogievskiy }
1271dbdf699cSVladimir Sementsov-Ogievskiy
1272dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
1273dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
1274dbdf699cSVladimir Sementsov-Ogievskiy {
1275dbdf699cSVladimir Sementsov-Ogievskiy uint64_t tail = (offset + bytes) % job->granularity;
1276dbdf699cSVladimir Sementsov-Ogievskiy
1277dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= tail) {
1278dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */
1279dbdf699cSVladimir Sementsov-Ogievskiy return;
1280dbdf699cSVladimir Sementsov-Ogievskiy }
1281dbdf699cSVladimir Sementsov-Ogievskiy bytes -= tail;
1282dbdf699cSVladimir Sementsov-Ogievskiy }
1283dbdf699cSVladimir Sementsov-Ogievskiy
1284dbdf699cSVladimir Sementsov-Ogievskiy /*
1285dbdf699cSVladimir Sementsov-Ogievskiy * Head and tail are now either clean or have been shrunk away, so
1286dbdf699cSVladimir Sementsov-Ogievskiy * for resetting the bitmap we can safely align the range inward.
1287dbdf699cSVladimir Sementsov-Ogievskiy */
1288dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
1289dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
1290dbdf699cSVladimir Sementsov-Ogievskiy if (bitmap_offset < bitmap_end) {
1291dbdf699cSVladimir Sementsov-Ogievskiy bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
1292dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset);
1293dbdf699cSVladimir Sementsov-Ogievskiy }
1294d06107adSMax Reitz
12955c511ac3SVladimir Sementsov-Ogievskiy job_progress_increase_remaining(&job->common.job, bytes);
1296d06107adSMax Reitz
1297d06107adSMax Reitz switch (method) {
1298d06107adSMax Reitz case MIRROR_METHOD_COPY:
1299dbdf699cSVladimir Sementsov-Ogievskiy ret = blk_co_pwritev_part(job->target, offset, bytes,
1300dbdf699cSVladimir Sementsov-Ogievskiy qiov, qiov_offset, flags);
1301d06107adSMax Reitz break;
1302d06107adSMax Reitz
1303d06107adSMax Reitz case MIRROR_METHOD_ZERO:
1304d06107adSMax Reitz assert(!qiov);
13055c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
1306d06107adSMax Reitz break;
1307d06107adSMax Reitz
1308d06107adSMax Reitz case MIRROR_METHOD_DISCARD:
1309d06107adSMax Reitz assert(!qiov);
13105c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pdiscard(job->target, offset, bytes);
1311d06107adSMax Reitz break;
1312d06107adSMax Reitz
1313d06107adSMax Reitz default:
1314d06107adSMax Reitz abort();
1315d06107adSMax Reitz }
1316d06107adSMax Reitz
1317d06107adSMax Reitz if (ret >= 0) {
13185c511ac3SVladimir Sementsov-Ogievskiy job_progress_update(&job->common.job, bytes);
1319d06107adSMax Reitz } else {
1320d06107adSMax Reitz BlockErrorAction action;
1321d06107adSMax Reitz
1322dbdf699cSVladimir Sementsov-Ogievskiy /*
1323dbdf699cSVladimir Sementsov-Ogievskiy * We failed, so we should mark the whole area dirty, aligned up.
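 * (Aligned up on both sides because the bitmap tracks whole chunks:
 * a chunk that failed even partially must be copied again in full.)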
1324dbdf699cSVladimir Sementsov-Ogievskiy * Note that we don't care about shrunk tails if any: they were dirty
1325dbdf699cSVladimir Sementsov-Ogievskiy * at function start, and they must still be dirty, as we've locked
1326dbdf699cSVladimir Sementsov-Ogievskiy * the region for the in-flight op.
1327dbdf699cSVladimir Sementsov-Ogievskiy */
1328dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
1329dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
1330dbdf699cSVladimir Sementsov-Ogievskiy bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
1331dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset);
1332d06107adSMax Reitz job->actively_synced = false;
1333d06107adSMax Reitz
1334d06107adSMax Reitz action = mirror_error_action(job, false, -ret);
1335d06107adSMax Reitz if (action == BLOCK_ERROR_ACTION_REPORT) {
1336d06107adSMax Reitz if (!job->ret) {
1337d06107adSMax Reitz job->ret = ret;
1338d06107adSMax Reitz }
1339d06107adSMax Reitz }
1340d06107adSMax Reitz }
1341d06107adSMax Reitz }
1342d06107adSMax Reitz
1343d06107adSMax Reitz static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
1344d06107adSMax Reitz uint64_t offset,
1345d06107adSMax Reitz uint64_t bytes)
1346d06107adSMax Reitz {
1347d06107adSMax Reitz MirrorOp *op;
1348d06107adSMax Reitz uint64_t start_chunk = offset / s->granularity;
1349d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
1350d06107adSMax Reitz
1351d06107adSMax Reitz op = g_new(MirrorOp, 1);
1352d06107adSMax Reitz *op = (MirrorOp){
1353d06107adSMax Reitz .s = s,
1354d06107adSMax Reitz .offset = offset,
1355d06107adSMax Reitz .bytes = bytes,
1356d06107adSMax Reitz .is_active_write = true,
1357ce8cabbdSKevin Wolf .is_in_flight = true,
1358ead3f1bfSVladimir Sementsov-Ogievskiy .co = qemu_coroutine_self(),
1359d06107adSMax Reitz };
1360d06107adSMax Reitz qemu_co_queue_init(&op->waiting_requests);
1361d06107adSMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
1362d06107adSMax Reitz
1363d06107adSMax Reitz s->in_active_write_counter++;
1364d06107adSMax Reitz
1365d06107adSMax Reitz mirror_wait_on_conflicts(op, s, offset, bytes);
1366d06107adSMax Reitz
1367d06107adSMax Reitz bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
1368d06107adSMax Reitz
1369d06107adSMax Reitz return op;
1370d06107adSMax Reitz }
1371d06107adSMax Reitz
1372d06107adSMax Reitz static void coroutine_fn active_write_settle(MirrorOp *op)
1373d06107adSMax Reitz {
1374d06107adSMax Reitz uint64_t start_chunk = op->offset / op->s->granularity;
1375d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
1376d06107adSMax Reitz op->s->granularity);
1377d06107adSMax Reitz
1378d06107adSMax Reitz if (!--op->s->in_active_write_counter && op->s->actively_synced) {
1379d06107adSMax Reitz BdrvChild *source = op->s->mirror_top_bs->backing;
1380d06107adSMax Reitz
1381d06107adSMax Reitz if (QLIST_FIRST(&source->bs->parents) == source &&
1382d06107adSMax Reitz QLIST_NEXT(source, next_parent) == NULL)
1383d06107adSMax Reitz {
1384d06107adSMax Reitz /* Assert that we are back in sync once all active write
1385d06107adSMax Reitz * operations are settled.
1386d06107adSMax Reitz * Note that we can only assert this if the mirror node
1387d06107adSMax Reitz * is the source node's only parent.
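 * Any other parent could write to the source without going through
 * mirror_top and re-dirty the bitmap behind our back.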
 */
1388d06107adSMax Reitz assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
1389d06107adSMax Reitz }
1390d06107adSMax Reitz }
1391d06107adSMax Reitz bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
1392d06107adSMax Reitz QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
1393d06107adSMax Reitz qemu_co_queue_restart_all(&op->waiting_requests);
1394d06107adSMax Reitz g_free(op);
1395d06107adSMax Reitz }
1396d06107adSMax Reitz
13974ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
13984ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
13994ef85a9cSKevin Wolf {
14004ef85a9cSKevin Wolf return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
14014ef85a9cSKevin Wolf }
14024ef85a9cSKevin Wolf
1403d06107adSMax Reitz static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
1404d06107adSMax Reitz MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
1405d06107adSMax Reitz int flags)
1406d06107adSMax Reitz {
1407d06107adSMax Reitz MirrorOp *op = NULL;
1408d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque;
1409d06107adSMax Reitz int ret = 0;
1410d06107adSMax Reitz bool copy_to_target;
1411d06107adSMax Reitz
1412d06107adSMax Reitz copy_to_target = s->job->ret >= 0 &&
1413d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
1414d06107adSMax Reitz
1415d06107adSMax Reitz if (copy_to_target) {
1416d06107adSMax Reitz op = active_write_prepare(s->job, offset, bytes);
1417d06107adSMax Reitz }
1418d06107adSMax Reitz
1419d06107adSMax Reitz switch (method) {
1420d06107adSMax Reitz case MIRROR_METHOD_COPY:
1421d06107adSMax Reitz ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
1422d06107adSMax Reitz break;
1423d06107adSMax Reitz
1424d06107adSMax Reitz case MIRROR_METHOD_ZERO:
1425d06107adSMax Reitz ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
1426d06107adSMax Reitz break;
1427d06107adSMax Reitz
1428d06107adSMax Reitz case MIRROR_METHOD_DISCARD:
14290b9fd3f4SFam Zheng ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
1430d06107adSMax Reitz break;
1431d06107adSMax Reitz
1432d06107adSMax Reitz default:
1433d06107adSMax Reitz abort();
1434d06107adSMax Reitz }
1435d06107adSMax Reitz
1436d06107adSMax Reitz if (ret < 0) {
1437d06107adSMax Reitz goto out;
1438d06107adSMax Reitz }
1439d06107adSMax Reitz
1440d06107adSMax Reitz if (copy_to_target) {
1441d06107adSMax Reitz do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
1442d06107adSMax Reitz }
1443d06107adSMax Reitz
1444d06107adSMax Reitz out:
1445d06107adSMax Reitz if (copy_to_target) {
1446d06107adSMax Reitz active_write_settle(op);
1447d06107adSMax Reitz }
1448d06107adSMax Reitz return ret;
1449d06107adSMax Reitz }
1450d06107adSMax Reitz
14514ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
14524ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
14534ef85a9cSKevin Wolf {
1454d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque;
1455d06107adSMax Reitz QEMUIOVector bounce_qiov;
1456d06107adSMax Reitz void *bounce_buf;
1457d06107adSMax Reitz int ret = 0;
1458d06107adSMax Reitz bool copy_to_target;
1459d06107adSMax Reitz
1460d06107adSMax Reitz copy_to_target = s->job->ret >= 0 &&
1461d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
1462d06107adSMax Reitz
1463d06107adSMax Reitz if (copy_to_target) {
1464d06107adSMax Reitz /* The guest might concurrently modify the data to write; but
1465d06107adSMax Reitz * the data on source and destination must match, so we have
1466d06107adSMax Reitz * to use a bounce buffer if we are going to write to the
1467d06107adSMax Reitz * target now. */
1468d06107adSMax Reitz bounce_buf = qemu_blockalign(bs, bytes);
1469d06107adSMax Reitz iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);
1470d06107adSMax Reitz
1471d06107adSMax Reitz qemu_iovec_init(&bounce_qiov, 1);
1472d06107adSMax Reitz qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
1473d06107adSMax Reitz qiov = &bounce_qiov;
1474d06107adSMax Reitz }
1475d06107adSMax Reitz
1476d06107adSMax Reitz ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
1477d06107adSMax Reitz flags);
1478d06107adSMax Reitz
1479d06107adSMax Reitz if (copy_to_target) {
1480d06107adSMax Reitz qemu_iovec_destroy(&bounce_qiov);
1481d06107adSMax Reitz qemu_vfree(bounce_buf);
1482d06107adSMax Reitz }
1483d06107adSMax Reitz
1484d06107adSMax Reitz return ret;
14854ef85a9cSKevin Wolf }
14864ef85a9cSKevin Wolf
14874ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
14884ef85a9cSKevin Wolf {
1489ce960aa9SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) {
1490ce960aa9SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_append in mirror_start_job */
1491ce960aa9SVladimir Sementsov-Ogievskiy return 0;
1492ce960aa9SVladimir Sementsov-Ogievskiy }
14934ef85a9cSKevin Wolf return bdrv_co_flush(bs->backing->bs);
14944ef85a9cSKevin Wolf }
14954ef85a9cSKevin Wolf
14964ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
1497f5a5ca79SManos Pitsidianakis int64_t offset, int bytes, BdrvRequestFlags flags)
14984ef85a9cSKevin Wolf {
1499d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
1500d06107adSMax Reitz flags);
15014ef85a9cSKevin Wolf }
15024ef85a9cSKevin Wolf
15034ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
1504f5a5ca79SManos Pitsidianakis int64_t offset, int bytes)
15054ef85a9cSKevin Wolf {
1506d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
1507d06107adSMax Reitz NULL, 0);
15084ef85a9cSKevin Wolf }
15094ef85a9cSKevin Wolf
1510998b3a1eSMax Reitz static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
1511fd4a6493SKevin Wolf {
151218775ff3SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) {
151318775ff3SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_attach_child in
151418775ff3SVladimir Sementsov-Ogievskiy * bdrv_set_backing_hd */
151518775ff3SVladimir Sementsov-Ogievskiy return;
151618775ff3SVladimir Sementsov-Ogievskiy }
1517fd4a6493SKevin Wolf pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
1518fd4a6493SKevin Wolf bs->backing->bs->filename);
1519fd4a6493SKevin Wolf }
1520fd4a6493SKevin Wolf
15214ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
1522bf8e925eSMax Reitz BdrvChildRole role,
1523e0995dc3SKevin Wolf BlockReopenQueue *reopen_queue,
15244ef85a9cSKevin Wolf uint64_t perm, uint64_t shared,
15254ef85a9cSKevin Wolf uint64_t *nperm, uint64_t *nshared)
15264ef85a9cSKevin Wolf {
1527f94dc3b4SMax Reitz MirrorBDSOpaque *s = bs->opaque;
1528f94dc3b4SMax Reitz
1529f94dc3b4SMax Reitz if (s->stop) {
1530f94dc3b4SMax Reitz /*
1531f94dc3b4SMax Reitz * If the job is to be stopped, we do not need to forward
1532f94dc3b4SMax Reitz * anything to the real image.
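 * Requiring nothing and sharing everything lets the child be
 * detached or reconfigured freely while the node is taken down.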
1533f94dc3b4SMax Reitz */
1534f94dc3b4SMax Reitz *nperm = 0;
1535f94dc3b4SMax Reitz *nshared = BLK_PERM_ALL;
1536f94dc3b4SMax Reitz return;
1537f94dc3b4SMax Reitz }
1538f94dc3b4SMax Reitz
153953431b90SMax Reitz bdrv_default_perms(bs, c, role, reopen_queue,
154053431b90SMax Reitz perm, shared, nperm, nshared);
15414ef85a9cSKevin Wolf
154253431b90SMax Reitz if (s->is_commit) {
154353431b90SMax Reitz /*
154453431b90SMax Reitz * For commit jobs, we cannot take CONSISTENT_READ, because
154553431b90SMax Reitz * that permission is unshared for everything above the base
154653431b90SMax Reitz * node (except for filters on the base node).
154753431b90SMax Reitz * We also have to force-share the WRITE permission, or
154853431b90SMax Reitz * otherwise we would block ourselves at the base node (if
154953431b90SMax Reitz * writes are blocked for a node, they are also blocked for
155053431b90SMax Reitz * its backing file).
155153431b90SMax Reitz * (We could also share RESIZE, because it may be needed for
155253431b90SMax Reitz * the target if its size is less than the top node's; but
155353431b90SMax Reitz * bdrv_default_perms_for_cow() automatically shares RESIZE
155453431b90SMax Reitz * for backing nodes if WRITE is shared, so there is no need
155553431b90SMax Reitz * to do it here.)
155653431b90SMax Reitz */
155753431b90SMax Reitz *nperm &= ~BLK_PERM_CONSISTENT_READ;
155853431b90SMax Reitz *nshared |= BLK_PERM_WRITE;
155953431b90SMax Reitz }
15604ef85a9cSKevin Wolf }
15614ef85a9cSKevin Wolf
15624ef85a9cSKevin Wolf /* Dummy node that provides consistent read to its users without requiring it
15634ef85a9cSKevin Wolf * from its backing file and that allows writes on the backing file chain. */
15644ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = {
15654ef85a9cSKevin Wolf .format_name = "mirror_top",
15664ef85a9cSKevin Wolf .bdrv_co_preadv = bdrv_mirror_top_preadv,
15674ef85a9cSKevin Wolf .bdrv_co_pwritev = bdrv_mirror_top_pwritev,
15684ef85a9cSKevin Wolf .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes,
15694ef85a9cSKevin Wolf .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard,
15704ef85a9cSKevin Wolf .bdrv_co_flush = bdrv_mirror_top_flush,
1571fd4a6493SKevin Wolf .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename,
15724ef85a9cSKevin Wolf .bdrv_child_perm = bdrv_mirror_top_child_perm,
15736540fd15SMax Reitz
15746540fd15SMax Reitz .is_filter = true,
15754ef85a9cSKevin Wolf };
15764ef85a9cSKevin Wolf
1577cc19f177SVladimir Sementsov-Ogievskiy static BlockJob *mirror_start_job(
1578cc19f177SVladimir Sementsov-Ogievskiy const char *job_id, BlockDriverState *bs,
157947970dfbSJohn Snow int creation_flags, BlockDriverState *target,
158047970dfbSJohn Snow const char *replaces, int64_t speed,
158147970dfbSJohn Snow uint32_t granularity, int64_t buf_size,
1582274fcceeSMax Reitz BlockMirrorBackingMode backing_mode,
1583cdf3bc93SMax Reitz bool zero_target,
158403544a6eSFam Zheng BlockdevOnError on_source_error,
1585b952b558SPaolo Bonzini BlockdevOnError on_target_error,
15860fc9f8eaSFam Zheng bool unmap,
1587097310b5SMarkus Armbruster BlockCompletionFunc *cb,
158851ccfa2dSFam Zheng void *opaque,
158903544a6eSFam Zheng const BlockJobDriver *driver,
1590b49f7eadSWen Congyang bool is_none_mode, BlockDriverState *base,
159151ccfa2dSFam Zheng bool auto_complete, const char *filter_node_name,
1592481debaaSMax Reitz bool is_mirror, MirrorCopyMode copy_mode,
159351ccfa2dSFam Zheng Error **errp)
1594893f7ebaSPaolo Bonzini {
1595893f7ebaSPaolo Bonzini MirrorBlockJob *s;
1596429076e8SMax Reitz MirrorBDSOpaque *bs_opaque;
15974ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs;
15984ef85a9cSKevin Wolf bool target_is_backing;
15993f072a7fSMax Reitz uint64_t target_perms, target_shared_perms;
1600d7086422SKevin Wolf int ret;
1601893f7ebaSPaolo Bonzini
1602eee13dfeSPaolo Bonzini if (granularity == 0) {
1603341ebc2fSJohn Snow granularity = bdrv_get_default_bitmap_granularity(target);
1604eee13dfeSPaolo Bonzini }
1605eee13dfeSPaolo Bonzini
160631826642SEric Blake assert(is_power_of_2(granularity));
1607eee13dfeSPaolo Bonzini
160848ac0a4dSWen Congyang if (buf_size < 0) {
160948ac0a4dSWen Congyang error_setg(errp, "Invalid parameter 'buf-size'");
1610cc19f177SVladimir Sementsov-Ogievskiy return NULL;
161148ac0a4dSWen Congyang }
161248ac0a4dSWen Congyang
161348ac0a4dSWen Congyang if (buf_size == 0) {
161448ac0a4dSWen Congyang buf_size = DEFAULT_MIRROR_BUF_SIZE;
161548ac0a4dSWen Congyang }
16165bc361b8SFam Zheng
16173f072a7fSMax Reitz if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
161886fae10cSKevin Wolf error_setg(errp, "Can't mirror node into itself");
1619cc19f177SVladimir Sementsov-Ogievskiy return NULL;
162086fae10cSKevin Wolf }
162186fae10cSKevin Wolf
162253431b90SMax Reitz target_is_backing = bdrv_chain_contains(bs, target);
162353431b90SMax Reitz
16244ef85a9cSKevin Wolf /* In the case of active commit, add a dummy driver to provide consistent
16254ef85a9cSKevin Wolf * reads on the top, while disabling it in the intermediate nodes, and make
16264ef85a9cSKevin Wolf * the backing chain writable. */
16276cdbceb1SKevin Wolf mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
16286cdbceb1SKevin Wolf BDRV_O_RDWR, errp);
16294ef85a9cSKevin Wolf if (mirror_top_bs == NULL) {
1630cc19f177SVladimir Sementsov-Ogievskiy return NULL;
1631893f7ebaSPaolo Bonzini }
1632d3c8c674SKevin Wolf if (!filter_node_name) {
1633d3c8c674SKevin Wolf mirror_top_bs->implicit = true;
1634d3c8c674SKevin Wolf }
1635e5182c1cSMax Reitz
1636e5182c1cSMax Reitz /* So that we can always drop this node */
1637e5182c1cSMax Reitz mirror_top_bs->never_freeze = true;
1638e5182c1cSMax Reitz
16394ef85a9cSKevin Wolf mirror_top_bs->total_sectors = bs->total_sectors;
1640228345bfSMax Reitz mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
164180f5c33fSKevin Wolf mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
164280f5c33fSKevin Wolf BDRV_REQ_NO_FALLBACK;
1643429076e8SMax Reitz bs_opaque = g_new0(MirrorBDSOpaque, 1);
1644429076e8SMax Reitz mirror_top_bs->opaque = bs_opaque;
1645893f7ebaSPaolo Bonzini
164653431b90SMax Reitz bs_opaque->is_commit = target_is_backing;
164753431b90SMax Reitz
16484ef85a9cSKevin Wolf bdrv_drained_begin(bs);
1649934aee14SVladimir Sementsov-Ogievskiy ret = bdrv_append(mirror_top_bs, bs, errp);
16504ef85a9cSKevin Wolf bdrv_drained_end(bs);
16514ef85a9cSKevin Wolf
1652934aee14SVladimir Sementsov-Ogievskiy if (ret < 0) {
1653b2c2832cSKevin Wolf bdrv_unref(mirror_top_bs);
1654cc19f177SVladimir Sementsov-Ogievskiy return NULL;
1655b2c2832cSKevin Wolf }
1656b2c2832cSKevin Wolf
16574ef85a9cSKevin Wolf /* Make sure that the source is not resized while the job is running */
165875859b94SJohn Snow s = block_job_create(job_id, driver, NULL, mirror_top_bs,
16594ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ,
16604ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
16614ef85a9cSKevin Wolf BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
16624ef85a9cSKevin Wolf creation_flags, cb, opaque, errp);
16634ef85a9cSKevin Wolf if (!s) {
16644ef85a9cSKevin Wolf goto fail;
16654ef85a9cSKevin Wolf }
1666429076e8SMax Reitz bs_opaque->job = s;
1667429076e8SMax Reitz
16687a25fcd0SMax Reitz /* The block job now has a reference to this node */
16697a25fcd0SMax Reitz bdrv_unref(mirror_top_bs);
16707a25fcd0SMax Reitz
16714ef85a9cSKevin Wolf s->mirror_top_bs = mirror_top_bs;
16724ef85a9cSKevin Wolf
16734ef85a9cSKevin Wolf /* No resize for the target either; while the mirror is still running, a
16744ef85a9cSKevin Wolf * consistent read isn't necessarily possible. We could possibly allow
16754ef85a9cSKevin Wolf * writes and graph modifications, though it would likely defeat the
16764ef85a9cSKevin Wolf * purpose of a mirror, so leave them blocked for now.
16774ef85a9cSKevin Wolf *
16784ef85a9cSKevin Wolf * In the case of active commit, things look a bit different, though,
16794ef85a9cSKevin Wolf * because the target is an already populated backing file in active use.
16804ef85a9cSKevin Wolf * We can allow anything except resize there. */
16813f072a7fSMax Reitz
16823f072a7fSMax Reitz target_perms = BLK_PERM_WRITE;
16833f072a7fSMax Reitz target_shared_perms = BLK_PERM_WRITE_UNCHANGED;
16843f072a7fSMax Reitz
16853f072a7fSMax Reitz if (target_is_backing) {
16863f072a7fSMax Reitz int64_t bs_size, target_size;
16873f072a7fSMax Reitz bs_size = bdrv_getlength(bs);
16883f072a7fSMax Reitz if (bs_size < 0) {
16893f072a7fSMax Reitz error_setg_errno(errp, -bs_size,
16903f072a7fSMax Reitz "Could not inquire top image size");
16913f072a7fSMax Reitz goto fail;
16923f072a7fSMax Reitz }
16933f072a7fSMax Reitz
16943f072a7fSMax Reitz target_size = bdrv_getlength(target);
16953f072a7fSMax Reitz if (target_size < 0) {
16963f072a7fSMax Reitz error_setg_errno(errp, -target_size,
16973f072a7fSMax Reitz "Could not inquire base image size");
16983f072a7fSMax Reitz goto fail;
16993f072a7fSMax Reitz }
17003f072a7fSMax Reitz
17013f072a7fSMax Reitz if (target_size < bs_size) {
17023f072a7fSMax Reitz target_perms |= BLK_PERM_RESIZE;
17033f072a7fSMax Reitz }
17043f072a7fSMax Reitz
17053f072a7fSMax Reitz target_shared_perms |= BLK_PERM_CONSISTENT_READ
17063f072a7fSMax Reitz | BLK_PERM_WRITE
17073f072a7fSMax Reitz | BLK_PERM_GRAPH_MOD;
17083f072a7fSMax Reitz } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
17093f072a7fSMax Reitz /*
17103f072a7fSMax Reitz * We may want to allow this in the future, but it would
17113f072a7fSMax Reitz * require taking some extra care.
17123f072a7fSMax Reitz */
17133f072a7fSMax Reitz error_setg(errp, "Cannot mirror to a filter on top of a node in the "
17143f072a7fSMax Reitz "source's backing chain");
17153f072a7fSMax Reitz goto fail;
17163f072a7fSMax Reitz }
17173f072a7fSMax Reitz
17183f072a7fSMax Reitz if (backing_mode != MIRROR_LEAVE_BACKING_CHAIN) {
17193f072a7fSMax Reitz target_perms |= BLK_PERM_GRAPH_MOD;
17203f072a7fSMax Reitz }
17213f072a7fSMax Reitz
1722d861ab3aSKevin Wolf s->target = blk_new(s->common.job.aio_context,
17233f072a7fSMax Reitz target_perms, target_shared_perms);
1724d7086422SKevin Wolf ret = blk_insert_bs(s->target, target, errp);
1725d7086422SKevin Wolf if (ret < 0) {
17264ef85a9cSKevin Wolf goto fail;
1727d7086422SKevin Wolf }
1728045a2f82SFam Zheng if (is_mirror) {
1729045a2f82SFam Zheng /* XXX: Mirror target could be an NBD server of the target QEMU in the
1730045a2f82SFam Zheng * case of non-shared block migration. To allow migration completion,
1731045a2f82SFam Zheng * we have to allow "inactivate" of the target BB. When that happens, we
1732045a2f82SFam Zheng * know the job is drained, and the vcpus are stopped, so no write
1733045a2f82SFam Zheng * operation will be performed. The block layer already has assertions
1734045a2f82SFam Zheng * to ensure that. */
1735045a2f82SFam Zheng blk_set_force_allow_inactivate(s->target);
1736045a2f82SFam Zheng }
17379ff7f0dfSKevin Wolf blk_set_allow_aio_context_change(s->target, true);
1738cf312932SKevin Wolf blk_set_disable_request_queuing(s->target, true);
1739e253f4b8SKevin Wolf
174009158f00SBenoît Canet s->replaces = g_strdup(replaces);
1741b952b558SPaolo Bonzini s->on_source_error = on_source_error;
1742b952b558SPaolo Bonzini s->on_target_error = on_target_error;
174303544a6eSFam Zheng s->is_none_mode = is_none_mode;
1744274fcceeSMax Reitz s->backing_mode = backing_mode;
1745cdf3bc93SMax Reitz s->zero_target = zero_target;
1746481debaaSMax Reitz s->copy_mode = copy_mode;
17475bc361b8SFam Zheng s->base = base;
17483f072a7fSMax Reitz s->base_overlay = bdrv_find_overlay(bs, base);
1749eee13dfeSPaolo Bonzini s->granularity = granularity;
175048ac0a4dSWen Congyang s->buf_size = ROUND_UP(buf_size, granularity);
17510fc9f8eaSFam Zheng s->unmap = unmap;
1752b49f7eadSWen Congyang if (auto_complete) {
1753b49f7eadSWen Congyang s->should_complete = true;
1754b49f7eadSWen Congyang }
1755b812f671SPaolo Bonzini
17560db6e54aSFam Zheng s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1757b8afb520SFam Zheng if (!s->dirty_bitmap) {
175888f9d1b3SKevin Wolf goto fail;
1759b8afb520SFam Zheng }
1760dbdf699cSVladimir Sementsov-Ogievskiy if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
1761dbdf699cSVladimir Sementsov-Ogievskiy bdrv_disable_dirty_bitmap(s->dirty_bitmap);
1762dbdf699cSVladimir Sementsov-Ogievskiy }
176310f3cd15SAlberto Garcia
176467b24427SAlberto Garcia ret = block_job_add_bdrv(&s->common, "source", bs, 0,
176567b24427SAlberto Garcia BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
176667b24427SAlberto Garcia BLK_PERM_CONSISTENT_READ,
176767b24427SAlberto Garcia errp);
176867b24427SAlberto Garcia if (ret < 0) {
176967b24427SAlberto Garcia goto fail;
177067b24427SAlberto Garcia }
177167b24427SAlberto Garcia
17724ef85a9cSKevin Wolf /* Required permissions are already taken with blk_new() */
177376d554e2SKevin Wolf block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
177476d554e2SKevin Wolf &error_abort);
177576d554e2SKevin Wolf
1776f3ede4b0SAlberto Garcia /* In commit_active_start() all intermediate nodes disappear, so
1777f3ede4b0SAlberto Garcia * any jobs in them must be blocked */
17784ef85a9cSKevin Wolf if (target_is_backing) {
17793f072a7fSMax Reitz BlockDriverState *iter, *filtered_target;
17803f072a7fSMax Reitz uint64_t iter_shared_perms;
17813f072a7fSMax Reitz
17823f072a7fSMax Reitz /*
17833f072a7fSMax Reitz * The topmost node with
17843f072a7fSMax Reitz * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
17853f072a7fSMax Reitz */
17863f072a7fSMax Reitz filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));
17873f072a7fSMax Reitz
17883f072a7fSMax Reitz assert(bdrv_skip_filters(filtered_target) ==
17893f072a7fSMax Reitz bdrv_skip_filters(target));
17903f072a7fSMax Reitz
17913f072a7fSMax Reitz /*
17923f072a7fSMax Reitz * XXX BLK_PERM_WRITE needs to be allowed so we don't block
17934ef85a9cSKevin Wolf * ourselves at s->base (if writes are blocked for a node, they are
17944ef85a9cSKevin Wolf * also blocked for its backing file). The other option would be a
17953f072a7fSMax Reitz * second filter driver above s->base (== target).
17963f072a7fSMax Reitz */
17973f072a7fSMax Reitz iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;
17983f072a7fSMax Reitz
17993f072a7fSMax Reitz for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
18003f072a7fSMax Reitz iter = bdrv_filter_or_cow_bs(iter))
18013f072a7fSMax Reitz {
18023f072a7fSMax Reitz if (iter == filtered_target) {
18033f072a7fSMax Reitz /*
18043f072a7fSMax Reitz * From here on, all nodes are filters on the base.
18053f072a7fSMax Reitz * This allows us to share BLK_PERM_CONSISTENT_READ.
18063f072a7fSMax Reitz */
18073f072a7fSMax Reitz iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
18083f072a7fSMax Reitz }
18093f072a7fSMax Reitz
18104ef85a9cSKevin Wolf ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
18113f072a7fSMax Reitz iter_shared_perms, errp);
18124ef85a9cSKevin Wolf if (ret < 0) {
18134ef85a9cSKevin Wolf goto fail;
18144ef85a9cSKevin Wolf }
1815f3ede4b0SAlberto Garcia }
1816ef53dc09SAlberto Garcia
1817ef53dc09SAlberto Garcia if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
1818ef53dc09SAlberto Garcia goto fail;
1819ef53dc09SAlberto Garcia }
1820f3ede4b0SAlberto Garcia }
182110f3cd15SAlberto Garcia
182212aa4082SMax Reitz QTAILQ_INIT(&s->ops_in_flight);
182312aa4082SMax Reitz
18245ccac6f1SJohn Snow trace_mirror_start(bs, s, opaque);
1825da01ff7fSKevin Wolf job_start(&s->common.job);
1826cc19f177SVladimir Sementsov-Ogievskiy
1827cc19f177SVladimir Sementsov-Ogievskiy return &s->common;
18284ef85a9cSKevin Wolf
18294ef85a9cSKevin Wolf fail:
18304ef85a9cSKevin Wolf if (s) {
18317a25fcd0SMax Reitz /* Make sure this BDS does not go away until we have completed the graph
18327a25fcd0SMax Reitz * changes below */
18337a25fcd0SMax Reitz bdrv_ref(mirror_top_bs);
18347a25fcd0SMax Reitz
18354ef85a9cSKevin Wolf g_free(s->replaces);
18364ef85a9cSKevin Wolf blk_unref(s->target);
1837429076e8SMax Reitz bs_opaque->job = NULL;
1838e917e2cbSAlberto Garcia if (s->dirty_bitmap) {
18395deb6cbdSVladimir Sementsov-Ogievskiy bdrv_release_dirty_bitmap(s->dirty_bitmap);
1840e917e2cbSAlberto Garcia }
18414ad35181SKevin Wolf job_early_fail(&s->common.job);
18424ef85a9cSKevin Wolf }
18434ef85a9cSKevin Wolf
1844f94dc3b4SMax Reitz bs_opaque->stop = true;
1845f94dc3b4SMax Reitz bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
1846c1cef672SFam Zheng &error_abort);
18473f072a7fSMax Reitz bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
18487a25fcd0SMax Reitz
18497a25fcd0SMax Reitz bdrv_unref(mirror_top_bs);
1850cc19f177SVladimir Sementsov-Ogievskiy
1851cc19f177SVladimir Sementsov-Ogievskiy return NULL;
1852893f7ebaSPaolo Bonzini }
185303544a6eSFam Zheng
185471aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs,
185571aa9867SAlberto Garcia BlockDriverState *target, const char *replaces,
1856a1999b33SJohn Snow int creation_flags, int64_t speed,
1857a1999b33SJohn Snow uint32_t granularity, int64_t buf_size,
1858274fcceeSMax Reitz MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1859cdf3bc93SMax Reitz bool zero_target,
1860274fcceeSMax Reitz BlockdevOnError on_source_error,
186103544a6eSFam Zheng BlockdevOnError on_target_error,
1862481debaaSMax Reitz bool unmap, const char *filter_node_name,
1863481debaaSMax Reitz MirrorCopyMode copy_mode, Error **errp)
186403544a6eSFam Zheng {
186503544a6eSFam Zheng bool is_none_mode;
186603544a6eSFam Zheng BlockDriverState *base;
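/* Sync mode 'none' mirrors only writes made from now on; 'top' copies
 * just the top image, so its first backing file becomes the base;
 * 'full' uses no base. */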
186703544a6eSFam Zheng
1868c8b56501SJohn Snow if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
1869c8b56501SJohn Snow (mode == MIRROR_SYNC_MODE_BITMAP)) {
1870c8b56501SJohn Snow error_setg(errp, "Sync mode '%s' not supported",
1871c8b56501SJohn Snow MirrorSyncMode_str(mode));
1872d58d8453SJohn Snow return;
1873d58d8453SJohn Snow }
187403544a6eSFam Zheng is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
18753f072a7fSMax Reitz base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
1876a1999b33SJohn Snow mirror_start_job(job_id, bs, creation_flags, target, replaces,
1877cdf3bc93SMax Reitz speed, granularity, buf_size, backing_mode, zero_target,
187851ccfa2dSFam Zheng on_source_error, on_target_error, unmap, NULL, NULL,
18796cdbceb1SKevin Wolf &mirror_job_driver, is_none_mode, base, false,
1880481debaaSMax Reitz filter_node_name, true, copy_mode, errp);
188103544a6eSFam Zheng }
188203544a6eSFam Zheng
1883cc19f177SVladimir Sementsov-Ogievskiy BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
188447970dfbSJohn Snow BlockDriverState *base, int creation_flags,
188547970dfbSJohn Snow int64_t speed, BlockdevOnError on_error,
18860db832f4SKevin Wolf const char *filter_node_name,
188778bbd910SFam Zheng BlockCompletionFunc *cb, void *opaque,
188878bbd910SFam Zheng bool auto_complete, Error **errp)
188903544a6eSFam Zheng {
18901ba79388SAlberto Garcia bool base_read_only;
1891eb5becc1SVladimir Sementsov-Ogievskiy BlockJob *job;
18924da83585SJeff Cody
18931ba79388SAlberto Garcia base_read_only = bdrv_is_read_only(base);
18944da83585SJeff Cody
18951ba79388SAlberto Garcia if (base_read_only) {
18961ba79388SAlberto Garcia if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
1897cc19f177SVladimir Sementsov-Ogievskiy return NULL;
189820a63d2cSFam Zheng }
18991ba79388SAlberto Garcia }
19004da83585SJeff Cody
1901eb5becc1SVladimir Sementsov-Ogievskiy job = mirror_start_job(
1902cc19f177SVladimir Sementsov-Ogievskiy job_id, bs, creation_flags, base, NULL, speed, 0, 0,
1903cdf3bc93SMax Reitz MIRROR_LEAVE_BACKING_CHAIN, false,
190451ccfa2dSFam Zheng on_error, on_error, true, cb, opaque,
19056cdbceb1SKevin Wolf &commit_active_job_driver, false, base, auto_complete,
1906481debaaSMax Reitz filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
1907eb5becc1SVladimir Sementsov-Ogievskiy errp);
1908eb5becc1SVladimir Sementsov-Ogievskiy if (!job) {
19094da83585SJeff Cody goto error_restore_flags;
19104da83585SJeff Cody }
19114da83585SJeff Cody
1912eb5becc1SVladimir Sementsov-Ogievskiy return job;
19134da83585SJeff Cody
19144da83585SJeff Cody error_restore_flags:
19154da83585SJeff Cody /* ignore error and errp for bdrv_reopen, because we want to propagate
19164da83585SJeff Cody * the original error */
19171ba79388SAlberto Garcia if (base_read_only) {
19181ba79388SAlberto Garcia bdrv_reopen_set_read_only(base, true, NULL);
19191ba79388SAlberto Garcia }
1920cc19f177SVladimir Sementsov-Ogievskiy return NULL;
192103544a6eSFam Zheng }
1922