1893f7ebaSPaolo Bonzini /* 2893f7ebaSPaolo Bonzini * Image mirroring 3893f7ebaSPaolo Bonzini * 4893f7ebaSPaolo Bonzini * Copyright Red Hat, Inc. 2012 5893f7ebaSPaolo Bonzini * 6893f7ebaSPaolo Bonzini * Authors: 7893f7ebaSPaolo Bonzini * Paolo Bonzini <pbonzini@redhat.com> 8893f7ebaSPaolo Bonzini * 9893f7ebaSPaolo Bonzini * This work is licensed under the terms of the GNU LGPL, version 2 or later. 10893f7ebaSPaolo Bonzini * See the COPYING.LIB file in the top-level directory. 11893f7ebaSPaolo Bonzini * 12893f7ebaSPaolo Bonzini */ 13893f7ebaSPaolo Bonzini 1480c71a24SPeter Maydell #include "qemu/osdep.h" 15fd4a6493SKevin Wolf #include "qemu/cutils.h" 1612aa4082SMax Reitz #include "qemu/coroutine.h" 171181e19aSMax Reitz #include "qemu/range.h" 18893f7ebaSPaolo Bonzini #include "trace.h" 19c87621eaSJohn Snow #include "block/blockjob_int.h" 20737e150eSPaolo Bonzini #include "block/block_int.h" 21373340b2SMax Reitz #include "sysemu/block-backend.h" 22da34e65cSMarkus Armbruster #include "qapi/error.h" 23cc7a8ea7SMarkus Armbruster #include "qapi/qmp/qerror.h" 24893f7ebaSPaolo Bonzini #include "qemu/ratelimit.h" 25b812f671SPaolo Bonzini #include "qemu/bitmap.h" 26893f7ebaSPaolo Bonzini 27402a4741SPaolo Bonzini #define MAX_IN_FLIGHT 16 28b436982fSEric Blake #define MAX_IO_BYTES (1 << 20) /* 1 Mb */ 29b436982fSEric Blake #define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES) 30402a4741SPaolo Bonzini 31402a4741SPaolo Bonzini /* The mirroring buffer is a list of granularity-sized chunks. 32402a4741SPaolo Bonzini * Free chunks are organized in a list. 33402a4741SPaolo Bonzini */ 34402a4741SPaolo Bonzini typedef struct MirrorBuffer { 35402a4741SPaolo Bonzini QSIMPLEQ_ENTRY(MirrorBuffer) next; 36402a4741SPaolo Bonzini } MirrorBuffer; 37893f7ebaSPaolo Bonzini 3812aa4082SMax Reitz typedef struct MirrorOp MirrorOp; 3912aa4082SMax Reitz 40893f7ebaSPaolo Bonzini typedef struct MirrorBlockJob { 41893f7ebaSPaolo Bonzini BlockJob common; 42e253f4b8SKevin Wolf BlockBackend *target; 434ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs; 445bc361b8SFam Zheng BlockDriverState *base; 453f072a7fSMax Reitz BlockDriverState *base_overlay; 464ef85a9cSKevin Wolf 4709158f00SBenoît Canet /* The name of the graph node to replace */ 4809158f00SBenoît Canet char *replaces; 4909158f00SBenoît Canet /* The BDS to replace */ 5009158f00SBenoît Canet BlockDriverState *to_replace; 5109158f00SBenoît Canet /* Used to block operations on the drive-mirror-replace target */ 5209158f00SBenoît Canet Error *replace_blocker; 5303544a6eSFam Zheng bool is_none_mode; 54274fcceeSMax Reitz BlockMirrorBackingMode backing_mode; 55cdf3bc93SMax Reitz /* Whether the target image requires explicit zero-initialization */ 56cdf3bc93SMax Reitz bool zero_target; 57d06107adSMax Reitz MirrorCopyMode copy_mode; 58b952b558SPaolo Bonzini BlockdevOnError on_source_error, on_target_error; 59d63ffd87SPaolo Bonzini bool synced; 60d06107adSMax Reitz /* Set when the target is synced (dirty bitmap is clean, nothing 61d06107adSMax Reitz * in flight) and the job is running in active mode */ 62d06107adSMax Reitz bool actively_synced; 63d63ffd87SPaolo Bonzini bool should_complete; 64eee13dfeSPaolo Bonzini int64_t granularity; 65b812f671SPaolo Bonzini size_t buf_size; 66b21c7652SMax Reitz int64_t bdev_length; 67b812f671SPaolo Bonzini unsigned long *cow_bitmap; 68e4654d2dSFam Zheng BdrvDirtyBitmap *dirty_bitmap; 69dc162c8eSFam Zheng BdrvDirtyBitmapIter *dbi; 70893f7ebaSPaolo Bonzini uint8_t *buf; 71402a4741SPaolo Bonzini QSIMPLEQ_HEAD(, MirrorBuffer) buf_free; 
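/* Length of the buf_free list above: grown by mirror_free_init() and
 * mirror_iteration_done() when chunks are (re)added, shrunk by
 * mirror_co_read() when chunks are taken for a copy operation. */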
72402a4741SPaolo Bonzini int buf_free_count; 73bd48bde8SPaolo Bonzini 7449efb1f5SDenis V. Lunev uint64_t last_pause_ns; 75402a4741SPaolo Bonzini unsigned long *in_flight_bitmap; 76bd48bde8SPaolo Bonzini int in_flight; 77b436982fSEric Blake int64_t bytes_in_flight; 78b58deb34SPaolo Bonzini QTAILQ_HEAD(, MirrorOp) ops_in_flight; 79bd48bde8SPaolo Bonzini int ret; 800fc9f8eaSFam Zheng bool unmap; 81b436982fSEric Blake int target_cluster_size; 82e5b43573SFam Zheng int max_iov; 8390ab48ebSAnton Nefedov bool initial_zeroing_ongoing; 84d06107adSMax Reitz int in_active_write_counter; 85737efc1eSJohn Snow bool prepared; 865e771752SSergio Lopez bool in_drain; 87893f7ebaSPaolo Bonzini } MirrorBlockJob; 88893f7ebaSPaolo Bonzini 89429076e8SMax Reitz typedef struct MirrorBDSOpaque { 90429076e8SMax Reitz MirrorBlockJob *job; 91f94dc3b4SMax Reitz bool stop; 9253431b90SMax Reitz bool is_commit; 93429076e8SMax Reitz } MirrorBDSOpaque; 94429076e8SMax Reitz 9512aa4082SMax Reitz struct MirrorOp { 96bd48bde8SPaolo Bonzini MirrorBlockJob *s; 97bd48bde8SPaolo Bonzini QEMUIOVector qiov; 98b436982fSEric Blake int64_t offset; 99b436982fSEric Blake uint64_t bytes; 1002e1990b2SMax Reitz 1012e1990b2SMax Reitz /* The pointee is set by mirror_co_read(), mirror_co_zero(), and 1022e1990b2SMax Reitz * mirror_co_discard() before yielding for the first time */ 1032e1990b2SMax Reitz int64_t *bytes_handled; 10412aa4082SMax Reitz 1051181e19aSMax Reitz bool is_pseudo_op; 106d06107adSMax Reitz bool is_active_write; 107ce8cabbdSKevin Wolf bool is_in_flight; 10812aa4082SMax Reitz CoQueue waiting_requests; 109eed325b9SKevin Wolf Coroutine *co; 11012aa4082SMax Reitz 11112aa4082SMax Reitz QTAILQ_ENTRY(MirrorOp) next; 11212aa4082SMax Reitz }; 113bd48bde8SPaolo Bonzini 1144295c5fcSMax Reitz typedef enum MirrorMethod { 1154295c5fcSMax Reitz MIRROR_METHOD_COPY, 1164295c5fcSMax Reitz MIRROR_METHOD_ZERO, 1174295c5fcSMax Reitz MIRROR_METHOD_DISCARD, 1184295c5fcSMax Reitz } MirrorMethod; 1194295c5fcSMax Reitz 120b952b558SPaolo Bonzini static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, 121b952b558SPaolo Bonzini int error) 122b952b558SPaolo Bonzini { 123b952b558SPaolo Bonzini s->synced = false; 124d06107adSMax Reitz s->actively_synced = false; 125b952b558SPaolo Bonzini if (read) { 12681e254dcSKevin Wolf return block_job_error_action(&s->common, s->on_source_error, 12781e254dcSKevin Wolf true, error); 128b952b558SPaolo Bonzini } else { 12981e254dcSKevin Wolf return block_job_error_action(&s->common, s->on_target_error, 13081e254dcSKevin Wolf false, error); 131b952b558SPaolo Bonzini } 132b952b558SPaolo Bonzini } 133b952b558SPaolo Bonzini 1341181e19aSMax Reitz static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self, 1351181e19aSMax Reitz MirrorBlockJob *s, 1361181e19aSMax Reitz uint64_t offset, 1371181e19aSMax Reitz uint64_t bytes) 1381181e19aSMax Reitz { 1391181e19aSMax Reitz uint64_t self_start_chunk = offset / s->granularity; 1401181e19aSMax Reitz uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); 1411181e19aSMax Reitz uint64_t self_nb_chunks = self_end_chunk - self_start_chunk; 1421181e19aSMax Reitz 1431181e19aSMax Reitz while (find_next_bit(s->in_flight_bitmap, self_end_chunk, 1441181e19aSMax Reitz self_start_chunk) < self_end_chunk && 1451181e19aSMax Reitz s->ret >= 0) 1461181e19aSMax Reitz { 1471181e19aSMax Reitz MirrorOp *op; 1481181e19aSMax Reitz 1491181e19aSMax Reitz QTAILQ_FOREACH(op, &s->ops_in_flight, next) { 1501181e19aSMax Reitz uint64_t op_start_chunk = op->offset / s->granularity; 
1511181e19aSMax Reitz uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes, 1521181e19aSMax Reitz s->granularity) - 1531181e19aSMax Reitz op_start_chunk; 1541181e19aSMax Reitz 1551181e19aSMax Reitz if (op == self) { 1561181e19aSMax Reitz continue; 1571181e19aSMax Reitz } 1581181e19aSMax Reitz 1591181e19aSMax Reitz if (ranges_overlap(self_start_chunk, self_nb_chunks, 1601181e19aSMax Reitz op_start_chunk, op_nb_chunks)) 1611181e19aSMax Reitz { 1621181e19aSMax Reitz qemu_co_queue_wait(&op->waiting_requests, NULL); 1631181e19aSMax Reitz break; 1641181e19aSMax Reitz } 1651181e19aSMax Reitz } 1661181e19aSMax Reitz } 1671181e19aSMax Reitz } 1681181e19aSMax Reitz 1692e1990b2SMax Reitz static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret) 170bd48bde8SPaolo Bonzini { 171bd48bde8SPaolo Bonzini MirrorBlockJob *s = op->s; 172402a4741SPaolo Bonzini struct iovec *iov; 173bd48bde8SPaolo Bonzini int64_t chunk_num; 174b436982fSEric Blake int i, nb_chunks; 175bd48bde8SPaolo Bonzini 176b436982fSEric Blake trace_mirror_iteration_done(s, op->offset, op->bytes, ret); 177bd48bde8SPaolo Bonzini 178bd48bde8SPaolo Bonzini s->in_flight--; 179b436982fSEric Blake s->bytes_in_flight -= op->bytes; 180402a4741SPaolo Bonzini iov = op->qiov.iov; 181402a4741SPaolo Bonzini for (i = 0; i < op->qiov.niov; i++) { 182402a4741SPaolo Bonzini MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base; 183402a4741SPaolo Bonzini QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next); 184402a4741SPaolo Bonzini s->buf_free_count++; 185402a4741SPaolo Bonzini } 186402a4741SPaolo Bonzini 187b436982fSEric Blake chunk_num = op->offset / s->granularity; 188b436982fSEric Blake nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); 18912aa4082SMax Reitz 190402a4741SPaolo Bonzini bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks); 19112aa4082SMax Reitz QTAILQ_REMOVE(&s->ops_in_flight, op, next); 192b21c7652SMax Reitz if (ret >= 0) { 193b21c7652SMax Reitz if (s->cow_bitmap) { 194bd48bde8SPaolo Bonzini bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); 195bd48bde8SPaolo Bonzini } 19690ab48ebSAnton Nefedov if (!s->initial_zeroing_ongoing) { 19730a5c887SKevin Wolf job_progress_update(&s->common.job, op->bytes); 198b21c7652SMax Reitz } 19990ab48ebSAnton Nefedov } 2006df3bf8eSZhang Min qemu_iovec_destroy(&op->qiov); 2017b770c72SStefan Hajnoczi 20212aa4082SMax Reitz qemu_co_queue_restart_all(&op->waiting_requests); 20312aa4082SMax Reitz g_free(op); 2047b770c72SStefan Hajnoczi } 205bd48bde8SPaolo Bonzini 2062e1990b2SMax Reitz static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret) 207bd48bde8SPaolo Bonzini { 208bd48bde8SPaolo Bonzini MirrorBlockJob *s = op->s; 209b9e413ddSPaolo Bonzini 210bd48bde8SPaolo Bonzini if (ret < 0) { 211bd48bde8SPaolo Bonzini BlockErrorAction action; 212bd48bde8SPaolo Bonzini 213e0d7f73eSEric Blake bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes); 214bd48bde8SPaolo Bonzini action = mirror_error_action(s, false, -ret); 215a589569fSWenchao Xia if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { 216bd48bde8SPaolo Bonzini s->ret = ret; 217bd48bde8SPaolo Bonzini } 218bd48bde8SPaolo Bonzini } 219d12ade57SVladimir Sementsov-Ogievskiy 220bd48bde8SPaolo Bonzini mirror_iteration_done(op, ret); 221bd48bde8SPaolo Bonzini } 222bd48bde8SPaolo Bonzini 2232e1990b2SMax Reitz static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret) 224bd48bde8SPaolo Bonzini { 225bd48bde8SPaolo Bonzini MirrorBlockJob *s = op->s; 226b9e413ddSPaolo Bonzini 227bd48bde8SPaolo Bonzini if (ret < 0) { 
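/* The read from the source failed: re-mark the range as dirty so it is
 * retried later, and let the on-source-error policy decide whether the
 * error is reported. */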
228bd48bde8SPaolo Bonzini BlockErrorAction action; 229bd48bde8SPaolo Bonzini 230e0d7f73eSEric Blake bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes); 231bd48bde8SPaolo Bonzini action = mirror_error_action(s, true, -ret); 232a589569fSWenchao Xia if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { 233bd48bde8SPaolo Bonzini s->ret = ret; 234bd48bde8SPaolo Bonzini } 235bd48bde8SPaolo Bonzini 236bd48bde8SPaolo Bonzini mirror_iteration_done(op, ret); 237d12ade57SVladimir Sementsov-Ogievskiy return; 238bd48bde8SPaolo Bonzini } 239d12ade57SVladimir Sementsov-Ogievskiy 240d12ade57SVladimir Sementsov-Ogievskiy ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0); 241d12ade57SVladimir Sementsov-Ogievskiy mirror_write_complete(op, ret); 242b9e413ddSPaolo Bonzini } 243bd48bde8SPaolo Bonzini 244782d97efSEric Blake /* Clip bytes relative to offset to not exceed end-of-file */ 245782d97efSEric Blake static inline int64_t mirror_clip_bytes(MirrorBlockJob *s, 246782d97efSEric Blake int64_t offset, 247782d97efSEric Blake int64_t bytes) 248782d97efSEric Blake { 249782d97efSEric Blake return MIN(bytes, s->bdev_length - offset); 250782d97efSEric Blake } 251782d97efSEric Blake 252782d97efSEric Blake /* Round offset and/or bytes to target cluster if COW is needed, and 253782d97efSEric Blake * return the offset of the adjusted tail against original. */ 254782d97efSEric Blake static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset, 255ae4cc877SEric Blake uint64_t *bytes) 256893f7ebaSPaolo Bonzini { 257e5b43573SFam Zheng bool need_cow; 258e5b43573SFam Zheng int ret = 0; 259782d97efSEric Blake int64_t align_offset = *offset; 2607cfd5275SEric Blake int64_t align_bytes = *bytes; 261782d97efSEric Blake int max_bytes = s->granularity * s->max_iov; 262893f7ebaSPaolo Bonzini 263782d97efSEric Blake need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap); 264782d97efSEric Blake need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity, 265e5b43573SFam Zheng s->cow_bitmap); 266e5b43573SFam Zheng if (need_cow) { 267782d97efSEric Blake bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes, 268782d97efSEric Blake &align_offset, &align_bytes); 2698f0720ecSPaolo Bonzini } 2708f0720ecSPaolo Bonzini 271782d97efSEric Blake if (align_bytes > max_bytes) { 272782d97efSEric Blake align_bytes = max_bytes; 273e5b43573SFam Zheng if (need_cow) { 274782d97efSEric Blake align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size); 275e5b43573SFam Zheng } 276e5b43573SFam Zheng } 277782d97efSEric Blake /* Clipping may result in align_bytes unaligned to chunk boundary, but 2784150ae60SFam Zheng * that doesn't matter because it's already the end of source image. 
*/ 279782d97efSEric Blake align_bytes = mirror_clip_bytes(s, align_offset, align_bytes); 280402a4741SPaolo Bonzini 281782d97efSEric Blake ret = align_offset + align_bytes - (*offset + *bytes); 282782d97efSEric Blake *offset = align_offset; 283782d97efSEric Blake *bytes = align_bytes; 284e5b43573SFam Zheng assert(ret >= 0); 285e5b43573SFam Zheng return ret; 286e5b43573SFam Zheng } 287e5b43573SFam Zheng 288537c3d4fSStefan Hajnoczi static inline void coroutine_fn 2899178f4feSKevin Wolf mirror_wait_for_any_operation(MirrorBlockJob *s, bool active) 29021cd917fSFam Zheng { 29112aa4082SMax Reitz MirrorOp *op; 29212aa4082SMax Reitz 2931181e19aSMax Reitz QTAILQ_FOREACH(op, &s->ops_in_flight, next) { 2941181e19aSMax Reitz /* Do not wait on pseudo ops, because it may in turn wait on 2951181e19aSMax Reitz * some other operation to start, which may in fact be the 2961181e19aSMax Reitz * caller of this function. Since there is only one pseudo op 2971181e19aSMax Reitz * at any given time, we will always find some real operation 2981181e19aSMax Reitz * to wait on. */ 299ce8cabbdSKevin Wolf if (!op->is_pseudo_op && op->is_in_flight && 300ce8cabbdSKevin Wolf op->is_active_write == active) 301ce8cabbdSKevin Wolf { 30212aa4082SMax Reitz qemu_co_queue_wait(&op->waiting_requests, NULL); 3031181e19aSMax Reitz return; 3041181e19aSMax Reitz } 3051181e19aSMax Reitz } 3061181e19aSMax Reitz abort(); 30721cd917fSFam Zheng } 30821cd917fSFam Zheng 309537c3d4fSStefan Hajnoczi static inline void coroutine_fn 3109178f4feSKevin Wolf mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s) 311d06107adSMax Reitz { 312d06107adSMax Reitz /* Only non-active operations use up in-flight slots */ 3139178f4feSKevin Wolf mirror_wait_for_any_operation(s, false); 314d06107adSMax Reitz } 315d06107adSMax Reitz 3162e1990b2SMax Reitz /* Perform a mirror copy operation. 3172e1990b2SMax Reitz * 3182e1990b2SMax Reitz * *op->bytes_handled is set to the number of bytes copied after and 3192e1990b2SMax Reitz * including offset, excluding any bytes copied prior to offset due 3202e1990b2SMax Reitz * to alignment. This will be op->bytes if no alignment is necessary, 3212e1990b2SMax Reitz * or (new_end - op->offset) if the tail is rounded up or down due to 322e5b43573SFam Zheng * alignment or buffer limit. 323402a4741SPaolo Bonzini */ 3242e1990b2SMax Reitz static void coroutine_fn mirror_co_read(void *opaque) 325e5b43573SFam Zheng { 3262e1990b2SMax Reitz MirrorOp *op = opaque; 3272e1990b2SMax Reitz MirrorBlockJob *s = op->s; 328ae4cc877SEric Blake int nb_chunks; 329ae4cc877SEric Blake uint64_t ret; 330ae4cc877SEric Blake uint64_t max_bytes; 331402a4741SPaolo Bonzini 332ae4cc877SEric Blake max_bytes = s->granularity * s->max_iov; 333e5b43573SFam Zheng 334e5b43573SFam Zheng /* We can only handle as much as buf_size at a time. 
*/ 3352e1990b2SMax Reitz op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes)); 3362e1990b2SMax Reitz assert(op->bytes); 3372e1990b2SMax Reitz assert(op->bytes < BDRV_REQUEST_MAX_BYTES); 3382e1990b2SMax Reitz *op->bytes_handled = op->bytes; 339e5b43573SFam Zheng 340e5b43573SFam Zheng if (s->cow_bitmap) { 3412e1990b2SMax Reitz *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes); 342e5b43573SFam Zheng } 3432e1990b2SMax Reitz /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */ 3442e1990b2SMax Reitz assert(*op->bytes_handled <= UINT_MAX); 3452e1990b2SMax Reitz assert(op->bytes <= s->buf_size); 346ae4cc877SEric Blake /* The offset is granularity-aligned because: 347e5b43573SFam Zheng * 1) Caller passes in aligned values; 348e5b43573SFam Zheng * 2) mirror_cow_align is used only when target cluster is larger. */ 3492e1990b2SMax Reitz assert(QEMU_IS_ALIGNED(op->offset, s->granularity)); 350ae4cc877SEric Blake /* The range is sector-aligned, since bdrv_getlength() rounds up. */ 3512e1990b2SMax Reitz assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE)); 3522e1990b2SMax Reitz nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); 353e5b43573SFam Zheng 354e5b43573SFam Zheng while (s->buf_free_count < nb_chunks) { 3552e1990b2SMax Reitz trace_mirror_yield_in_flight(s, op->offset, s->in_flight); 3569178f4feSKevin Wolf mirror_wait_for_free_in_flight_slot(s); 357b812f671SPaolo Bonzini } 358b812f671SPaolo Bonzini 359402a4741SPaolo Bonzini /* Now make a QEMUIOVector taking enough granularity-sized chunks 360402a4741SPaolo Bonzini * from s->buf_free. 361402a4741SPaolo Bonzini */ 362402a4741SPaolo Bonzini qemu_iovec_init(&op->qiov, nb_chunks); 363402a4741SPaolo Bonzini while (nb_chunks-- > 0) { 364402a4741SPaolo Bonzini MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); 3652e1990b2SMax Reitz size_t remaining = op->bytes - op->qiov.size; 3665a0f6fd5SKevin Wolf 367402a4741SPaolo Bonzini QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); 368402a4741SPaolo Bonzini s->buf_free_count--; 3695a0f6fd5SKevin Wolf qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining)); 370402a4741SPaolo Bonzini } 371402a4741SPaolo Bonzini 372893f7ebaSPaolo Bonzini /* Copy the dirty cluster. */ 373bd48bde8SPaolo Bonzini s->in_flight++; 3742e1990b2SMax Reitz s->bytes_in_flight += op->bytes; 375ce8cabbdSKevin Wolf op->is_in_flight = true; 3762e1990b2SMax Reitz trace_mirror_one_iteration(s, op->offset, op->bytes); 377dcfb3bebSFam Zheng 378138f9fffSMax Reitz ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes, 379138f9fffSMax Reitz &op->qiov, 0); 3802e1990b2SMax Reitz mirror_read_complete(op, ret); 381e5b43573SFam Zheng } 382e5b43573SFam Zheng 3832e1990b2SMax Reitz static void coroutine_fn mirror_co_zero(void *opaque) 384e5b43573SFam Zheng { 3852e1990b2SMax Reitz MirrorOp *op = opaque; 3862e1990b2SMax Reitz int ret; 387e5b43573SFam Zheng 3882e1990b2SMax Reitz op->s->in_flight++; 3892e1990b2SMax Reitz op->s->bytes_in_flight += op->bytes; 3902e1990b2SMax Reitz *op->bytes_handled = op->bytes; 391ce8cabbdSKevin Wolf op->is_in_flight = true; 392e5b43573SFam Zheng 3932e1990b2SMax Reitz ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes, 3942e1990b2SMax Reitz op->s->unmap ? 
BDRV_REQ_MAY_UNMAP : 0); 3952e1990b2SMax Reitz mirror_write_complete(op, ret); 396e5b43573SFam Zheng } 3972e1990b2SMax Reitz 3982e1990b2SMax Reitz static void coroutine_fn mirror_co_discard(void *opaque) 3992e1990b2SMax Reitz { 4002e1990b2SMax Reitz MirrorOp *op = opaque; 4012e1990b2SMax Reitz int ret; 4022e1990b2SMax Reitz 4032e1990b2SMax Reitz op->s->in_flight++; 4042e1990b2SMax Reitz op->s->bytes_in_flight += op->bytes; 4052e1990b2SMax Reitz *op->bytes_handled = op->bytes; 406ce8cabbdSKevin Wolf op->is_in_flight = true; 4072e1990b2SMax Reitz 4082e1990b2SMax Reitz ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes); 4092e1990b2SMax Reitz mirror_write_complete(op, ret); 410e5b43573SFam Zheng } 411e5b43573SFam Zheng 4124295c5fcSMax Reitz static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, 4134295c5fcSMax Reitz unsigned bytes, MirrorMethod mirror_method) 4144295c5fcSMax Reitz { 4152e1990b2SMax Reitz MirrorOp *op; 4162e1990b2SMax Reitz Coroutine *co; 4172e1990b2SMax Reitz int64_t bytes_handled = -1; 4182e1990b2SMax Reitz 4192e1990b2SMax Reitz op = g_new(MirrorOp, 1); 4202e1990b2SMax Reitz *op = (MirrorOp){ 4212e1990b2SMax Reitz .s = s, 4222e1990b2SMax Reitz .offset = offset, 4232e1990b2SMax Reitz .bytes = bytes, 4242e1990b2SMax Reitz .bytes_handled = &bytes_handled, 4252e1990b2SMax Reitz }; 42612aa4082SMax Reitz qemu_co_queue_init(&op->waiting_requests); 4272e1990b2SMax Reitz 4284295c5fcSMax Reitz switch (mirror_method) { 4294295c5fcSMax Reitz case MIRROR_METHOD_COPY: 4302e1990b2SMax Reitz co = qemu_coroutine_create(mirror_co_read, op); 4312e1990b2SMax Reitz break; 4324295c5fcSMax Reitz case MIRROR_METHOD_ZERO: 4332e1990b2SMax Reitz co = qemu_coroutine_create(mirror_co_zero, op); 4342e1990b2SMax Reitz break; 4354295c5fcSMax Reitz case MIRROR_METHOD_DISCARD: 4362e1990b2SMax Reitz co = qemu_coroutine_create(mirror_co_discard, op); 4372e1990b2SMax Reitz break; 4384295c5fcSMax Reitz default: 4394295c5fcSMax Reitz abort(); 4404295c5fcSMax Reitz } 441eed325b9SKevin Wolf op->co = co; 4422e1990b2SMax Reitz 44312aa4082SMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); 4442e1990b2SMax Reitz qemu_coroutine_enter(co); 4452e1990b2SMax Reitz /* At this point, ownership of op has been moved to the coroutine 4462e1990b2SMax Reitz * and the object may already be freed */ 4472e1990b2SMax Reitz 4482e1990b2SMax Reitz /* Assert that this value has been set */ 4492e1990b2SMax Reitz assert(bytes_handled >= 0); 4502e1990b2SMax Reitz 4512e1990b2SMax Reitz /* Same assertion as in mirror_co_read() (and for mirror_co_read() 4522e1990b2SMax Reitz * and mirror_co_discard(), bytes_handled == op->bytes, which 4532e1990b2SMax Reitz * is the @bytes parameter given to this function) */ 4542e1990b2SMax Reitz assert(bytes_handled <= UINT_MAX); 4552e1990b2SMax Reitz return bytes_handled; 4564295c5fcSMax Reitz } 4574295c5fcSMax Reitz 458e5b43573SFam Zheng static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) 459e5b43573SFam Zheng { 460138f9fffSMax Reitz BlockDriverState *source = s->mirror_top_bs->backing->bs; 4611181e19aSMax Reitz MirrorOp *pseudo_op; 4621181e19aSMax Reitz int64_t offset; 4631181e19aSMax Reitz uint64_t delay_ns = 0, ret = 0; 464e5b43573SFam Zheng /* At least the first dirty chunk is mirrored in one iteration. */ 465e5b43573SFam Zheng int nb_chunks = 1; 4664b5004d9SDenis V. 
Lunev bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
467b436982fSEric Blake int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);
468e5b43573SFam Zheng
469b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_lock(s->dirty_bitmap);
470f798184cSEric Blake offset = bdrv_dirty_iter_next(s->dbi);
471fb2ef791SEric Blake if (offset < 0) {
472dc162c8eSFam Zheng bdrv_set_dirty_iter(s->dbi, 0);
473f798184cSEric Blake offset = bdrv_dirty_iter_next(s->dbi);
4749a46dba7SEric Blake trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
475fb2ef791SEric Blake assert(offset >= 0);
476e5b43573SFam Zheng }
477b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
478e5b43573SFam Zheng
4791181e19aSMax Reitz mirror_wait_on_conflicts(NULL, s, offset, 1);
4809c83625bSMax Reitz
481da01ff7fSKevin Wolf job_pause_point(&s->common.job);
482565ac01fSStefan Hajnoczi
483e5b43573SFam Zheng /* Find the number of consecutive dirty chunks following the first dirty
484e5b43573SFam Zheng * one, and wait for in-flight requests in them. */
485b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_lock(s->dirty_bitmap);
486fb2ef791SEric Blake while (nb_chunks * s->granularity < s->buf_size) {
487dc162c8eSFam Zheng int64_t next_dirty;
488fb2ef791SEric Blake int64_t next_offset = offset + nb_chunks * s->granularity;
489fb2ef791SEric Blake int64_t next_chunk = next_offset / s->granularity;
490fb2ef791SEric Blake if (next_offset >= s->bdev_length ||
49128636b82SJohn Snow !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
492e5b43573SFam Zheng break;
493e5b43573SFam Zheng }
494e5b43573SFam Zheng if (test_bit(next_chunk, s->in_flight_bitmap)) {
495e5b43573SFam Zheng break;
496e5b43573SFam Zheng }
4979c83625bSMax Reitz
498f798184cSEric Blake next_dirty = bdrv_dirty_iter_next(s->dbi);
499fb2ef791SEric Blake if (next_dirty > next_offset || next_dirty < 0) {
500f27a2742SMax Reitz /* The bitmap iterator's cache is stale, refresh it */
501715a74d8SEric Blake bdrv_set_dirty_iter(s->dbi, next_offset);
502f798184cSEric Blake next_dirty = bdrv_dirty_iter_next(s->dbi);
503f27a2742SMax Reitz }
504fb2ef791SEric Blake assert(next_dirty == next_offset);
505e5b43573SFam Zheng nb_chunks++;
506e5b43573SFam Zheng }
507e5b43573SFam Zheng
508e5b43573SFam Zheng /* Clear dirty bits before querying the block status, because
50931826642SEric Blake * calling bdrv_block_status_above could yield - if some blocks are
510e5b43573SFam Zheng * marked dirty in this window, we need to know.
511e5b43573SFam Zheng */
512e0d7f73eSEric Blake bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
513e0d7f73eSEric Blake nb_chunks * s->granularity);
514b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
515b64bd51eSPaolo Bonzini
5161181e19aSMax Reitz /* Before claiming an area in the in-flight bitmap, we have to
5171181e19aSMax Reitz * create a MirrorOp for it so that conflicting requests can wait
5181181e19aSMax Reitz * for it. mirror_perform() will create the real MirrorOps later,
5191181e19aSMax Reitz * for now we just create a pseudo operation that will wake up all
5201181e19aSMax Reitz * conflicting requests once all real operations have been
5211181e19aSMax Reitz * launched.
*/ 5221181e19aSMax Reitz pseudo_op = g_new(MirrorOp, 1); 5231181e19aSMax Reitz *pseudo_op = (MirrorOp){ 5241181e19aSMax Reitz .offset = offset, 5251181e19aSMax Reitz .bytes = nb_chunks * s->granularity, 5261181e19aSMax Reitz .is_pseudo_op = true, 5271181e19aSMax Reitz }; 5281181e19aSMax Reitz qemu_co_queue_init(&pseudo_op->waiting_requests); 5291181e19aSMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next); 5301181e19aSMax Reitz 531fb2ef791SEric Blake bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks); 532fb2ef791SEric Blake while (nb_chunks > 0 && offset < s->bdev_length) { 53331826642SEric Blake int ret; 5347cfd5275SEric Blake int64_t io_bytes; 535f3e4ce4aSEric Blake int64_t io_bytes_acct; 5364295c5fcSMax Reitz MirrorMethod mirror_method = MIRROR_METHOD_COPY; 537e5b43573SFam Zheng 538fb2ef791SEric Blake assert(!(offset % s->granularity)); 53931826642SEric Blake ret = bdrv_block_status_above(source, NULL, offset, 54031826642SEric Blake nb_chunks * s->granularity, 54131826642SEric Blake &io_bytes, NULL, NULL); 542e5b43573SFam Zheng if (ret < 0) { 543fb2ef791SEric Blake io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes); 5440965a41eSVladimir Sementsov-Ogievskiy } else if (ret & BDRV_BLOCK_DATA) { 545fb2ef791SEric Blake io_bytes = MIN(io_bytes, max_io_bytes); 546e5b43573SFam Zheng } 547e5b43573SFam Zheng 548fb2ef791SEric Blake io_bytes -= io_bytes % s->granularity; 549fb2ef791SEric Blake if (io_bytes < s->granularity) { 550fb2ef791SEric Blake io_bytes = s->granularity; 551e5b43573SFam Zheng } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) { 552fb2ef791SEric Blake int64_t target_offset; 5537cfd5275SEric Blake int64_t target_bytes; 554fb2ef791SEric Blake bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes, 555fb2ef791SEric Blake &target_offset, &target_bytes); 556fb2ef791SEric Blake if (target_offset == offset && 557fb2ef791SEric Blake target_bytes == io_bytes) { 558e5b43573SFam Zheng mirror_method = ret & BDRV_BLOCK_ZERO ? 559e5b43573SFam Zheng MIRROR_METHOD_ZERO : 560e5b43573SFam Zheng MIRROR_METHOD_DISCARD; 561e5b43573SFam Zheng } 562e5b43573SFam Zheng } 563e5b43573SFam Zheng 564cf56a3c6SDenis V. Lunev while (s->in_flight >= MAX_IN_FLIGHT) { 565fb2ef791SEric Blake trace_mirror_yield_in_flight(s, offset, s->in_flight); 5669178f4feSKevin Wolf mirror_wait_for_free_in_flight_slot(s); 567cf56a3c6SDenis V. Lunev } 568cf56a3c6SDenis V. Lunev 569dbaa7b57SVladimir Sementsov-Ogievskiy if (s->ret < 0) { 5701181e19aSMax Reitz ret = 0; 5711181e19aSMax Reitz goto fail; 572dbaa7b57SVladimir Sementsov-Ogievskiy } 573dbaa7b57SVladimir Sementsov-Ogievskiy 574fb2ef791SEric Blake io_bytes = mirror_clip_bytes(s, offset, io_bytes); 5754295c5fcSMax Reitz io_bytes = mirror_perform(s, offset, io_bytes, mirror_method); 5764295c5fcSMax Reitz if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) { 577f3e4ce4aSEric Blake io_bytes_acct = 0; 5784b5004d9SDenis V. Lunev } else { 579fb2ef791SEric Blake io_bytes_acct = io_bytes; 5804b5004d9SDenis V. 
Lunev } 581fb2ef791SEric Blake assert(io_bytes); 582fb2ef791SEric Blake offset += io_bytes; 583fb2ef791SEric Blake nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity); 584dee81d51SKevin Wolf delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct); 585dcfb3bebSFam Zheng } 5861181e19aSMax Reitz 5871181e19aSMax Reitz ret = delay_ns; 5881181e19aSMax Reitz fail: 5891181e19aSMax Reitz QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next); 5901181e19aSMax Reitz qemu_co_queue_restart_all(&pseudo_op->waiting_requests); 5911181e19aSMax Reitz g_free(pseudo_op); 5921181e19aSMax Reitz 5931181e19aSMax Reitz return ret; 594893f7ebaSPaolo Bonzini } 595b952b558SPaolo Bonzini 596402a4741SPaolo Bonzini static void mirror_free_init(MirrorBlockJob *s) 597402a4741SPaolo Bonzini { 598402a4741SPaolo Bonzini int granularity = s->granularity; 599402a4741SPaolo Bonzini size_t buf_size = s->buf_size; 600402a4741SPaolo Bonzini uint8_t *buf = s->buf; 601402a4741SPaolo Bonzini 602402a4741SPaolo Bonzini assert(s->buf_free_count == 0); 603402a4741SPaolo Bonzini QSIMPLEQ_INIT(&s->buf_free); 604402a4741SPaolo Bonzini while (buf_size != 0) { 605402a4741SPaolo Bonzini MirrorBuffer *cur = (MirrorBuffer *)buf; 606402a4741SPaolo Bonzini QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next); 607402a4741SPaolo Bonzini s->buf_free_count++; 608402a4741SPaolo Bonzini buf_size -= granularity; 609402a4741SPaolo Bonzini buf += granularity; 610402a4741SPaolo Bonzini } 611402a4741SPaolo Bonzini } 612402a4741SPaolo Bonzini 613bae8196dSPaolo Bonzini /* This is also used for the .pause callback. There is no matching 614bae8196dSPaolo Bonzini * mirror_resume() because mirror_run() will begin iterating again 615bae8196dSPaolo Bonzini * when the job is resumed. 616bae8196dSPaolo Bonzini */ 617537c3d4fSStefan Hajnoczi static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s) 618bd48bde8SPaolo Bonzini { 619bd48bde8SPaolo Bonzini while (s->in_flight > 0) { 6209178f4feSKevin Wolf mirror_wait_for_free_in_flight_slot(s); 621bd48bde8SPaolo Bonzini } 622893f7ebaSPaolo Bonzini } 623893f7ebaSPaolo Bonzini 624737efc1eSJohn Snow /** 625737efc1eSJohn Snow * mirror_exit_common: handle both abort() and prepare() cases. 626737efc1eSJohn Snow * for .prepare, returns 0 on success and -errno on failure. 627737efc1eSJohn Snow * for .abort cases, denoted by abort = true, MUST return 0. 
628737efc1eSJohn Snow */ 629737efc1eSJohn Snow static int mirror_exit_common(Job *job) 6305a7e7a0bSStefan Hajnoczi { 6311908a559SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 6321908a559SKevin Wolf BlockJob *bjob = &s->common; 633f93c3addSMax Reitz MirrorBDSOpaque *bs_opaque; 6345a7e7a0bSStefan Hajnoczi AioContext *replace_aio_context = NULL; 635f93c3addSMax Reitz BlockDriverState *src; 636f93c3addSMax Reitz BlockDriverState *target_bs; 637f93c3addSMax Reitz BlockDriverState *mirror_top_bs; 63812fa4af6SKevin Wolf Error *local_err = NULL; 639737efc1eSJohn Snow bool abort = job->ret < 0; 640737efc1eSJohn Snow int ret = 0; 641737efc1eSJohn Snow 642737efc1eSJohn Snow if (s->prepared) { 643737efc1eSJohn Snow return 0; 644737efc1eSJohn Snow } 645737efc1eSJohn Snow s->prepared = true; 6463f09bfbcSKevin Wolf 647f93c3addSMax Reitz mirror_top_bs = s->mirror_top_bs; 648f93c3addSMax Reitz bs_opaque = mirror_top_bs->opaque; 649f93c3addSMax Reitz src = mirror_top_bs->backing->bs; 650f93c3addSMax Reitz target_bs = blk_bs(s->target); 651f93c3addSMax Reitz 652ef53dc09SAlberto Garcia if (bdrv_chain_contains(src, target_bs)) { 653ef53dc09SAlberto Garcia bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs); 654ef53dc09SAlberto Garcia } 655ef53dc09SAlberto Garcia 6565deb6cbdSVladimir Sementsov-Ogievskiy bdrv_release_dirty_bitmap(s->dirty_bitmap); 6572119882cSPaolo Bonzini 6587b508f6bSJohn Snow /* Make sure that the source BDS doesn't go away during bdrv_replace_node, 6597b508f6bSJohn Snow * before we can call bdrv_drained_end */ 6603f09bfbcSKevin Wolf bdrv_ref(src); 6614ef85a9cSKevin Wolf bdrv_ref(mirror_top_bs); 6627d9fcb39SKevin Wolf bdrv_ref(target_bs); 6637d9fcb39SKevin Wolf 664bb0c9409SVladimir Sementsov-Ogievskiy /* 665bb0c9409SVladimir Sementsov-Ogievskiy * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before 6667d9fcb39SKevin Wolf * inserting target_bs at s->to_replace, where we might not be able to get 66763c8ef28SKevin Wolf * these permissions. 668bb0c9409SVladimir Sementsov-Ogievskiy */ 6697d9fcb39SKevin Wolf blk_unref(s->target); 6707d9fcb39SKevin Wolf s->target = NULL; 6714ef85a9cSKevin Wolf 6724ef85a9cSKevin Wolf /* We don't access the source any more. Dropping any WRITE/RESIZE is 673d2da5e28SKevin Wolf * required before it could become a backing file of target_bs. Not having 674d2da5e28SKevin Wolf * these permissions any more means that we can't allow any new requests on 675d2da5e28SKevin Wolf * mirror_top_bs from now on, so keep it drained. */ 676d2da5e28SKevin Wolf bdrv_drained_begin(mirror_top_bs); 677f94dc3b4SMax Reitz bs_opaque->stop = true; 678f94dc3b4SMax Reitz bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, 6794ef85a9cSKevin Wolf &error_abort); 680737efc1eSJohn Snow if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) { 6814ef85a9cSKevin Wolf BlockDriverState *backing = s->is_none_mode ? 
src : s->base; 6823f072a7fSMax Reitz BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs); 6833f072a7fSMax Reitz 6843f072a7fSMax Reitz if (bdrv_cow_bs(unfiltered_target) != backing) { 6853f072a7fSMax Reitz bdrv_set_backing_hd(unfiltered_target, backing, &local_err); 68612fa4af6SKevin Wolf if (local_err) { 68712fa4af6SKevin Wolf error_report_err(local_err); 68866c8672dSVladimir Sementsov-Ogievskiy local_err = NULL; 6897b508f6bSJohn Snow ret = -EPERM; 69012fa4af6SKevin Wolf } 6914ef85a9cSKevin Wolf } 692c41f5b96SMax Reitz } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) { 693c41f5b96SMax Reitz assert(!bdrv_backing_chain_next(target_bs)); 694c41f5b96SMax Reitz ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL, 695c41f5b96SMax Reitz "backing", &local_err); 696c41f5b96SMax Reitz if (ret < 0) { 697c41f5b96SMax Reitz error_report_err(local_err); 698c41f5b96SMax Reitz local_err = NULL; 699c41f5b96SMax Reitz } 7004ef85a9cSKevin Wolf } 7015a7e7a0bSStefan Hajnoczi 7025a7e7a0bSStefan Hajnoczi if (s->to_replace) { 7035a7e7a0bSStefan Hajnoczi replace_aio_context = bdrv_get_aio_context(s->to_replace); 7045a7e7a0bSStefan Hajnoczi aio_context_acquire(replace_aio_context); 7055a7e7a0bSStefan Hajnoczi } 7065a7e7a0bSStefan Hajnoczi 707737efc1eSJohn Snow if (s->should_complete && !abort) { 708737efc1eSJohn Snow BlockDriverState *to_replace = s->to_replace ?: src; 7091ba79388SAlberto Garcia bool ro = bdrv_is_read_only(to_replace); 71040365552SKevin Wolf 7111ba79388SAlberto Garcia if (ro != bdrv_is_read_only(target_bs)) { 7121ba79388SAlberto Garcia bdrv_reopen_set_read_only(target_bs, ro, NULL); 7135a7e7a0bSStefan Hajnoczi } 714b8804815SKevin Wolf 715b8804815SKevin Wolf /* The mirror job has no requests in flight any more, but we need to 716b8804815SKevin Wolf * drain potential other users of the BDS before changing the graph. */ 7175e771752SSergio Lopez assert(s->in_drain); 718e253f4b8SKevin Wolf bdrv_drained_begin(target_bs); 7196e9cc051SMax Reitz /* 7206e9cc051SMax Reitz * Cannot use check_to_replace_node() here, because that would 7216e9cc051SMax Reitz * check for an op blocker on @to_replace, and we have our own 7226e9cc051SMax Reitz * there. 
7236e9cc051SMax Reitz */ 7246e9cc051SMax Reitz if (bdrv_recurse_can_replace(src, to_replace)) { 7255fe31c25SKevin Wolf bdrv_replace_node(to_replace, target_bs, &local_err); 7266e9cc051SMax Reitz } else { 7276e9cc051SMax Reitz error_setg(&local_err, "Can no longer replace '%s' by '%s', " 7286e9cc051SMax Reitz "because it can no longer be guaranteed that doing so " 7296e9cc051SMax Reitz "would not lead to an abrupt change of visible data", 7306e9cc051SMax Reitz to_replace->node_name, target_bs->node_name); 7316e9cc051SMax Reitz } 732e253f4b8SKevin Wolf bdrv_drained_end(target_bs); 7335fe31c25SKevin Wolf if (local_err) { 7345fe31c25SKevin Wolf error_report_err(local_err); 7357b508f6bSJohn Snow ret = -EPERM; 7365fe31c25SKevin Wolf } 7375a7e7a0bSStefan Hajnoczi } 7385a7e7a0bSStefan Hajnoczi if (s->to_replace) { 7395a7e7a0bSStefan Hajnoczi bdrv_op_unblock_all(s->to_replace, s->replace_blocker); 7405a7e7a0bSStefan Hajnoczi error_free(s->replace_blocker); 7415a7e7a0bSStefan Hajnoczi bdrv_unref(s->to_replace); 7425a7e7a0bSStefan Hajnoczi } 7435a7e7a0bSStefan Hajnoczi if (replace_aio_context) { 7445a7e7a0bSStefan Hajnoczi aio_context_release(replace_aio_context); 7455a7e7a0bSStefan Hajnoczi } 7465a7e7a0bSStefan Hajnoczi g_free(s->replaces); 7477d9fcb39SKevin Wolf bdrv_unref(target_bs); 7484ef85a9cSKevin Wolf 749f94dc3b4SMax Reitz /* 750f94dc3b4SMax Reitz * Remove the mirror filter driver from the graph. Before this, get rid of 7514ef85a9cSKevin Wolf * the blockers on the intermediate nodes so that the resulting state is 752f94dc3b4SMax Reitz * valid. 753f94dc3b4SMax Reitz */ 7541908a559SKevin Wolf block_job_remove_all_bdrv(bjob); 7553f072a7fSMax Reitz bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); 7564ef85a9cSKevin Wolf 7574ef85a9cSKevin Wolf /* We just changed the BDS the job BB refers to (with either or both of the 7585fe31c25SKevin Wolf * bdrv_replace_node() calls), so switch the BB back so the cleanup does 7595fe31c25SKevin Wolf * the right thing. We don't need any permissions any more now. */ 7601908a559SKevin Wolf blk_remove_bs(bjob->blk); 7611908a559SKevin Wolf blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); 7621908a559SKevin Wolf blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort); 7634ef85a9cSKevin Wolf 764429076e8SMax Reitz bs_opaque->job = NULL; 7654ef85a9cSKevin Wolf 766176c3699SFam Zheng bdrv_drained_end(src); 767d2da5e28SKevin Wolf bdrv_drained_end(mirror_top_bs); 7685e771752SSergio Lopez s->in_drain = false; 7694ef85a9cSKevin Wolf bdrv_unref(mirror_top_bs); 7703f09bfbcSKevin Wolf bdrv_unref(src); 7717b508f6bSJohn Snow 772737efc1eSJohn Snow return ret; 773737efc1eSJohn Snow } 774737efc1eSJohn Snow 775737efc1eSJohn Snow static int mirror_prepare(Job *job) 776737efc1eSJohn Snow { 777737efc1eSJohn Snow return mirror_exit_common(job); 778737efc1eSJohn Snow } 779737efc1eSJohn Snow 780737efc1eSJohn Snow static void mirror_abort(Job *job) 781737efc1eSJohn Snow { 782737efc1eSJohn Snow int ret = mirror_exit_common(job); 783737efc1eSJohn Snow assert(ret == 0); 7845a7e7a0bSStefan Hajnoczi } 7855a7e7a0bSStefan Hajnoczi 786537c3d4fSStefan Hajnoczi static void coroutine_fn mirror_throttle(MirrorBlockJob *s) 78749efb1f5SDenis V. Lunev { 78849efb1f5SDenis V. Lunev int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 78949efb1f5SDenis V. Lunev 79018bb6928SKevin Wolf if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) { 79149efb1f5SDenis V. Lunev s->last_pause_ns = now; 7925d43e86eSKevin Wolf job_sleep_ns(&s->common.job, 0); 79349efb1f5SDenis V. 
Lunev } else { 794da01ff7fSKevin Wolf job_pause_point(&s->common.job); 79549efb1f5SDenis V. Lunev } 79649efb1f5SDenis V. Lunev } 79749efb1f5SDenis V. Lunev 798c0b363adSDenis V. Lunev static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) 799c0b363adSDenis V. Lunev { 80023ca459aSEric Blake int64_t offset; 801138f9fffSMax Reitz BlockDriverState *bs = s->mirror_top_bs->backing->bs; 802c0b363adSDenis V. Lunev BlockDriverState *target_bs = blk_bs(s->target); 80323ca459aSEric Blake int ret; 80451b0a488SEric Blake int64_t count; 805c0b363adSDenis V. Lunev 806cdf3bc93SMax Reitz if (s->zero_target) { 807c7c2769cSDenis V. Lunev if (!bdrv_can_write_zeroes_with_unmap(target_bs)) { 808e0d7f73eSEric Blake bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length); 809b7d5062cSDenis V. Lunev return 0; 810b7d5062cSDenis V. Lunev } 811b7d5062cSDenis V. Lunev 81290ab48ebSAnton Nefedov s->initial_zeroing_ongoing = true; 81323ca459aSEric Blake for (offset = 0; offset < s->bdev_length; ) { 81423ca459aSEric Blake int bytes = MIN(s->bdev_length - offset, 81523ca459aSEric Blake QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); 816c7c2769cSDenis V. Lunev 817c7c2769cSDenis V. Lunev mirror_throttle(s); 818c7c2769cSDenis V. Lunev 819daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job)) { 82090ab48ebSAnton Nefedov s->initial_zeroing_ongoing = false; 821c7c2769cSDenis V. Lunev return 0; 822c7c2769cSDenis V. Lunev } 823c7c2769cSDenis V. Lunev 824c7c2769cSDenis V. Lunev if (s->in_flight >= MAX_IN_FLIGHT) { 82567adf4b3SEric Blake trace_mirror_yield(s, UINT64_MAX, s->buf_free_count, 82667adf4b3SEric Blake s->in_flight); 8279178f4feSKevin Wolf mirror_wait_for_free_in_flight_slot(s); 828c7c2769cSDenis V. Lunev continue; 829c7c2769cSDenis V. Lunev } 830c7c2769cSDenis V. Lunev 8314295c5fcSMax Reitz mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO); 83223ca459aSEric Blake offset += bytes; 833c7c2769cSDenis V. Lunev } 834c7c2769cSDenis V. Lunev 835bae8196dSPaolo Bonzini mirror_wait_for_all_io(s); 83690ab48ebSAnton Nefedov s->initial_zeroing_ongoing = false; 837c7c2769cSDenis V. Lunev } 838c7c2769cSDenis V. Lunev 839c0b363adSDenis V. Lunev /* First part, loop on the sectors and initialize the dirty bitmap. */ 84023ca459aSEric Blake for (offset = 0; offset < s->bdev_length; ) { 841c0b363adSDenis V. Lunev /* Just to make sure we are not exceeding int limit. */ 84223ca459aSEric Blake int bytes = MIN(s->bdev_length - offset, 84323ca459aSEric Blake QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); 844c0b363adSDenis V. Lunev 845c0b363adSDenis V. Lunev mirror_throttle(s); 846c0b363adSDenis V. Lunev 847daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job)) { 848c0b363adSDenis V. Lunev return 0; 849c0b363adSDenis V. Lunev } 850c0b363adSDenis V. Lunev 8513f072a7fSMax Reitz ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes, 8523f072a7fSMax Reitz &count); 853c0b363adSDenis V. Lunev if (ret < 0) { 854c0b363adSDenis V. Lunev return ret; 855c0b363adSDenis V. Lunev } 856c0b363adSDenis V. Lunev 85723ca459aSEric Blake assert(count); 858a92b1b06SEric Blake if (ret > 0) { 85923ca459aSEric Blake bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count); 860c0b363adSDenis V. Lunev } 86123ca459aSEric Blake offset += count; 862c0b363adSDenis V. Lunev } 863c0b363adSDenis V. Lunev return 0; 864c0b363adSDenis V. Lunev } 865c0b363adSDenis V. Lunev 866bdffb31dSPaolo Bonzini /* Called when going out of the streaming phase to flush the bulk of the 867bdffb31dSPaolo Bonzini * data to the medium, or just before completing. 
868bdffb31dSPaolo Bonzini */ 869bdffb31dSPaolo Bonzini static int mirror_flush(MirrorBlockJob *s) 870bdffb31dSPaolo Bonzini { 871bdffb31dSPaolo Bonzini int ret = blk_flush(s->target); 872bdffb31dSPaolo Bonzini if (ret < 0) { 873bdffb31dSPaolo Bonzini if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) { 874bdffb31dSPaolo Bonzini s->ret = ret; 875bdffb31dSPaolo Bonzini } 876bdffb31dSPaolo Bonzini } 877bdffb31dSPaolo Bonzini return ret; 878bdffb31dSPaolo Bonzini } 879bdffb31dSPaolo Bonzini 880f67432a2SJohn Snow static int coroutine_fn mirror_run(Job *job, Error **errp) 881893f7ebaSPaolo Bonzini { 882f67432a2SJohn Snow MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 883138f9fffSMax Reitz BlockDriverState *bs = s->mirror_top_bs->backing->bs; 884e253f4b8SKevin Wolf BlockDriverState *target_bs = blk_bs(s->target); 8859a0cec66SPaolo Bonzini bool need_drain = true; 886c0b363adSDenis V. Lunev int64_t length; 887e83dd680SKevin Wolf int64_t target_length; 888b812f671SPaolo Bonzini BlockDriverInfo bdi; 8891d33936eSJeff Cody char backing_filename[2]; /* we only need 2 characters because we are only 8901d33936eSJeff Cody checking for a NULL string */ 891893f7ebaSPaolo Bonzini int ret = 0; 892893f7ebaSPaolo Bonzini 893daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job)) { 894893f7ebaSPaolo Bonzini goto immediate_exit; 895893f7ebaSPaolo Bonzini } 896893f7ebaSPaolo Bonzini 897b21c7652SMax Reitz s->bdev_length = bdrv_getlength(bs); 898b21c7652SMax Reitz if (s->bdev_length < 0) { 899b21c7652SMax Reitz ret = s->bdev_length; 900373df5b1SFam Zheng goto immediate_exit; 901becc347eSKevin Wolf } 902becc347eSKevin Wolf 903e83dd680SKevin Wolf target_length = blk_getlength(s->target); 904e83dd680SKevin Wolf if (target_length < 0) { 905e83dd680SKevin Wolf ret = target_length; 906becc347eSKevin Wolf goto immediate_exit; 907becc347eSKevin Wolf } 908becc347eSKevin Wolf 909e83dd680SKevin Wolf /* Active commit must resize the base image if its size differs from the 910e83dd680SKevin Wolf * active layer. */ 911e83dd680SKevin Wolf if (s->base == blk_bs(s->target)) { 912e83dd680SKevin Wolf if (s->bdev_length > target_length) { 913c80d8b06SMax Reitz ret = blk_truncate(s->target, s->bdev_length, false, 9148c6242b6SKevin Wolf PREALLOC_MODE_OFF, 0, NULL); 915becc347eSKevin Wolf if (ret < 0) { 916becc347eSKevin Wolf goto immediate_exit; 917becc347eSKevin Wolf } 918becc347eSKevin Wolf } 919e83dd680SKevin Wolf } else if (s->bdev_length != target_length) { 920e83dd680SKevin Wolf error_setg(errp, "Source and target image have different sizes"); 921e83dd680SKevin Wolf ret = -EINVAL; 922e83dd680SKevin Wolf goto immediate_exit; 923becc347eSKevin Wolf } 924becc347eSKevin Wolf 925becc347eSKevin Wolf if (s->bdev_length == 0) { 9262e1795b5SKevin Wolf /* Transition to the READY state and wait for complete. 
*/ 9272e1795b5SKevin Wolf job_transition_to_ready(&s->common.job); 9289e48b025SFam Zheng s->synced = true; 929d06107adSMax Reitz s->actively_synced = true; 930daa7f2f9SKevin Wolf while (!job_is_cancelled(&s->common.job) && !s->should_complete) { 931198c49ccSKevin Wolf job_yield(&s->common.job); 9329e48b025SFam Zheng } 933daa7f2f9SKevin Wolf s->common.job.cancelled = false; 9349e48b025SFam Zheng goto immediate_exit; 935893f7ebaSPaolo Bonzini } 936893f7ebaSPaolo Bonzini 937b21c7652SMax Reitz length = DIV_ROUND_UP(s->bdev_length, s->granularity); 938402a4741SPaolo Bonzini s->in_flight_bitmap = bitmap_new(length); 939402a4741SPaolo Bonzini 940b812f671SPaolo Bonzini /* If we have no backing file yet in the destination, we cannot let 941b812f671SPaolo Bonzini * the destination do COW. Instead, we copy sectors around the 942b812f671SPaolo Bonzini * dirty data if needed. We need a bitmap to do that. 943b812f671SPaolo Bonzini */ 944e253f4b8SKevin Wolf bdrv_get_backing_filename(target_bs, backing_filename, 945b812f671SPaolo Bonzini sizeof(backing_filename)); 946e253f4b8SKevin Wolf if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) { 947b436982fSEric Blake s->target_cluster_size = bdi.cluster_size; 948b436982fSEric Blake } else { 949b436982fSEric Blake s->target_cluster_size = BDRV_SECTOR_SIZE; 950c3cc95bdSFam Zheng } 9513f072a7fSMax Reitz if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) && 952b436982fSEric Blake s->granularity < s->target_cluster_size) { 953b436982fSEric Blake s->buf_size = MAX(s->buf_size, s->target_cluster_size); 954b812f671SPaolo Bonzini s->cow_bitmap = bitmap_new(length); 955b812f671SPaolo Bonzini } 956e253f4b8SKevin Wolf s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov); 957b812f671SPaolo Bonzini 9587504edf4SKevin Wolf s->buf = qemu_try_blockalign(bs, s->buf_size); 9597504edf4SKevin Wolf if (s->buf == NULL) { 9607504edf4SKevin Wolf ret = -ENOMEM; 9617504edf4SKevin Wolf goto immediate_exit; 9627504edf4SKevin Wolf } 9637504edf4SKevin Wolf 964402a4741SPaolo Bonzini mirror_free_init(s); 965893f7ebaSPaolo Bonzini 96649efb1f5SDenis V. Lunev s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 96703544a6eSFam Zheng if (!s->is_none_mode) { 968c0b363adSDenis V. Lunev ret = mirror_dirty_init(s); 969daa7f2f9SKevin Wolf if (ret < 0 || job_is_cancelled(&s->common.job)) { 9704c0cbd6fSFam Zheng goto immediate_exit; 9714c0cbd6fSFam Zheng } 972893f7ebaSPaolo Bonzini } 973893f7ebaSPaolo Bonzini 974dc162c8eSFam Zheng assert(!s->dbi); 975715a74d8SEric Blake s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap); 976893f7ebaSPaolo Bonzini for (;;) { 977cc8c9d6cSPaolo Bonzini uint64_t delay_ns = 0; 97849efb1f5SDenis V. 
Lunev int64_t cnt, delta;
979893f7ebaSPaolo Bonzini bool should_complete;
980893f7ebaSPaolo Bonzini
981d06107adSMax Reitz /* Do not start passive operations while there are active
982d06107adSMax Reitz * writes in progress */
983d06107adSMax Reitz while (s->in_active_write_counter) {
9849178f4feSKevin Wolf mirror_wait_for_any_operation(s, true);
985d06107adSMax Reitz }
986d06107adSMax Reitz
987bd48bde8SPaolo Bonzini if (s->ret < 0) {
988bd48bde8SPaolo Bonzini ret = s->ret;
989893f7ebaSPaolo Bonzini goto immediate_exit;
990893f7ebaSPaolo Bonzini }
991bd48bde8SPaolo Bonzini
992da01ff7fSKevin Wolf job_pause_point(&s->common.job);
993565ac01fSStefan Hajnoczi
99420dca810SJohn Snow cnt = bdrv_get_dirty_count(s->dirty_bitmap);
99505df8a6aSKevin Wolf /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
99605df8a6aSKevin Wolf * the number of bytes currently being processed; together those are
99705df8a6aSKevin Wolf * the current remaining operation length */
99830a5c887SKevin Wolf job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);
999bd48bde8SPaolo Bonzini
1000bd48bde8SPaolo Bonzini /* Note that even when no rate limit is applied we need to yield
1001a7282330SFam Zheng * periodically with no pending I/O so that bdrv_drain_all() returns.
100218bb6928SKevin Wolf * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
100318bb6928SKevin Wolf * an error, or when the source is clean, whichever comes first. */
100449efb1f5SDenis V. Lunev delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
100518bb6928SKevin Wolf if (delta < BLOCK_JOB_SLICE_TIME &&
1006bd48bde8SPaolo Bonzini s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
1007cf56a3c6SDenis V. Lunev if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
1008402a4741SPaolo Bonzini (cnt == 0 && s->in_flight > 0)) {
10099a46dba7SEric Blake trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
10109178f4feSKevin Wolf mirror_wait_for_free_in_flight_slot(s);
1011bd48bde8SPaolo Bonzini continue;
1012bd48bde8SPaolo Bonzini } else if (cnt != 0) {
1013cc8c9d6cSPaolo Bonzini delay_ns = mirror_iteration(s);
1014893f7ebaSPaolo Bonzini }
1015cc8c9d6cSPaolo Bonzini }
1016893f7ebaSPaolo Bonzini
1017893f7ebaSPaolo Bonzini should_complete = false;
1018bd48bde8SPaolo Bonzini if (s->in_flight == 0 && cnt == 0) {
1019893f7ebaSPaolo Bonzini trace_mirror_before_flush(s);
1020bdffb31dSPaolo Bonzini if (!s->synced) {
1021bdffb31dSPaolo Bonzini if (mirror_flush(s) < 0) {
1022bdffb31dSPaolo Bonzini /* Go check s->ret. */
1023bdffb31dSPaolo Bonzini continue;
1024893f7ebaSPaolo Bonzini }
1025893f7ebaSPaolo Bonzini /* We're out of the streaming phase. From now on, if the job
1026893f7ebaSPaolo Bonzini * is cancelled we will actually complete all pending I/O and
1027893f7ebaSPaolo Bonzini * report completion. This way, block-job-cancel will leave
1028893f7ebaSPaolo Bonzini * the target in a consistent state.
1029893f7ebaSPaolo Bonzini */ 10302e1795b5SKevin Wolf job_transition_to_ready(&s->common.job); 1031d63ffd87SPaolo Bonzini s->synced = true; 1032d06107adSMax Reitz if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) { 1033d06107adSMax Reitz s->actively_synced = true; 1034d06107adSMax Reitz } 1035d63ffd87SPaolo Bonzini } 1036d63ffd87SPaolo Bonzini 1037d63ffd87SPaolo Bonzini should_complete = s->should_complete || 1038daa7f2f9SKevin Wolf job_is_cancelled(&s->common.job); 103920dca810SJohn Snow cnt = bdrv_get_dirty_count(s->dirty_bitmap); 1040893f7ebaSPaolo Bonzini } 1041893f7ebaSPaolo Bonzini 1042893f7ebaSPaolo Bonzini if (cnt == 0 && should_complete) { 1043893f7ebaSPaolo Bonzini /* The dirty bitmap is not updated while operations are pending. 1044893f7ebaSPaolo Bonzini * If we're about to exit, wait for pending operations before 1045893f7ebaSPaolo Bonzini * calling bdrv_get_dirty_count(bs), or we may exit while the 1046893f7ebaSPaolo Bonzini * source has dirty data to copy! 1047893f7ebaSPaolo Bonzini * 1048893f7ebaSPaolo Bonzini * Note that I/O can be submitted by the guest while 10499a0cec66SPaolo Bonzini * mirror_populate runs, so pause it now. Before deciding 10509a0cec66SPaolo Bonzini * whether to switch to target check one last time if I/O has 10519a0cec66SPaolo Bonzini * come in the meanwhile, and if not flush the data to disk. 1052893f7ebaSPaolo Bonzini */ 10539a46dba7SEric Blake trace_mirror_before_drain(s, cnt); 10549a0cec66SPaolo Bonzini 10555e771752SSergio Lopez s->in_drain = true; 10569a0cec66SPaolo Bonzini bdrv_drained_begin(bs); 105720dca810SJohn Snow cnt = bdrv_get_dirty_count(s->dirty_bitmap); 1058bdffb31dSPaolo Bonzini if (cnt > 0 || mirror_flush(s) < 0) { 10599a0cec66SPaolo Bonzini bdrv_drained_end(bs); 10605e771752SSergio Lopez s->in_drain = false; 10619a0cec66SPaolo Bonzini continue; 10629a0cec66SPaolo Bonzini } 10639a0cec66SPaolo Bonzini 10649a0cec66SPaolo Bonzini /* The two disks are in sync. Exit and report successful 10659a0cec66SPaolo Bonzini * completion. 10669a0cec66SPaolo Bonzini */ 10679a0cec66SPaolo Bonzini assert(QLIST_EMPTY(&bs->tracked_requests)); 1068daa7f2f9SKevin Wolf s->common.job.cancelled = false; 10699a0cec66SPaolo Bonzini need_drain = false; 10709a0cec66SPaolo Bonzini break; 1071893f7ebaSPaolo Bonzini } 1072893f7ebaSPaolo Bonzini 1073893f7ebaSPaolo Bonzini ret = 0; 1074ddc4115eSStefan Hajnoczi 1075ddc4115eSStefan Hajnoczi if (s->synced && !should_complete) { 107618bb6928SKevin Wolf delay_ns = (s->in_flight == 0 && 107718bb6928SKevin Wolf cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0); 1078ddc4115eSStefan Hajnoczi } 10799a46dba7SEric Blake trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); 10805d43e86eSKevin Wolf job_sleep_ns(&s->common.job, delay_ns); 1081daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job) && 1082004e95dfSKevin Wolf (!s->synced || s->common.job.force_cancel)) 1083eb36639fSMax Reitz { 1084893f7ebaSPaolo Bonzini break; 1085893f7ebaSPaolo Bonzini } 108649efb1f5SDenis V. Lunev s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 1087893f7ebaSPaolo Bonzini } 1088893f7ebaSPaolo Bonzini 1089893f7ebaSPaolo Bonzini immediate_exit: 1090bd48bde8SPaolo Bonzini if (s->in_flight > 0) { 1091bd48bde8SPaolo Bonzini /* We get here only if something went wrong. Either the job failed, 1092bd48bde8SPaolo Bonzini * or it was cancelled prematurely so that we do not guarantee that 1093bd48bde8SPaolo Bonzini * the target is a copy of the source. 
1094bd48bde8SPaolo Bonzini */ 1095004e95dfSKevin Wolf assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) && 1096daa7f2f9SKevin Wolf job_is_cancelled(&s->common.job))); 10979a0cec66SPaolo Bonzini assert(need_drain); 1098bae8196dSPaolo Bonzini mirror_wait_for_all_io(s); 1099bd48bde8SPaolo Bonzini } 1100bd48bde8SPaolo Bonzini 1101bd48bde8SPaolo Bonzini assert(s->in_flight == 0); 11027191bf31SMarkus Armbruster qemu_vfree(s->buf); 1103b812f671SPaolo Bonzini g_free(s->cow_bitmap); 1104402a4741SPaolo Bonzini g_free(s->in_flight_bitmap); 1105dc162c8eSFam Zheng bdrv_dirty_iter_free(s->dbi); 11065a7e7a0bSStefan Hajnoczi 11079a0cec66SPaolo Bonzini if (need_drain) { 11085e771752SSergio Lopez s->in_drain = true; 1109e253f4b8SKevin Wolf bdrv_drained_begin(bs); 11109a0cec66SPaolo Bonzini } 1111f67432a2SJohn Snow 1112f67432a2SJohn Snow return ret; 1113893f7ebaSPaolo Bonzini } 1114893f7ebaSPaolo Bonzini 11153453d972SKevin Wolf static void mirror_complete(Job *job, Error **errp) 1116d63ffd87SPaolo Bonzini { 11173453d972SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 1118274fcceeSMax Reitz 1119d63ffd87SPaolo Bonzini if (!s->synced) { 11209df229c3SAlberto Garcia error_setg(errp, "The active block job '%s' cannot be completed", 11213453d972SKevin Wolf job->id); 1122d63ffd87SPaolo Bonzini return; 1123d63ffd87SPaolo Bonzini } 1124d63ffd87SPaolo Bonzini 112515d67298SChanglong Xie /* block all operations on to_replace bs */ 112609158f00SBenoît Canet if (s->replaces) { 11275a7e7a0bSStefan Hajnoczi AioContext *replace_aio_context; 11285a7e7a0bSStefan Hajnoczi 1129e12f3784SWen Congyang s->to_replace = bdrv_find_node(s->replaces); 113009158f00SBenoît Canet if (!s->to_replace) { 1131e12f3784SWen Congyang error_setg(errp, "Node name '%s' not found", s->replaces); 113209158f00SBenoît Canet return; 113309158f00SBenoît Canet } 113409158f00SBenoît Canet 11355a7e7a0bSStefan Hajnoczi replace_aio_context = bdrv_get_aio_context(s->to_replace); 11365a7e7a0bSStefan Hajnoczi aio_context_acquire(replace_aio_context); 11375a7e7a0bSStefan Hajnoczi 11384ef85a9cSKevin Wolf /* TODO Translate this into permission system. Current definition of 11394ef85a9cSKevin Wolf * GRAPH_MOD would require to request it for the parents; they might 11404ef85a9cSKevin Wolf * not even be BlockDriverStates, however, so a BdrvChild can't address 11414ef85a9cSKevin Wolf * them. May need redefinition of GRAPH_MOD. 
*/ 114209158f00SBenoît Canet error_setg(&s->replace_blocker, 114309158f00SBenoît Canet "block device is in use by block-job-complete"); 114409158f00SBenoît Canet bdrv_op_block_all(s->to_replace, s->replace_blocker); 114509158f00SBenoît Canet bdrv_ref(s->to_replace); 11465a7e7a0bSStefan Hajnoczi 11475a7e7a0bSStefan Hajnoczi aio_context_release(replace_aio_context); 114809158f00SBenoît Canet } 114909158f00SBenoît Canet 1150d63ffd87SPaolo Bonzini s->should_complete = true; 115100769414SMax Reitz 115200769414SMax Reitz /* If the job is paused, it will be re-entered when it is resumed */ 115300769414SMax Reitz if (!job->paused) { 11543d70ff53SKevin Wolf job_enter(job); 1155d63ffd87SPaolo Bonzini } 115600769414SMax Reitz } 1157d63ffd87SPaolo Bonzini 1158537c3d4fSStefan Hajnoczi static void coroutine_fn mirror_pause(Job *job) 1159565ac01fSStefan Hajnoczi { 1160da01ff7fSKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 1161565ac01fSStefan Hajnoczi 1162bae8196dSPaolo Bonzini mirror_wait_for_all_io(s); 1163565ac01fSStefan Hajnoczi } 1164565ac01fSStefan Hajnoczi 116589bd0305SKevin Wolf static bool mirror_drained_poll(BlockJob *job) 116689bd0305SKevin Wolf { 116789bd0305SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 11685e771752SSergio Lopez 11695e771752SSergio Lopez /* If the job isn't paused nor cancelled, we can't be sure that it won't 11705e771752SSergio Lopez * issue more requests. We make an exception if we've reached this point 11715e771752SSergio Lopez * from one of our own drain sections, to avoid a deadlock waiting for 11725e771752SSergio Lopez * ourselves. 11735e771752SSergio Lopez */ 11745e771752SSergio Lopez if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) { 11755e771752SSergio Lopez return true; 11765e771752SSergio Lopez } 11775e771752SSergio Lopez 117889bd0305SKevin Wolf return !!s->in_flight; 117989bd0305SKevin Wolf } 118089bd0305SKevin Wolf 1181*9c785cd7SVladimir Sementsov-Ogievskiy static void mirror_cancel(Job *job, bool force) 1182521ff8b7SVladimir Sementsov-Ogievskiy { 1183521ff8b7SVladimir Sementsov-Ogievskiy MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 1184521ff8b7SVladimir Sementsov-Ogievskiy BlockDriverState *target = blk_bs(s->target); 1185521ff8b7SVladimir Sementsov-Ogievskiy 1186*9c785cd7SVladimir Sementsov-Ogievskiy if (force || !job_is_ready(job)) { 1187521ff8b7SVladimir Sementsov-Ogievskiy bdrv_cancel_in_flight(target); 1188521ff8b7SVladimir Sementsov-Ogievskiy } 1189*9c785cd7SVladimir Sementsov-Ogievskiy } 1190521ff8b7SVladimir Sementsov-Ogievskiy 11913fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = { 119233e9e9bdSKevin Wolf .job_driver = { 1193893f7ebaSPaolo Bonzini .instance_size = sizeof(MirrorBlockJob), 11948e4c8700SKevin Wolf .job_type = JOB_TYPE_MIRROR, 119580fa2c75SKevin Wolf .free = block_job_free, 1196b15de828SKevin Wolf .user_resume = block_job_user_resume, 1197f67432a2SJohn Snow .run = mirror_run, 1198737efc1eSJohn Snow .prepare = mirror_prepare, 1199737efc1eSJohn Snow .abort = mirror_abort, 1200565ac01fSStefan Hajnoczi .pause = mirror_pause, 1201da01ff7fSKevin Wolf .complete = mirror_complete, 1202521ff8b7SVladimir Sementsov-Ogievskiy .cancel = mirror_cancel, 12033453d972SKevin Wolf }, 120489bd0305SKevin Wolf .drained_poll = mirror_drained_poll, 1205893f7ebaSPaolo Bonzini }; 1206893f7ebaSPaolo Bonzini 120703544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = { 120833e9e9bdSKevin Wolf .job_driver = { 120903544a6eSFam Zheng 
.instance_size = sizeof(MirrorBlockJob), 12108e4c8700SKevin Wolf .job_type = JOB_TYPE_COMMIT, 121180fa2c75SKevin Wolf .free = block_job_free, 1212b15de828SKevin Wolf .user_resume = block_job_user_resume, 1213f67432a2SJohn Snow .run = mirror_run, 1214737efc1eSJohn Snow .prepare = mirror_prepare, 1215737efc1eSJohn Snow .abort = mirror_abort, 1216565ac01fSStefan Hajnoczi .pause = mirror_pause, 1217da01ff7fSKevin Wolf .complete = mirror_complete, 12183453d972SKevin Wolf }, 121989bd0305SKevin Wolf .drained_poll = mirror_drained_poll, 122003544a6eSFam Zheng }; 122103544a6eSFam Zheng 1222537c3d4fSStefan Hajnoczi static void coroutine_fn 1223537c3d4fSStefan Hajnoczi do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, 1224d06107adSMax Reitz uint64_t offset, uint64_t bytes, 1225d06107adSMax Reitz QEMUIOVector *qiov, int flags) 1226d06107adSMax Reitz { 1227d06107adSMax Reitz int ret; 1228dbdf699cSVladimir Sementsov-Ogievskiy size_t qiov_offset = 0; 1229dbdf699cSVladimir Sementsov-Ogievskiy int64_t bitmap_offset, bitmap_end; 1230d06107adSMax Reitz 1231dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset, job->granularity) && 1232dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset)) 1233dbdf699cSVladimir Sementsov-Ogievskiy { 1234dbdf699cSVladimir Sementsov-Ogievskiy /* 1235dbdf699cSVladimir Sementsov-Ogievskiy * Dirty unaligned padding: ignore it. 1236dbdf699cSVladimir Sementsov-Ogievskiy * 1237dbdf699cSVladimir Sementsov-Ogievskiy * Reasoning: 1238dbdf699cSVladimir Sementsov-Ogievskiy * 1. If we copy it, we cannot reset the corresponding bit in the 1239dbdf699cSVladimir Sementsov-Ogievskiy * dirty_bitmap, as there may be some "dirty" bytes still not 1240dbdf699cSVladimir Sementsov-Ogievskiy * copied. 1241dbdf699cSVladimir Sementsov-Ogievskiy * 2. It is already dirty, so skipping it does not make the mirror's 1242dbdf699cSVladimir Sementsov-Ogievskiy * progress diverge. 1243dbdf699cSVladimir Sementsov-Ogievskiy * 1244dbdf699cSVladimir Sementsov-Ogievskiy * Note that, because of this, a guest write may not contribute to 1245dbdf699cSVladimir Sementsov-Ogievskiy * the mirror's convergence; that is acceptable, since the background 1246dbdf699cSVladimir Sementsov-Ogievskiy * mirroring process covers it. If, under bad circumstances (high 1247dbdf699cSVladimir Sementsov-Ogievskiy * guest I/O load), the background process is starved, we will not 1248dbdf699cSVladimir Sementsov-Ogievskiy * converge anyway, even if every write contributed, because the 1249dbdf699cSVladimir Sementsov-Ogievskiy * guest is not guaranteed to rewrite the whole disk.
1250dbdf699cSVladimir Sementsov-Ogievskiy */ 1251dbdf699cSVladimir Sementsov-Ogievskiy qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset; 1252dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= qiov_offset) { 1253dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */ 1254dbdf699cSVladimir Sementsov-Ogievskiy return; 1255dbdf699cSVladimir Sementsov-Ogievskiy } 1256dbdf699cSVladimir Sementsov-Ogievskiy offset += qiov_offset; 1257dbdf699cSVladimir Sementsov-Ogievskiy bytes -= qiov_offset; 1258dbdf699cSVladimir Sementsov-Ogievskiy } 1259dbdf699cSVladimir Sementsov-Ogievskiy 1260dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) && 1261dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1)) 1262dbdf699cSVladimir Sementsov-Ogievskiy { 1263dbdf699cSVladimir Sementsov-Ogievskiy uint64_t tail = (offset + bytes) % job->granularity; 1264dbdf699cSVladimir Sementsov-Ogievskiy 1265dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= tail) { 1266dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */ 1267dbdf699cSVladimir Sementsov-Ogievskiy return; 1268dbdf699cSVladimir Sementsov-Ogievskiy } 1269dbdf699cSVladimir Sementsov-Ogievskiy bytes -= tail; 1270dbdf699cSVladimir Sementsov-Ogievskiy } 1271dbdf699cSVladimir Sementsov-Ogievskiy 1272dbdf699cSVladimir Sementsov-Ogievskiy /* 1273dbdf699cSVladimir Sementsov-Ogievskiy * Tails are either clean or shrunk, so for bitmap resetting 1274dbdf699cSVladimir Sementsov-Ogievskiy * we safely align the range down. 1275dbdf699cSVladimir Sementsov-Ogievskiy */ 1276dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity); 1277dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity); 1278dbdf699cSVladimir Sementsov-Ogievskiy if (bitmap_offset < bitmap_end) { 1279dbdf699cSVladimir Sementsov-Ogievskiy bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset, 1280dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset); 1281dbdf699cSVladimir Sementsov-Ogievskiy } 1282d06107adSMax Reitz 12835c511ac3SVladimir Sementsov-Ogievskiy job_progress_increase_remaining(&job->common.job, bytes); 1284d06107adSMax Reitz 1285d06107adSMax Reitz switch (method) { 1286d06107adSMax Reitz case MIRROR_METHOD_COPY: 1287dbdf699cSVladimir Sementsov-Ogievskiy ret = blk_co_pwritev_part(job->target, offset, bytes, 1288dbdf699cSVladimir Sementsov-Ogievskiy qiov, qiov_offset, flags); 1289d06107adSMax Reitz break; 1290d06107adSMax Reitz 1291d06107adSMax Reitz case MIRROR_METHOD_ZERO: 1292d06107adSMax Reitz assert(!qiov); 12935c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags); 1294d06107adSMax Reitz break; 1295d06107adSMax Reitz 1296d06107adSMax Reitz case MIRROR_METHOD_DISCARD: 1297d06107adSMax Reitz assert(!qiov); 12985c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pdiscard(job->target, offset, bytes); 1299d06107adSMax Reitz break; 1300d06107adSMax Reitz 1301d06107adSMax Reitz default: 1302d06107adSMax Reitz abort(); 1303d06107adSMax Reitz } 1304d06107adSMax Reitz 1305d06107adSMax Reitz if (ret >= 0) { 13065c511ac3SVladimir Sementsov-Ogievskiy job_progress_update(&job->common.job, bytes); 1307d06107adSMax Reitz } else { 1308d06107adSMax Reitz BlockErrorAction action; 1309d06107adSMax Reitz 1310dbdf699cSVladimir Sementsov-Ogievskiy /* 1311dbdf699cSVladimir Sementsov-Ogievskiy * We failed, so we should mark dirty the whole area, aligned up. 
1312dbdf699cSVladimir Sementsov-Ogievskiy * Note that we don't care about shrunk tails if any: they were dirty 1313dbdf699cSVladimir Sementsov-Ogievskiy * at function start, and they must be still dirty, as we've locked 1314dbdf699cSVladimir Sementsov-Ogievskiy * the region for in-flight op. 1315dbdf699cSVladimir Sementsov-Ogievskiy */ 1316dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity); 1317dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity); 1318dbdf699cSVladimir Sementsov-Ogievskiy bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset, 1319dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset); 1320d06107adSMax Reitz job->actively_synced = false; 1321d06107adSMax Reitz 1322d06107adSMax Reitz action = mirror_error_action(job, false, -ret); 1323d06107adSMax Reitz if (action == BLOCK_ERROR_ACTION_REPORT) { 1324d06107adSMax Reitz if (!job->ret) { 1325d06107adSMax Reitz job->ret = ret; 1326d06107adSMax Reitz } 1327d06107adSMax Reitz } 1328d06107adSMax Reitz } 1329d06107adSMax Reitz } 1330d06107adSMax Reitz 1331d06107adSMax Reitz static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, 1332d06107adSMax Reitz uint64_t offset, 1333d06107adSMax Reitz uint64_t bytes) 1334d06107adSMax Reitz { 1335d06107adSMax Reitz MirrorOp *op; 1336d06107adSMax Reitz uint64_t start_chunk = offset / s->granularity; 1337d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); 1338d06107adSMax Reitz 1339d06107adSMax Reitz op = g_new(MirrorOp, 1); 1340d06107adSMax Reitz *op = (MirrorOp){ 1341d06107adSMax Reitz .s = s, 1342d06107adSMax Reitz .offset = offset, 1343d06107adSMax Reitz .bytes = bytes, 1344d06107adSMax Reitz .is_active_write = true, 1345ce8cabbdSKevin Wolf .is_in_flight = true, 1346d06107adSMax Reitz }; 1347d06107adSMax Reitz qemu_co_queue_init(&op->waiting_requests); 1348d06107adSMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); 1349d06107adSMax Reitz 1350d06107adSMax Reitz s->in_active_write_counter++; 1351d06107adSMax Reitz 1352d06107adSMax Reitz mirror_wait_on_conflicts(op, s, offset, bytes); 1353d06107adSMax Reitz 1354d06107adSMax Reitz bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1355d06107adSMax Reitz 1356d06107adSMax Reitz return op; 1357d06107adSMax Reitz } 1358d06107adSMax Reitz 1359d06107adSMax Reitz static void coroutine_fn active_write_settle(MirrorOp *op) 1360d06107adSMax Reitz { 1361d06107adSMax Reitz uint64_t start_chunk = op->offset / op->s->granularity; 1362d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes, 1363d06107adSMax Reitz op->s->granularity); 1364d06107adSMax Reitz 1365d06107adSMax Reitz if (!--op->s->in_active_write_counter && op->s->actively_synced) { 1366d06107adSMax Reitz BdrvChild *source = op->s->mirror_top_bs->backing; 1367d06107adSMax Reitz 1368d06107adSMax Reitz if (QLIST_FIRST(&source->bs->parents) == source && 1369d06107adSMax Reitz QLIST_NEXT(source, next_parent) == NULL) 1370d06107adSMax Reitz { 1371d06107adSMax Reitz /* Assert that we are back in sync once all active write 1372d06107adSMax Reitz * operations are settled. 1373d06107adSMax Reitz * Note that we can only assert this if the mirror node 1374d06107adSMax Reitz * is the source node's only parent. 
*/ 1375d06107adSMax Reitz assert(!bdrv_get_dirty_count(op->s->dirty_bitmap)); 1376d06107adSMax Reitz } 1377d06107adSMax Reitz } 1378d06107adSMax Reitz bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1379d06107adSMax Reitz QTAILQ_REMOVE(&op->s->ops_in_flight, op, next); 1380d06107adSMax Reitz qemu_co_queue_restart_all(&op->waiting_requests); 1381d06107adSMax Reitz g_free(op); 1382d06107adSMax Reitz } 1383d06107adSMax Reitz 13844ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, 13854ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 13864ef85a9cSKevin Wolf { 13874ef85a9cSKevin Wolf return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); 13884ef85a9cSKevin Wolf } 13894ef85a9cSKevin Wolf 1390d06107adSMax Reitz static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, 1391d06107adSMax Reitz MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, 1392d06107adSMax Reitz int flags) 1393d06107adSMax Reitz { 1394d06107adSMax Reitz MirrorOp *op = NULL; 1395d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1396d06107adSMax Reitz int ret = 0; 1397d06107adSMax Reitz bool copy_to_target; 1398d06107adSMax Reitz 1399d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1400d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1401d06107adSMax Reitz 1402d06107adSMax Reitz if (copy_to_target) { 1403d06107adSMax Reitz op = active_write_prepare(s->job, offset, bytes); 1404d06107adSMax Reitz } 1405d06107adSMax Reitz 1406d06107adSMax Reitz switch (method) { 1407d06107adSMax Reitz case MIRROR_METHOD_COPY: 1408d06107adSMax Reitz ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); 1409d06107adSMax Reitz break; 1410d06107adSMax Reitz 1411d06107adSMax Reitz case MIRROR_METHOD_ZERO: 1412d06107adSMax Reitz ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); 1413d06107adSMax Reitz break; 1414d06107adSMax Reitz 1415d06107adSMax Reitz case MIRROR_METHOD_DISCARD: 14160b9fd3f4SFam Zheng ret = bdrv_co_pdiscard(bs->backing, offset, bytes); 1417d06107adSMax Reitz break; 1418d06107adSMax Reitz 1419d06107adSMax Reitz default: 1420d06107adSMax Reitz abort(); 1421d06107adSMax Reitz } 1422d06107adSMax Reitz 1423d06107adSMax Reitz if (ret < 0) { 1424d06107adSMax Reitz goto out; 1425d06107adSMax Reitz } 1426d06107adSMax Reitz 1427d06107adSMax Reitz if (copy_to_target) { 1428d06107adSMax Reitz do_sync_target_write(s->job, method, offset, bytes, qiov, flags); 1429d06107adSMax Reitz } 1430d06107adSMax Reitz 1431d06107adSMax Reitz out: 1432d06107adSMax Reitz if (copy_to_target) { 1433d06107adSMax Reitz active_write_settle(op); 1434d06107adSMax Reitz } 1435d06107adSMax Reitz return ret; 1436d06107adSMax Reitz } 1437d06107adSMax Reitz 14384ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, 14394ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 14404ef85a9cSKevin Wolf { 1441d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1442d06107adSMax Reitz QEMUIOVector bounce_qiov; 1443d06107adSMax Reitz void *bounce_buf; 1444d06107adSMax Reitz int ret = 0; 1445d06107adSMax Reitz bool copy_to_target; 1446d06107adSMax Reitz 1447d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1448d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1449d06107adSMax Reitz 1450d06107adSMax Reitz if (copy_to_target) { 1451d06107adSMax Reitz /* The guest might concurrently modify the data to 
write; but 1452d06107adSMax Reitz * the data on source and destination must match, so we have 1453d06107adSMax Reitz * to use a bounce buffer if we are going to write to the 1454d06107adSMax Reitz * target now. */ 1455d06107adSMax Reitz bounce_buf = qemu_blockalign(bs, bytes); 1456d06107adSMax Reitz iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes); 1457d06107adSMax Reitz 1458d06107adSMax Reitz qemu_iovec_init(&bounce_qiov, 1); 1459d06107adSMax Reitz qemu_iovec_add(&bounce_qiov, bounce_buf, bytes); 1460d06107adSMax Reitz qiov = &bounce_qiov; 1461d06107adSMax Reitz } 1462d06107adSMax Reitz 1463d06107adSMax Reitz ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov, 1464d06107adSMax Reitz flags); 1465d06107adSMax Reitz 1466d06107adSMax Reitz if (copy_to_target) { 1467d06107adSMax Reitz qemu_iovec_destroy(&bounce_qiov); 1468d06107adSMax Reitz qemu_vfree(bounce_buf); 1469d06107adSMax Reitz } 1470d06107adSMax Reitz 1471d06107adSMax Reitz return ret; 14724ef85a9cSKevin Wolf } 14734ef85a9cSKevin Wolf 14744ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) 14754ef85a9cSKevin Wolf { 1476ce960aa9SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) { 1477ce960aa9SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_append in mirror_start_job */ 1478ce960aa9SVladimir Sementsov-Ogievskiy return 0; 1479ce960aa9SVladimir Sementsov-Ogievskiy } 14804ef85a9cSKevin Wolf return bdrv_co_flush(bs->backing->bs); 14814ef85a9cSKevin Wolf } 14824ef85a9cSKevin Wolf 14834ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, 1484f5a5ca79SManos Pitsidianakis int64_t offset, int bytes, BdrvRequestFlags flags) 14854ef85a9cSKevin Wolf { 1486d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL, 1487d06107adSMax Reitz flags); 14884ef85a9cSKevin Wolf } 14894ef85a9cSKevin Wolf 14904ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, 1491f5a5ca79SManos Pitsidianakis int64_t offset, int bytes) 14924ef85a9cSKevin Wolf { 1493d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes, 1494d06107adSMax Reitz NULL, 0); 14954ef85a9cSKevin Wolf } 14964ef85a9cSKevin Wolf 1497998b3a1eSMax Reitz static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs) 1498fd4a6493SKevin Wolf { 149918775ff3SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) { 150018775ff3SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_attach_child in 150118775ff3SVladimir Sementsov-Ogievskiy * bdrv_set_backing_hd */ 150218775ff3SVladimir Sementsov-Ogievskiy return; 150318775ff3SVladimir Sementsov-Ogievskiy } 1504fd4a6493SKevin Wolf pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), 1505fd4a6493SKevin Wolf bs->backing->bs->filename); 1506fd4a6493SKevin Wolf } 1507fd4a6493SKevin Wolf 15084ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c, 1509bf8e925eSMax Reitz BdrvChildRole role, 1510e0995dc3SKevin Wolf BlockReopenQueue *reopen_queue, 15114ef85a9cSKevin Wolf uint64_t perm, uint64_t shared, 15124ef85a9cSKevin Wolf uint64_t *nperm, uint64_t *nshared) 15134ef85a9cSKevin Wolf { 1514f94dc3b4SMax Reitz MirrorBDSOpaque *s = bs->opaque; 1515f94dc3b4SMax Reitz 1516f94dc3b4SMax Reitz if (s->stop) { 1517f94dc3b4SMax Reitz /* 1518f94dc3b4SMax Reitz * If the job is to be stopped, we do not need to forward 1519f94dc3b4SMax Reitz * anything to the real image. 
1520f94dc3b4SMax Reitz */ 1521f94dc3b4SMax Reitz *nperm = 0; 1522f94dc3b4SMax Reitz *nshared = BLK_PERM_ALL; 1523f94dc3b4SMax Reitz return; 1524f94dc3b4SMax Reitz } 1525f94dc3b4SMax Reitz 152653431b90SMax Reitz bdrv_default_perms(bs, c, role, reopen_queue, 152753431b90SMax Reitz perm, shared, nperm, nshared); 15284ef85a9cSKevin Wolf 152953431b90SMax Reitz if (s->is_commit) { 153053431b90SMax Reitz /* 153153431b90SMax Reitz * For commit jobs, we cannot take CONSISTENT_READ, because 153253431b90SMax Reitz * that permission is unshared for everything above the base 153353431b90SMax Reitz * node (except for filters on the base node). 153453431b90SMax Reitz * We also have to force-share the WRITE permission, or 153553431b90SMax Reitz * otherwise we would block ourselves at the base node (if 153653431b90SMax Reitz * writes are blocked for a node, they are also blocked for 153753431b90SMax Reitz * its backing file). 153853431b90SMax Reitz * (We could also share RESIZE, because it may be needed for 153953431b90SMax Reitz * the target if its size is less than the top node's; but 154053431b90SMax Reitz * bdrv_default_perms_for_cow() automatically shares RESIZE 154153431b90SMax Reitz * for backing nodes if WRITE is shared, so there is no need 154253431b90SMax Reitz * to do it here.) 154353431b90SMax Reitz */ 154453431b90SMax Reitz *nperm &= ~BLK_PERM_CONSISTENT_READ; 154553431b90SMax Reitz *nshared |= BLK_PERM_WRITE; 154653431b90SMax Reitz } 15474ef85a9cSKevin Wolf } 15484ef85a9cSKevin Wolf 15494ef85a9cSKevin Wolf /* Dummy node that provides consistent read to its users without requiring it 15504ef85a9cSKevin Wolf * from its backing file and that allows writes on the backing file chain. */ 15514ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = { 15524ef85a9cSKevin Wolf .format_name = "mirror_top", 15534ef85a9cSKevin Wolf .bdrv_co_preadv = bdrv_mirror_top_preadv, 15544ef85a9cSKevin Wolf .bdrv_co_pwritev = bdrv_mirror_top_pwritev, 15554ef85a9cSKevin Wolf .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes, 15564ef85a9cSKevin Wolf .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard, 15574ef85a9cSKevin Wolf .bdrv_co_flush = bdrv_mirror_top_flush, 1558fd4a6493SKevin Wolf .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename, 15594ef85a9cSKevin Wolf .bdrv_child_perm = bdrv_mirror_top_child_perm, 15606540fd15SMax Reitz 15616540fd15SMax Reitz .is_filter = true, 15624ef85a9cSKevin Wolf }; 15634ef85a9cSKevin Wolf 1564cc19f177SVladimir Sementsov-Ogievskiy static BlockJob *mirror_start_job( 1565cc19f177SVladimir Sementsov-Ogievskiy const char *job_id, BlockDriverState *bs, 156647970dfbSJohn Snow int creation_flags, BlockDriverState *target, 156747970dfbSJohn Snow const char *replaces, int64_t speed, 156847970dfbSJohn Snow uint32_t granularity, int64_t buf_size, 1569274fcceeSMax Reitz BlockMirrorBackingMode backing_mode, 1570cdf3bc93SMax Reitz bool zero_target, 157103544a6eSFam Zheng BlockdevOnError on_source_error, 1572b952b558SPaolo Bonzini BlockdevOnError on_target_error, 15730fc9f8eaSFam Zheng bool unmap, 1574097310b5SMarkus Armbruster BlockCompletionFunc *cb, 157551ccfa2dSFam Zheng void *opaque, 157603544a6eSFam Zheng const BlockJobDriver *driver, 1577b49f7eadSWen Congyang bool is_none_mode, BlockDriverState *base, 157851ccfa2dSFam Zheng bool auto_complete, const char *filter_node_name, 1579481debaaSMax Reitz bool is_mirror, MirrorCopyMode copy_mode, 158051ccfa2dSFam Zheng Error **errp) 1581893f7ebaSPaolo Bonzini { 1582893f7ebaSPaolo Bonzini MirrorBlockJob *s; 1583429076e8SMax Reitz MirrorBDSOpaque 
*bs_opaque; 15844ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs; 15854ef85a9cSKevin Wolf bool target_is_backing; 15863f072a7fSMax Reitz uint64_t target_perms, target_shared_perms; 1587d7086422SKevin Wolf int ret; 1588893f7ebaSPaolo Bonzini 1589eee13dfeSPaolo Bonzini if (granularity == 0) { 1590341ebc2fSJohn Snow granularity = bdrv_get_default_bitmap_granularity(target); 1591eee13dfeSPaolo Bonzini } 1592eee13dfeSPaolo Bonzini 159331826642SEric Blake assert(is_power_of_2(granularity)); 1594eee13dfeSPaolo Bonzini 159548ac0a4dSWen Congyang if (buf_size < 0) { 159648ac0a4dSWen Congyang error_setg(errp, "Invalid parameter 'buf-size'"); 1597cc19f177SVladimir Sementsov-Ogievskiy return NULL; 159848ac0a4dSWen Congyang } 159948ac0a4dSWen Congyang 160048ac0a4dSWen Congyang if (buf_size == 0) { 160148ac0a4dSWen Congyang buf_size = DEFAULT_MIRROR_BUF_SIZE; 160248ac0a4dSWen Congyang } 16035bc361b8SFam Zheng 16043f072a7fSMax Reitz if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) { 160586fae10cSKevin Wolf error_setg(errp, "Can't mirror node into itself"); 1606cc19f177SVladimir Sementsov-Ogievskiy return NULL; 160786fae10cSKevin Wolf } 160886fae10cSKevin Wolf 160953431b90SMax Reitz target_is_backing = bdrv_chain_contains(bs, target); 161053431b90SMax Reitz 16114ef85a9cSKevin Wolf /* In the case of active commit, add dummy driver to provide consistent 16124ef85a9cSKevin Wolf * reads on the top, while disabling it in the intermediate nodes, and make 16134ef85a9cSKevin Wolf * the backing chain writable. */ 16146cdbceb1SKevin Wolf mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, 16156cdbceb1SKevin Wolf BDRV_O_RDWR, errp); 16164ef85a9cSKevin Wolf if (mirror_top_bs == NULL) { 1617cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1618893f7ebaSPaolo Bonzini } 1619d3c8c674SKevin Wolf if (!filter_node_name) { 1620d3c8c674SKevin Wolf mirror_top_bs->implicit = true; 1621d3c8c674SKevin Wolf } 1622e5182c1cSMax Reitz 1623e5182c1cSMax Reitz /* So that we can always drop this node */ 1624e5182c1cSMax Reitz mirror_top_bs->never_freeze = true; 1625e5182c1cSMax Reitz 16264ef85a9cSKevin Wolf mirror_top_bs->total_sectors = bs->total_sectors; 1627228345bfSMax Reitz mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED; 162880f5c33fSKevin Wolf mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | 162980f5c33fSKevin Wolf BDRV_REQ_NO_FALLBACK; 1630429076e8SMax Reitz bs_opaque = g_new0(MirrorBDSOpaque, 1); 1631429076e8SMax Reitz mirror_top_bs->opaque = bs_opaque; 1632893f7ebaSPaolo Bonzini 163353431b90SMax Reitz bs_opaque->is_commit = target_is_backing; 163453431b90SMax Reitz 16354ef85a9cSKevin Wolf bdrv_drained_begin(bs); 1636934aee14SVladimir Sementsov-Ogievskiy ret = bdrv_append(mirror_top_bs, bs, errp); 16374ef85a9cSKevin Wolf bdrv_drained_end(bs); 16384ef85a9cSKevin Wolf 1639934aee14SVladimir Sementsov-Ogievskiy if (ret < 0) { 1640b2c2832cSKevin Wolf bdrv_unref(mirror_top_bs); 1641cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1642b2c2832cSKevin Wolf } 1643b2c2832cSKevin Wolf 16444ef85a9cSKevin Wolf /* Make sure that the source is not resized while the job is running */ 164575859b94SJohn Snow s = block_job_create(job_id, driver, NULL, mirror_top_bs, 16464ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ, 16474ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | 16484ef85a9cSKevin Wolf BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed, 16494ef85a9cSKevin Wolf creation_flags, cb, opaque, errp); 16504ef85a9cSKevin Wolf if (!s) { 16514ef85a9cSKevin Wolf goto fail; 
16524ef85a9cSKevin Wolf } 1653429076e8SMax Reitz bs_opaque->job = s; 1654429076e8SMax Reitz 16557a25fcd0SMax Reitz /* The block job now has a reference to this node */ 16567a25fcd0SMax Reitz bdrv_unref(mirror_top_bs); 16577a25fcd0SMax Reitz 16584ef85a9cSKevin Wolf s->mirror_top_bs = mirror_top_bs; 16594ef85a9cSKevin Wolf 16604ef85a9cSKevin Wolf /* No resize for the target either; while the mirror is still running, a 16614ef85a9cSKevin Wolf * consistent read isn't necessarily possible. We could possibly allow 16624ef85a9cSKevin Wolf * writes and graph modifications, though it would likely defeat the 16634ef85a9cSKevin Wolf * purpose of a mirror, so leave them blocked for now. 16644ef85a9cSKevin Wolf * 16654ef85a9cSKevin Wolf * In the case of active commit, things look a bit different, though, 16664ef85a9cSKevin Wolf * because the target is an already populated backing file in active use. 16674ef85a9cSKevin Wolf * We can allow anything except resize there.*/ 16683f072a7fSMax Reitz 16693f072a7fSMax Reitz target_perms = BLK_PERM_WRITE; 16703f072a7fSMax Reitz target_shared_perms = BLK_PERM_WRITE_UNCHANGED; 16713f072a7fSMax Reitz 16723f072a7fSMax Reitz if (target_is_backing) { 16733f072a7fSMax Reitz int64_t bs_size, target_size; 16743f072a7fSMax Reitz bs_size = bdrv_getlength(bs); 16753f072a7fSMax Reitz if (bs_size < 0) { 16763f072a7fSMax Reitz error_setg_errno(errp, -bs_size, 16773f072a7fSMax Reitz "Could not inquire top image size"); 16783f072a7fSMax Reitz goto fail; 16793f072a7fSMax Reitz } 16803f072a7fSMax Reitz 16813f072a7fSMax Reitz target_size = bdrv_getlength(target); 16823f072a7fSMax Reitz if (target_size < 0) { 16833f072a7fSMax Reitz error_setg_errno(errp, -target_size, 16843f072a7fSMax Reitz "Could not inquire base image size"); 16853f072a7fSMax Reitz goto fail; 16863f072a7fSMax Reitz } 16873f072a7fSMax Reitz 16883f072a7fSMax Reitz if (target_size < bs_size) { 16893f072a7fSMax Reitz target_perms |= BLK_PERM_RESIZE; 16903f072a7fSMax Reitz } 16913f072a7fSMax Reitz 16923f072a7fSMax Reitz target_shared_perms |= BLK_PERM_CONSISTENT_READ 16933f072a7fSMax Reitz | BLK_PERM_WRITE 16943f072a7fSMax Reitz | BLK_PERM_GRAPH_MOD; 16953f072a7fSMax Reitz } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) { 16963f072a7fSMax Reitz /* 16973f072a7fSMax Reitz * We may want to allow this in the future, but it would 16983f072a7fSMax Reitz * require taking some extra care. 16993f072a7fSMax Reitz */ 17003f072a7fSMax Reitz error_setg(errp, "Cannot mirror to a filter on top of a node in the " 17013f072a7fSMax Reitz "source's backing chain"); 17023f072a7fSMax Reitz goto fail; 17033f072a7fSMax Reitz } 17043f072a7fSMax Reitz 17053f072a7fSMax Reitz if (backing_mode != MIRROR_LEAVE_BACKING_CHAIN) { 17063f072a7fSMax Reitz target_perms |= BLK_PERM_GRAPH_MOD; 17073f072a7fSMax Reitz } 17083f072a7fSMax Reitz 1709d861ab3aSKevin Wolf s->target = blk_new(s->common.job.aio_context, 17103f072a7fSMax Reitz target_perms, target_shared_perms); 1711d7086422SKevin Wolf ret = blk_insert_bs(s->target, target, errp); 1712d7086422SKevin Wolf if (ret < 0) { 17134ef85a9cSKevin Wolf goto fail; 1714d7086422SKevin Wolf } 1715045a2f82SFam Zheng if (is_mirror) { 1716045a2f82SFam Zheng /* XXX: Mirror target could be a NBD server of target QEMU in the case 1717045a2f82SFam Zheng * of non-shared block migration. To allow migration completion, we 1718045a2f82SFam Zheng * have to allow "inactivate" of the target BB. 
When that happens, we 1719045a2f82SFam Zheng * know the job is drained, and the vcpus are stopped, so no write 1720045a2f82SFam Zheng * operation will be performed. Block layer already has assertions to 1721045a2f82SFam Zheng * ensure that. */ 1722045a2f82SFam Zheng blk_set_force_allow_inactivate(s->target); 1723045a2f82SFam Zheng } 17249ff7f0dfSKevin Wolf blk_set_allow_aio_context_change(s->target, true); 1725cf312932SKevin Wolf blk_set_disable_request_queuing(s->target, true); 1726e253f4b8SKevin Wolf 172709158f00SBenoît Canet s->replaces = g_strdup(replaces); 1728b952b558SPaolo Bonzini s->on_source_error = on_source_error; 1729b952b558SPaolo Bonzini s->on_target_error = on_target_error; 173003544a6eSFam Zheng s->is_none_mode = is_none_mode; 1731274fcceeSMax Reitz s->backing_mode = backing_mode; 1732cdf3bc93SMax Reitz s->zero_target = zero_target; 1733481debaaSMax Reitz s->copy_mode = copy_mode; 17345bc361b8SFam Zheng s->base = base; 17353f072a7fSMax Reitz s->base_overlay = bdrv_find_overlay(bs, base); 1736eee13dfeSPaolo Bonzini s->granularity = granularity; 173748ac0a4dSWen Congyang s->buf_size = ROUND_UP(buf_size, granularity); 17380fc9f8eaSFam Zheng s->unmap = unmap; 1739b49f7eadSWen Congyang if (auto_complete) { 1740b49f7eadSWen Congyang s->should_complete = true; 1741b49f7eadSWen Congyang } 1742b812f671SPaolo Bonzini 17430db6e54aSFam Zheng s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); 1744b8afb520SFam Zheng if (!s->dirty_bitmap) { 174588f9d1b3SKevin Wolf goto fail; 1746b8afb520SFam Zheng } 1747dbdf699cSVladimir Sementsov-Ogievskiy if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) { 1748dbdf699cSVladimir Sementsov-Ogievskiy bdrv_disable_dirty_bitmap(s->dirty_bitmap); 1749dbdf699cSVladimir Sementsov-Ogievskiy } 175010f3cd15SAlberto Garcia 175167b24427SAlberto Garcia ret = block_job_add_bdrv(&s->common, "source", bs, 0, 175267b24427SAlberto Garcia BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | 175367b24427SAlberto Garcia BLK_PERM_CONSISTENT_READ, 175467b24427SAlberto Garcia errp); 175567b24427SAlberto Garcia if (ret < 0) { 175667b24427SAlberto Garcia goto fail; 175767b24427SAlberto Garcia } 175867b24427SAlberto Garcia 17594ef85a9cSKevin Wolf /* Required permissions are already taken with blk_new() */ 176076d554e2SKevin Wolf block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL, 176176d554e2SKevin Wolf &error_abort); 176276d554e2SKevin Wolf 1763f3ede4b0SAlberto Garcia /* In commit_active_start() all intermediate nodes disappear, so 1764f3ede4b0SAlberto Garcia * any jobs in them must be blocked */ 17654ef85a9cSKevin Wolf if (target_is_backing) { 17663f072a7fSMax Reitz BlockDriverState *iter, *filtered_target; 17673f072a7fSMax Reitz uint64_t iter_shared_perms; 17683f072a7fSMax Reitz 17693f072a7fSMax Reitz /* 17703f072a7fSMax Reitz * The topmost node with 17713f072a7fSMax Reitz * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target) 17723f072a7fSMax Reitz */ 17733f072a7fSMax Reitz filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target)); 17743f072a7fSMax Reitz 17753f072a7fSMax Reitz assert(bdrv_skip_filters(filtered_target) == 17763f072a7fSMax Reitz bdrv_skip_filters(target)); 17773f072a7fSMax Reitz 17783f072a7fSMax Reitz /* 17793f072a7fSMax Reitz * XXX BLK_PERM_WRITE needs to be allowed so we don't block 17804ef85a9cSKevin Wolf * ourselves at s->base (if writes are blocked for a node, they are 17814ef85a9cSKevin Wolf * also blocked for its backing file). 
The other options would be a 17823f072a7fSMax Reitz * second filter driver above s->base (== target). 17833f072a7fSMax Reitz */ 17843f072a7fSMax Reitz iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE; 17853f072a7fSMax Reitz 17863f072a7fSMax Reitz for (iter = bdrv_filter_or_cow_bs(bs); iter != target; 17873f072a7fSMax Reitz iter = bdrv_filter_or_cow_bs(iter)) 17883f072a7fSMax Reitz { 17893f072a7fSMax Reitz if (iter == filtered_target) { 17903f072a7fSMax Reitz /* 17913f072a7fSMax Reitz * From here on, all nodes are filters on the base. 17923f072a7fSMax Reitz * This allows us to share BLK_PERM_CONSISTENT_READ. 17933f072a7fSMax Reitz */ 17943f072a7fSMax Reitz iter_shared_perms |= BLK_PERM_CONSISTENT_READ; 17953f072a7fSMax Reitz } 17963f072a7fSMax Reitz 17974ef85a9cSKevin Wolf ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, 17983f072a7fSMax Reitz iter_shared_perms, errp); 17994ef85a9cSKevin Wolf if (ret < 0) { 18004ef85a9cSKevin Wolf goto fail; 18014ef85a9cSKevin Wolf } 1802f3ede4b0SAlberto Garcia } 1803ef53dc09SAlberto Garcia 1804ef53dc09SAlberto Garcia if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) { 1805ef53dc09SAlberto Garcia goto fail; 1806ef53dc09SAlberto Garcia } 1807f3ede4b0SAlberto Garcia } 180810f3cd15SAlberto Garcia 180912aa4082SMax Reitz QTAILQ_INIT(&s->ops_in_flight); 181012aa4082SMax Reitz 18115ccac6f1SJohn Snow trace_mirror_start(bs, s, opaque); 1812da01ff7fSKevin Wolf job_start(&s->common.job); 1813cc19f177SVladimir Sementsov-Ogievskiy 1814cc19f177SVladimir Sementsov-Ogievskiy return &s->common; 18154ef85a9cSKevin Wolf 18164ef85a9cSKevin Wolf fail: 18174ef85a9cSKevin Wolf if (s) { 18187a25fcd0SMax Reitz /* Make sure this BDS does not go away until we have completed the graph 18197a25fcd0SMax Reitz * changes below */ 18207a25fcd0SMax Reitz bdrv_ref(mirror_top_bs); 18217a25fcd0SMax Reitz 18224ef85a9cSKevin Wolf g_free(s->replaces); 18234ef85a9cSKevin Wolf blk_unref(s->target); 1824429076e8SMax Reitz bs_opaque->job = NULL; 1825e917e2cbSAlberto Garcia if (s->dirty_bitmap) { 18265deb6cbdSVladimir Sementsov-Ogievskiy bdrv_release_dirty_bitmap(s->dirty_bitmap); 1827e917e2cbSAlberto Garcia } 18284ad35181SKevin Wolf job_early_fail(&s->common.job); 18294ef85a9cSKevin Wolf } 18304ef85a9cSKevin Wolf 1831f94dc3b4SMax Reitz bs_opaque->stop = true; 1832f94dc3b4SMax Reitz bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, 1833c1cef672SFam Zheng &error_abort); 18343f072a7fSMax Reitz bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); 18357a25fcd0SMax Reitz 18367a25fcd0SMax Reitz bdrv_unref(mirror_top_bs); 1837cc19f177SVladimir Sementsov-Ogievskiy 1838cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1839893f7ebaSPaolo Bonzini } 184003544a6eSFam Zheng 184171aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs, 184271aa9867SAlberto Garcia BlockDriverState *target, const char *replaces, 1843a1999b33SJohn Snow int creation_flags, int64_t speed, 1844a1999b33SJohn Snow uint32_t granularity, int64_t buf_size, 1845274fcceeSMax Reitz MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, 1846cdf3bc93SMax Reitz bool zero_target, 1847274fcceeSMax Reitz BlockdevOnError on_source_error, 184803544a6eSFam Zheng BlockdevOnError on_target_error, 1849481debaaSMax Reitz bool unmap, const char *filter_node_name, 1850481debaaSMax Reitz MirrorCopyMode copy_mode, Error **errp) 185103544a6eSFam Zheng { 185203544a6eSFam Zheng bool is_none_mode; 185303544a6eSFam Zheng BlockDriverState *base; 
185403544a6eSFam Zheng 1855c8b56501SJohn Snow if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) || 1856c8b56501SJohn Snow (mode == MIRROR_SYNC_MODE_BITMAP)) { 1857c8b56501SJohn Snow error_setg(errp, "Sync mode '%s' not supported", 1858c8b56501SJohn Snow MirrorSyncMode_str(mode)); 1859d58d8453SJohn Snow return; 1860d58d8453SJohn Snow } 186103544a6eSFam Zheng is_none_mode = mode == MIRROR_SYNC_MODE_NONE; 18623f072a7fSMax Reitz base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL; 1863a1999b33SJohn Snow mirror_start_job(job_id, bs, creation_flags, target, replaces, 1864cdf3bc93SMax Reitz speed, granularity, buf_size, backing_mode, zero_target, 186551ccfa2dSFam Zheng on_source_error, on_target_error, unmap, NULL, NULL, 18666cdbceb1SKevin Wolf &mirror_job_driver, is_none_mode, base, false, 1867481debaaSMax Reitz filter_node_name, true, copy_mode, errp); 186803544a6eSFam Zheng } 186903544a6eSFam Zheng 1870cc19f177SVladimir Sementsov-Ogievskiy BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, 187147970dfbSJohn Snow BlockDriverState *base, int creation_flags, 187247970dfbSJohn Snow int64_t speed, BlockdevOnError on_error, 18730db832f4SKevin Wolf const char *filter_node_name, 187478bbd910SFam Zheng BlockCompletionFunc *cb, void *opaque, 187578bbd910SFam Zheng bool auto_complete, Error **errp) 187603544a6eSFam Zheng { 18771ba79388SAlberto Garcia bool base_read_only; 1878eb5becc1SVladimir Sementsov-Ogievskiy BlockJob *job; 18794da83585SJeff Cody 18801ba79388SAlberto Garcia base_read_only = bdrv_is_read_only(base); 18814da83585SJeff Cody 18821ba79388SAlberto Garcia if (base_read_only) { 18831ba79388SAlberto Garcia if (bdrv_reopen_set_read_only(base, false, errp) < 0) { 1884cc19f177SVladimir Sementsov-Ogievskiy return NULL; 188520a63d2cSFam Zheng } 18861ba79388SAlberto Garcia } 18874da83585SJeff Cody 1888eb5becc1SVladimir Sementsov-Ogievskiy job = mirror_start_job( 1889cc19f177SVladimir Sementsov-Ogievskiy job_id, bs, creation_flags, base, NULL, speed, 0, 0, 1890cdf3bc93SMax Reitz MIRROR_LEAVE_BACKING_CHAIN, false, 189151ccfa2dSFam Zheng on_error, on_error, true, cb, opaque, 18926cdbceb1SKevin Wolf &commit_active_job_driver, false, base, auto_complete, 1893481debaaSMax Reitz filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND, 1894eb5becc1SVladimir Sementsov-Ogievskiy errp); 1895eb5becc1SVladimir Sementsov-Ogievskiy if (!job) { 18964da83585SJeff Cody goto error_restore_flags; 18974da83585SJeff Cody } 18984da83585SJeff Cody 1899eb5becc1SVladimir Sementsov-Ogievskiy return job; 19004da83585SJeff Cody 19014da83585SJeff Cody error_restore_flags: 19024da83585SJeff Cody /* ignore error and errp for bdrv_reopen, because we want to propagate 19034da83585SJeff Cody * the original error */ 19041ba79388SAlberto Garcia if (base_read_only) { 19051ba79388SAlberto Garcia bdrv_reopen_set_read_only(base, true, NULL); 19061ba79388SAlberto Garcia } 1907cc19f177SVladimir Sementsov-Ogievskiy return NULL; 190803544a6eSFam Zheng } 1909
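/*
 * Illustrative usage sketch (not part of the original file, kept compiled
 * out): how a hypothetical caller might start a full mirror of one node to
 * another via the mirror_start() entry point defined above.  The helper
 * name example_start_mirror, the job id "mirror0", and the particular
 * choice of sync/backing/copy modes and error policies are assumptions made
 * for this example only; real callers set these up from the QMP mirror
 * commands.
 */
#if 0
static void example_start_mirror(BlockDriverState *bs,
                                 BlockDriverState *target, Error **errp)
{
    /*
     * Mirror the whole device.  Passing 0 for granularity and buf_size lets
     * mirror_start_job() pick the target's default bitmap granularity and
     * DEFAULT_MIRROR_BUF_SIZE, respectively.
     */
    mirror_start("mirror0", bs, target,
                 NULL,                              /* replaces */
                 JOB_DEFAULT,                       /* creation_flags */
                 0,                                 /* speed: unlimited */
                 0, 0,                              /* granularity, buf_size */
                 MIRROR_SYNC_MODE_FULL,
                 MIRROR_OPEN_BACKING_CHAIN,
                 true,                              /* zero_target */
                 BLOCKDEV_ON_ERROR_REPORT,          /* on_source_error */
                 BLOCKDEV_ON_ERROR_REPORT,          /* on_target_error */
                 true,                              /* unmap */
                 NULL,                              /* filter_node_name */
                 MIRROR_COPY_MODE_WRITE_BLOCKING,
                 errp);
}
#endif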