1893f7ebaSPaolo Bonzini /* 2893f7ebaSPaolo Bonzini * Image mirroring 3893f7ebaSPaolo Bonzini * 4893f7ebaSPaolo Bonzini * Copyright Red Hat, Inc. 2012 5893f7ebaSPaolo Bonzini * 6893f7ebaSPaolo Bonzini * Authors: 7893f7ebaSPaolo Bonzini * Paolo Bonzini <pbonzini@redhat.com> 8893f7ebaSPaolo Bonzini * 9893f7ebaSPaolo Bonzini * This work is licensed under the terms of the GNU LGPL, version 2 or later. 10893f7ebaSPaolo Bonzini * See the COPYING.LIB file in the top-level directory. 11893f7ebaSPaolo Bonzini * 12893f7ebaSPaolo Bonzini */ 13893f7ebaSPaolo Bonzini 1480c71a24SPeter Maydell #include "qemu/osdep.h" 15fd4a6493SKevin Wolf #include "qemu/cutils.h" 1612aa4082SMax Reitz #include "qemu/coroutine.h" 171181e19aSMax Reitz #include "qemu/range.h" 18893f7ebaSPaolo Bonzini #include "trace.h" 19c87621eaSJohn Snow #include "block/blockjob_int.h" 20737e150eSPaolo Bonzini #include "block/block_int.h" 21373340b2SMax Reitz #include "sysemu/block-backend.h" 22da34e65cSMarkus Armbruster #include "qapi/error.h" 23cc7a8ea7SMarkus Armbruster #include "qapi/qmp/qerror.h" 24893f7ebaSPaolo Bonzini #include "qemu/ratelimit.h" 25b812f671SPaolo Bonzini #include "qemu/bitmap.h" 26893f7ebaSPaolo Bonzini 27402a4741SPaolo Bonzini #define MAX_IN_FLIGHT 16 28b436982fSEric Blake #define MAX_IO_BYTES (1 << 20) /* 1 Mb */ 29b436982fSEric Blake #define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES) 30402a4741SPaolo Bonzini 31402a4741SPaolo Bonzini /* The mirroring buffer is a list of granularity-sized chunks. 32402a4741SPaolo Bonzini * Free chunks are organized in a list. 33402a4741SPaolo Bonzini */ 34402a4741SPaolo Bonzini typedef struct MirrorBuffer { 35402a4741SPaolo Bonzini QSIMPLEQ_ENTRY(MirrorBuffer) next; 36402a4741SPaolo Bonzini } MirrorBuffer; 37893f7ebaSPaolo Bonzini 3812aa4082SMax Reitz typedef struct MirrorOp MirrorOp; 3912aa4082SMax Reitz 40893f7ebaSPaolo Bonzini typedef struct MirrorBlockJob { 41893f7ebaSPaolo Bonzini BlockJob common; 42e253f4b8SKevin Wolf BlockBackend *target; 434ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs; 445bc361b8SFam Zheng BlockDriverState *base; 454ef85a9cSKevin Wolf 4609158f00SBenoît Canet /* The name of the graph node to replace */ 4709158f00SBenoît Canet char *replaces; 4809158f00SBenoît Canet /* The BDS to replace */ 4909158f00SBenoît Canet BlockDriverState *to_replace; 5009158f00SBenoît Canet /* Used to block operations on the drive-mirror-replace target */ 5109158f00SBenoît Canet Error *replace_blocker; 5203544a6eSFam Zheng bool is_none_mode; 53274fcceeSMax Reitz BlockMirrorBackingMode backing_mode; 54*d06107adSMax Reitz MirrorCopyMode copy_mode; 55b952b558SPaolo Bonzini BlockdevOnError on_source_error, on_target_error; 56d63ffd87SPaolo Bonzini bool synced; 57*d06107adSMax Reitz /* Set when the target is synced (dirty bitmap is clean, nothing 58*d06107adSMax Reitz * in flight) and the job is running in active mode */ 59*d06107adSMax Reitz bool actively_synced; 60d63ffd87SPaolo Bonzini bool should_complete; 61eee13dfeSPaolo Bonzini int64_t granularity; 62b812f671SPaolo Bonzini size_t buf_size; 63b21c7652SMax Reitz int64_t bdev_length; 64b812f671SPaolo Bonzini unsigned long *cow_bitmap; 65e4654d2dSFam Zheng BdrvDirtyBitmap *dirty_bitmap; 66dc162c8eSFam Zheng BdrvDirtyBitmapIter *dbi; 67893f7ebaSPaolo Bonzini uint8_t *buf; 68402a4741SPaolo Bonzini QSIMPLEQ_HEAD(, MirrorBuffer) buf_free; 69402a4741SPaolo Bonzini int buf_free_count; 70bd48bde8SPaolo Bonzini 7149efb1f5SDenis V. 
Lunev uint64_t last_pause_ns; 72402a4741SPaolo Bonzini unsigned long *in_flight_bitmap; 73bd48bde8SPaolo Bonzini int in_flight; 74b436982fSEric Blake int64_t bytes_in_flight; 7512aa4082SMax Reitz QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight; 76bd48bde8SPaolo Bonzini int ret; 770fc9f8eaSFam Zheng bool unmap; 78b436982fSEric Blake int target_cluster_size; 79e5b43573SFam Zheng int max_iov; 8090ab48ebSAnton Nefedov bool initial_zeroing_ongoing; 81*d06107adSMax Reitz int in_active_write_counter; 82893f7ebaSPaolo Bonzini } MirrorBlockJob; 83893f7ebaSPaolo Bonzini 84429076e8SMax Reitz typedef struct MirrorBDSOpaque { 85429076e8SMax Reitz MirrorBlockJob *job; 86429076e8SMax Reitz } MirrorBDSOpaque; 87429076e8SMax Reitz 8812aa4082SMax Reitz struct MirrorOp { 89bd48bde8SPaolo Bonzini MirrorBlockJob *s; 90bd48bde8SPaolo Bonzini QEMUIOVector qiov; 91b436982fSEric Blake int64_t offset; 92b436982fSEric Blake uint64_t bytes; 932e1990b2SMax Reitz 942e1990b2SMax Reitz /* The pointee is set by mirror_co_read(), mirror_co_zero(), and 952e1990b2SMax Reitz * mirror_co_discard() before yielding for the first time */ 962e1990b2SMax Reitz int64_t *bytes_handled; 9712aa4082SMax Reitz 981181e19aSMax Reitz bool is_pseudo_op; 99*d06107adSMax Reitz bool is_active_write; 10012aa4082SMax Reitz CoQueue waiting_requests; 10112aa4082SMax Reitz 10212aa4082SMax Reitz QTAILQ_ENTRY(MirrorOp) next; 10312aa4082SMax Reitz }; 104bd48bde8SPaolo Bonzini 1054295c5fcSMax Reitz typedef enum MirrorMethod { 1064295c5fcSMax Reitz MIRROR_METHOD_COPY, 1074295c5fcSMax Reitz MIRROR_METHOD_ZERO, 1084295c5fcSMax Reitz MIRROR_METHOD_DISCARD, 1094295c5fcSMax Reitz } MirrorMethod; 1104295c5fcSMax Reitz 111b952b558SPaolo Bonzini static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, 112b952b558SPaolo Bonzini int error) 113b952b558SPaolo Bonzini { 114b952b558SPaolo Bonzini s->synced = false; 115*d06107adSMax Reitz s->actively_synced = false; 116b952b558SPaolo Bonzini if (read) { 11781e254dcSKevin Wolf return block_job_error_action(&s->common, s->on_source_error, 11881e254dcSKevin Wolf true, error); 119b952b558SPaolo Bonzini } else { 12081e254dcSKevin Wolf return block_job_error_action(&s->common, s->on_target_error, 12181e254dcSKevin Wolf false, error); 122b952b558SPaolo Bonzini } 123b952b558SPaolo Bonzini } 124b952b558SPaolo Bonzini 1251181e19aSMax Reitz static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self, 1261181e19aSMax Reitz MirrorBlockJob *s, 1271181e19aSMax Reitz uint64_t offset, 1281181e19aSMax Reitz uint64_t bytes) 1291181e19aSMax Reitz { 1301181e19aSMax Reitz uint64_t self_start_chunk = offset / s->granularity; 1311181e19aSMax Reitz uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); 1321181e19aSMax Reitz uint64_t self_nb_chunks = self_end_chunk - self_start_chunk; 1331181e19aSMax Reitz 1341181e19aSMax Reitz while (find_next_bit(s->in_flight_bitmap, self_end_chunk, 1351181e19aSMax Reitz self_start_chunk) < self_end_chunk && 1361181e19aSMax Reitz s->ret >= 0) 1371181e19aSMax Reitz { 1381181e19aSMax Reitz MirrorOp *op; 1391181e19aSMax Reitz 1401181e19aSMax Reitz QTAILQ_FOREACH(op, &s->ops_in_flight, next) { 1411181e19aSMax Reitz uint64_t op_start_chunk = op->offset / s->granularity; 1421181e19aSMax Reitz uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes, 1431181e19aSMax Reitz s->granularity) - 1441181e19aSMax Reitz op_start_chunk; 1451181e19aSMax Reitz 1461181e19aSMax Reitz if (op == self) { 1471181e19aSMax Reitz continue; 1481181e19aSMax Reitz } 1491181e19aSMax Reitz 
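            /* If this operation overlaps the range we are interested in,
             * wait for it to complete.  Completion of that operation wakes
             * everything queued on op->waiting_requests, after which the
             * outer while loop rescans the in-flight bitmap, since the set
             * of in-flight operations may have changed while we yielded. */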
1501181e19aSMax Reitz if (ranges_overlap(self_start_chunk, self_nb_chunks, 1511181e19aSMax Reitz op_start_chunk, op_nb_chunks)) 1521181e19aSMax Reitz { 1531181e19aSMax Reitz qemu_co_queue_wait(&op->waiting_requests, NULL); 1541181e19aSMax Reitz break; 1551181e19aSMax Reitz } 1561181e19aSMax Reitz } 1571181e19aSMax Reitz } 1581181e19aSMax Reitz } 1591181e19aSMax Reitz 1602e1990b2SMax Reitz static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret) 161bd48bde8SPaolo Bonzini { 162bd48bde8SPaolo Bonzini MirrorBlockJob *s = op->s; 163402a4741SPaolo Bonzini struct iovec *iov; 164bd48bde8SPaolo Bonzini int64_t chunk_num; 165b436982fSEric Blake int i, nb_chunks; 166bd48bde8SPaolo Bonzini 167b436982fSEric Blake trace_mirror_iteration_done(s, op->offset, op->bytes, ret); 168bd48bde8SPaolo Bonzini 169bd48bde8SPaolo Bonzini s->in_flight--; 170b436982fSEric Blake s->bytes_in_flight -= op->bytes; 171402a4741SPaolo Bonzini iov = op->qiov.iov; 172402a4741SPaolo Bonzini for (i = 0; i < op->qiov.niov; i++) { 173402a4741SPaolo Bonzini MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base; 174402a4741SPaolo Bonzini QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next); 175402a4741SPaolo Bonzini s->buf_free_count++; 176402a4741SPaolo Bonzini } 177402a4741SPaolo Bonzini 178b436982fSEric Blake chunk_num = op->offset / s->granularity; 179b436982fSEric Blake nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); 18012aa4082SMax Reitz 181402a4741SPaolo Bonzini bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks); 18212aa4082SMax Reitz QTAILQ_REMOVE(&s->ops_in_flight, op, next); 183b21c7652SMax Reitz if (ret >= 0) { 184b21c7652SMax Reitz if (s->cow_bitmap) { 185bd48bde8SPaolo Bonzini bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); 186bd48bde8SPaolo Bonzini } 18790ab48ebSAnton Nefedov if (!s->initial_zeroing_ongoing) { 18830a5c887SKevin Wolf job_progress_update(&s->common.job, op->bytes); 189b21c7652SMax Reitz } 19090ab48ebSAnton Nefedov } 1916df3bf8eSZhang Min qemu_iovec_destroy(&op->qiov); 1927b770c72SStefan Hajnoczi 19312aa4082SMax Reitz qemu_co_queue_restart_all(&op->waiting_requests); 19412aa4082SMax Reitz g_free(op); 1957b770c72SStefan Hajnoczi } 196bd48bde8SPaolo Bonzini 1972e1990b2SMax Reitz static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret) 198bd48bde8SPaolo Bonzini { 199bd48bde8SPaolo Bonzini MirrorBlockJob *s = op->s; 200b9e413ddSPaolo Bonzini 201b9e413ddSPaolo Bonzini aio_context_acquire(blk_get_aio_context(s->common.blk)); 202bd48bde8SPaolo Bonzini if (ret < 0) { 203bd48bde8SPaolo Bonzini BlockErrorAction action; 204bd48bde8SPaolo Bonzini 205e0d7f73eSEric Blake bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes); 206bd48bde8SPaolo Bonzini action = mirror_error_action(s, false, -ret); 207a589569fSWenchao Xia if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { 208bd48bde8SPaolo Bonzini s->ret = ret; 209bd48bde8SPaolo Bonzini } 210bd48bde8SPaolo Bonzini } 211bd48bde8SPaolo Bonzini mirror_iteration_done(op, ret); 212b9e413ddSPaolo Bonzini aio_context_release(blk_get_aio_context(s->common.blk)); 213bd48bde8SPaolo Bonzini } 214bd48bde8SPaolo Bonzini 2152e1990b2SMax Reitz static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret) 216bd48bde8SPaolo Bonzini { 217bd48bde8SPaolo Bonzini MirrorBlockJob *s = op->s; 218b9e413ddSPaolo Bonzini 219b9e413ddSPaolo Bonzini aio_context_acquire(blk_get_aio_context(s->common.blk)); 220bd48bde8SPaolo Bonzini if (ret < 0) { 221bd48bde8SPaolo Bonzini BlockErrorAction action; 222bd48bde8SPaolo Bonzini 223e0d7f73eSEric Blake 
bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes); 224bd48bde8SPaolo Bonzini action = mirror_error_action(s, true, -ret); 225a589569fSWenchao Xia if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { 226bd48bde8SPaolo Bonzini s->ret = ret; 227bd48bde8SPaolo Bonzini } 228bd48bde8SPaolo Bonzini 229bd48bde8SPaolo Bonzini mirror_iteration_done(op, ret); 230b9e413ddSPaolo Bonzini } else { 2312e1990b2SMax Reitz ret = blk_co_pwritev(s->target, op->offset, 2322e1990b2SMax Reitz op->qiov.size, &op->qiov, 0); 2332e1990b2SMax Reitz mirror_write_complete(op, ret); 234bd48bde8SPaolo Bonzini } 235b9e413ddSPaolo Bonzini aio_context_release(blk_get_aio_context(s->common.blk)); 236b9e413ddSPaolo Bonzini } 237bd48bde8SPaolo Bonzini 238782d97efSEric Blake /* Clip bytes relative to offset to not exceed end-of-file */ 239782d97efSEric Blake static inline int64_t mirror_clip_bytes(MirrorBlockJob *s, 240782d97efSEric Blake int64_t offset, 241782d97efSEric Blake int64_t bytes) 242782d97efSEric Blake { 243782d97efSEric Blake return MIN(bytes, s->bdev_length - offset); 244782d97efSEric Blake } 245782d97efSEric Blake 246782d97efSEric Blake /* Round offset and/or bytes to target cluster if COW is needed, and 247782d97efSEric Blake * return the offset of the adjusted tail against original. */ 248782d97efSEric Blake static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset, 249ae4cc877SEric Blake uint64_t *bytes) 250893f7ebaSPaolo Bonzini { 251e5b43573SFam Zheng bool need_cow; 252e5b43573SFam Zheng int ret = 0; 253782d97efSEric Blake int64_t align_offset = *offset; 2547cfd5275SEric Blake int64_t align_bytes = *bytes; 255782d97efSEric Blake int max_bytes = s->granularity * s->max_iov; 256893f7ebaSPaolo Bonzini 257782d97efSEric Blake need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap); 258782d97efSEric Blake need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity, 259e5b43573SFam Zheng s->cow_bitmap); 260e5b43573SFam Zheng if (need_cow) { 261782d97efSEric Blake bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes, 262782d97efSEric Blake &align_offset, &align_bytes); 2638f0720ecSPaolo Bonzini } 2648f0720ecSPaolo Bonzini 265782d97efSEric Blake if (align_bytes > max_bytes) { 266782d97efSEric Blake align_bytes = max_bytes; 267e5b43573SFam Zheng if (need_cow) { 268782d97efSEric Blake align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size); 269e5b43573SFam Zheng } 270e5b43573SFam Zheng } 271782d97efSEric Blake /* Clipping may result in align_bytes unaligned to chunk boundary, but 2724150ae60SFam Zheng * that doesn't matter because it's already the end of source image. */ 273782d97efSEric Blake align_bytes = mirror_clip_bytes(s, align_offset, align_bytes); 274402a4741SPaolo Bonzini 275782d97efSEric Blake ret = align_offset + align_bytes - (*offset + *bytes); 276782d97efSEric Blake *offset = align_offset; 277782d97efSEric Blake *bytes = align_bytes; 278e5b43573SFam Zheng assert(ret >= 0); 279e5b43573SFam Zheng return ret; 280e5b43573SFam Zheng } 281e5b43573SFam Zheng 282*d06107adSMax Reitz static inline void mirror_wait_for_any_operation(MirrorBlockJob *s, bool active) 28321cd917fSFam Zheng { 28412aa4082SMax Reitz MirrorOp *op; 28512aa4082SMax Reitz 2861181e19aSMax Reitz QTAILQ_FOREACH(op, &s->ops_in_flight, next) { 2871181e19aSMax Reitz /* Do not wait on pseudo ops, because it may in turn wait on 2881181e19aSMax Reitz * some other operation to start, which may in fact be the 2891181e19aSMax Reitz * caller of this function. 
Since there is only one pseudo op 2901181e19aSMax Reitz * at any given time, we will always find some real operation 2911181e19aSMax Reitz * to wait on. */ 292*d06107adSMax Reitz if (!op->is_pseudo_op && op->is_active_write == active) { 29312aa4082SMax Reitz qemu_co_queue_wait(&op->waiting_requests, NULL); 2941181e19aSMax Reitz return; 2951181e19aSMax Reitz } 2961181e19aSMax Reitz } 2971181e19aSMax Reitz abort(); 29821cd917fSFam Zheng } 29921cd917fSFam Zheng 300*d06107adSMax Reitz static inline void mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s) 301*d06107adSMax Reitz { 302*d06107adSMax Reitz /* Only non-active operations use up in-flight slots */ 303*d06107adSMax Reitz mirror_wait_for_any_operation(s, false); 304*d06107adSMax Reitz } 305*d06107adSMax Reitz 3062e1990b2SMax Reitz /* Perform a mirror copy operation. 3072e1990b2SMax Reitz * 3082e1990b2SMax Reitz * *op->bytes_handled is set to the number of bytes copied after and 3092e1990b2SMax Reitz * including offset, excluding any bytes copied prior to offset due 3102e1990b2SMax Reitz * to alignment. This will be op->bytes if no alignment is necessary, 3112e1990b2SMax Reitz * or (new_end - op->offset) if the tail is rounded up or down due to 312e5b43573SFam Zheng * alignment or buffer limit. 313402a4741SPaolo Bonzini */ 3142e1990b2SMax Reitz static void coroutine_fn mirror_co_read(void *opaque) 315e5b43573SFam Zheng { 3162e1990b2SMax Reitz MirrorOp *op = opaque; 3172e1990b2SMax Reitz MirrorBlockJob *s = op->s; 318ae4cc877SEric Blake int nb_chunks; 319ae4cc877SEric Blake uint64_t ret; 320ae4cc877SEric Blake uint64_t max_bytes; 321402a4741SPaolo Bonzini 322ae4cc877SEric Blake max_bytes = s->granularity * s->max_iov; 323e5b43573SFam Zheng 324e5b43573SFam Zheng /* We can only handle as much as buf_size at a time. */ 3252e1990b2SMax Reitz op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes)); 3262e1990b2SMax Reitz assert(op->bytes); 3272e1990b2SMax Reitz assert(op->bytes < BDRV_REQUEST_MAX_BYTES); 3282e1990b2SMax Reitz *op->bytes_handled = op->bytes; 329e5b43573SFam Zheng 330e5b43573SFam Zheng if (s->cow_bitmap) { 3312e1990b2SMax Reitz *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes); 332e5b43573SFam Zheng } 3332e1990b2SMax Reitz /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */ 3342e1990b2SMax Reitz assert(*op->bytes_handled <= UINT_MAX); 3352e1990b2SMax Reitz assert(op->bytes <= s->buf_size); 336ae4cc877SEric Blake /* The offset is granularity-aligned because: 337e5b43573SFam Zheng * 1) Caller passes in aligned values; 338e5b43573SFam Zheng * 2) mirror_cow_align is used only when target cluster is larger. */ 3392e1990b2SMax Reitz assert(QEMU_IS_ALIGNED(op->offset, s->granularity)); 340ae4cc877SEric Blake /* The range is sector-aligned, since bdrv_getlength() rounds up. */ 3412e1990b2SMax Reitz assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE)); 3422e1990b2SMax Reitz nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); 343e5b43573SFam Zheng 344e5b43573SFam Zheng while (s->buf_free_count < nb_chunks) { 3452e1990b2SMax Reitz trace_mirror_yield_in_flight(s, op->offset, s->in_flight); 3461181e19aSMax Reitz mirror_wait_for_free_in_flight_slot(s); 347b812f671SPaolo Bonzini } 348b812f671SPaolo Bonzini 349402a4741SPaolo Bonzini /* Now make a QEMUIOVector taking enough granularity-sized chunks 350402a4741SPaolo Bonzini * from s->buf_free. 
351402a4741SPaolo Bonzini */ 352402a4741SPaolo Bonzini qemu_iovec_init(&op->qiov, nb_chunks); 353402a4741SPaolo Bonzini while (nb_chunks-- > 0) { 354402a4741SPaolo Bonzini MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); 3552e1990b2SMax Reitz size_t remaining = op->bytes - op->qiov.size; 3565a0f6fd5SKevin Wolf 357402a4741SPaolo Bonzini QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); 358402a4741SPaolo Bonzini s->buf_free_count--; 3595a0f6fd5SKevin Wolf qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining)); 360402a4741SPaolo Bonzini } 361402a4741SPaolo Bonzini 362893f7ebaSPaolo Bonzini /* Copy the dirty cluster. */ 363bd48bde8SPaolo Bonzini s->in_flight++; 3642e1990b2SMax Reitz s->bytes_in_flight += op->bytes; 3652e1990b2SMax Reitz trace_mirror_one_iteration(s, op->offset, op->bytes); 366dcfb3bebSFam Zheng 367138f9fffSMax Reitz ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes, 368138f9fffSMax Reitz &op->qiov, 0); 3692e1990b2SMax Reitz mirror_read_complete(op, ret); 370e5b43573SFam Zheng } 371e5b43573SFam Zheng 3722e1990b2SMax Reitz static void coroutine_fn mirror_co_zero(void *opaque) 373e5b43573SFam Zheng { 3742e1990b2SMax Reitz MirrorOp *op = opaque; 3752e1990b2SMax Reitz int ret; 376e5b43573SFam Zheng 3772e1990b2SMax Reitz op->s->in_flight++; 3782e1990b2SMax Reitz op->s->bytes_in_flight += op->bytes; 3792e1990b2SMax Reitz *op->bytes_handled = op->bytes; 380e5b43573SFam Zheng 3812e1990b2SMax Reitz ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes, 3822e1990b2SMax Reitz op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0); 3832e1990b2SMax Reitz mirror_write_complete(op, ret); 384e5b43573SFam Zheng } 3852e1990b2SMax Reitz 3862e1990b2SMax Reitz static void coroutine_fn mirror_co_discard(void *opaque) 3872e1990b2SMax Reitz { 3882e1990b2SMax Reitz MirrorOp *op = opaque; 3892e1990b2SMax Reitz int ret; 3902e1990b2SMax Reitz 3912e1990b2SMax Reitz op->s->in_flight++; 3922e1990b2SMax Reitz op->s->bytes_in_flight += op->bytes; 3932e1990b2SMax Reitz *op->bytes_handled = op->bytes; 3942e1990b2SMax Reitz 3952e1990b2SMax Reitz ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes); 3962e1990b2SMax Reitz mirror_write_complete(op, ret); 397e5b43573SFam Zheng } 398e5b43573SFam Zheng 3994295c5fcSMax Reitz static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, 4004295c5fcSMax Reitz unsigned bytes, MirrorMethod mirror_method) 4014295c5fcSMax Reitz { 4022e1990b2SMax Reitz MirrorOp *op; 4032e1990b2SMax Reitz Coroutine *co; 4042e1990b2SMax Reitz int64_t bytes_handled = -1; 4052e1990b2SMax Reitz 4062e1990b2SMax Reitz op = g_new(MirrorOp, 1); 4072e1990b2SMax Reitz *op = (MirrorOp){ 4082e1990b2SMax Reitz .s = s, 4092e1990b2SMax Reitz .offset = offset, 4102e1990b2SMax Reitz .bytes = bytes, 4112e1990b2SMax Reitz .bytes_handled = &bytes_handled, 4122e1990b2SMax Reitz }; 41312aa4082SMax Reitz qemu_co_queue_init(&op->waiting_requests); 4142e1990b2SMax Reitz 4154295c5fcSMax Reitz switch (mirror_method) { 4164295c5fcSMax Reitz case MIRROR_METHOD_COPY: 4172e1990b2SMax Reitz co = qemu_coroutine_create(mirror_co_read, op); 4182e1990b2SMax Reitz break; 4194295c5fcSMax Reitz case MIRROR_METHOD_ZERO: 4202e1990b2SMax Reitz co = qemu_coroutine_create(mirror_co_zero, op); 4212e1990b2SMax Reitz break; 4224295c5fcSMax Reitz case MIRROR_METHOD_DISCARD: 4232e1990b2SMax Reitz co = qemu_coroutine_create(mirror_co_discard, op); 4242e1990b2SMax Reitz break; 4254295c5fcSMax Reitz default: 4264295c5fcSMax Reitz abort(); 4274295c5fcSMax Reitz } 4282e1990b2SMax Reitz 42912aa4082SMax Reitz 
QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); 4302e1990b2SMax Reitz qemu_coroutine_enter(co); 4312e1990b2SMax Reitz /* At this point, ownership of op has been moved to the coroutine 4322e1990b2SMax Reitz * and the object may already be freed */ 4332e1990b2SMax Reitz 4342e1990b2SMax Reitz /* Assert that this value has been set */ 4352e1990b2SMax Reitz assert(bytes_handled >= 0); 4362e1990b2SMax Reitz 4372e1990b2SMax Reitz /* Same assertion as in mirror_co_read() (and for mirror_co_read() 4382e1990b2SMax Reitz * and mirror_co_discard(), bytes_handled == op->bytes, which 4392e1990b2SMax Reitz * is the @bytes parameter given to this function) */ 4402e1990b2SMax Reitz assert(bytes_handled <= UINT_MAX); 4412e1990b2SMax Reitz return bytes_handled; 4424295c5fcSMax Reitz } 4434295c5fcSMax Reitz 444e5b43573SFam Zheng static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) 445e5b43573SFam Zheng { 446138f9fffSMax Reitz BlockDriverState *source = s->mirror_top_bs->backing->bs; 4471181e19aSMax Reitz MirrorOp *pseudo_op; 4481181e19aSMax Reitz int64_t offset; 4491181e19aSMax Reitz uint64_t delay_ns = 0, ret = 0; 450e5b43573SFam Zheng /* At least the first dirty chunk is mirrored in one iteration. */ 451e5b43573SFam Zheng int nb_chunks = 1; 4524b5004d9SDenis V. Lunev bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)); 453b436982fSEric Blake int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES); 454e5b43573SFam Zheng 455b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_lock(s->dirty_bitmap); 456f798184cSEric Blake offset = bdrv_dirty_iter_next(s->dbi); 457fb2ef791SEric Blake if (offset < 0) { 458dc162c8eSFam Zheng bdrv_set_dirty_iter(s->dbi, 0); 459f798184cSEric Blake offset = bdrv_dirty_iter_next(s->dbi); 4609a46dba7SEric Blake trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap)); 461fb2ef791SEric Blake assert(offset >= 0); 462e5b43573SFam Zheng } 463b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_unlock(s->dirty_bitmap); 464e5b43573SFam Zheng 4651181e19aSMax Reitz mirror_wait_on_conflicts(NULL, s, offset, 1); 4669c83625bSMax Reitz 467da01ff7fSKevin Wolf job_pause_point(&s->common.job); 468565ac01fSStefan Hajnoczi 469e5b43573SFam Zheng /* Find the number of consecutive dirty chunks following the first dirty 470e5b43573SFam Zheng * one, and wait for in flight requests in them. 
*/ 471b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_lock(s->dirty_bitmap); 472fb2ef791SEric Blake while (nb_chunks * s->granularity < s->buf_size) { 473dc162c8eSFam Zheng int64_t next_dirty; 474fb2ef791SEric Blake int64_t next_offset = offset + nb_chunks * s->granularity; 475fb2ef791SEric Blake int64_t next_chunk = next_offset / s->granularity; 476fb2ef791SEric Blake if (next_offset >= s->bdev_length || 4773b5d4df0SEric Blake !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) { 478e5b43573SFam Zheng break; 479e5b43573SFam Zheng } 480e5b43573SFam Zheng if (test_bit(next_chunk, s->in_flight_bitmap)) { 481e5b43573SFam Zheng break; 482e5b43573SFam Zheng } 4839c83625bSMax Reitz 484f798184cSEric Blake next_dirty = bdrv_dirty_iter_next(s->dbi); 485fb2ef791SEric Blake if (next_dirty > next_offset || next_dirty < 0) { 486f27a2742SMax Reitz /* The bitmap iterator's cache is stale, refresh it */ 487715a74d8SEric Blake bdrv_set_dirty_iter(s->dbi, next_offset); 488f798184cSEric Blake next_dirty = bdrv_dirty_iter_next(s->dbi); 489f27a2742SMax Reitz } 490fb2ef791SEric Blake assert(next_dirty == next_offset); 491e5b43573SFam Zheng nb_chunks++; 492e5b43573SFam Zheng } 493e5b43573SFam Zheng 494e5b43573SFam Zheng /* Clear dirty bits before querying the block status, because 49531826642SEric Blake * calling bdrv_block_status_above could yield - if some blocks are 496e5b43573SFam Zheng * marked dirty in this window, we need to know. 497e5b43573SFam Zheng */ 498e0d7f73eSEric Blake bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset, 499e0d7f73eSEric Blake nb_chunks * s->granularity); 500b64bd51eSPaolo Bonzini bdrv_dirty_bitmap_unlock(s->dirty_bitmap); 501b64bd51eSPaolo Bonzini 5021181e19aSMax Reitz /* Before claiming an area in the in-flight bitmap, we have to 5031181e19aSMax Reitz * create a MirrorOp for it so that conflicting requests can wait 5041181e19aSMax Reitz * for it. mirror_perform() will create the real MirrorOps later, 5051181e19aSMax Reitz * for now we just create a pseudo operation that will wake up all 5061181e19aSMax Reitz * conflicting requests once all real operations have been 5071181e19aSMax Reitz * launched. 
*/ 5081181e19aSMax Reitz pseudo_op = g_new(MirrorOp, 1); 5091181e19aSMax Reitz *pseudo_op = (MirrorOp){ 5101181e19aSMax Reitz .offset = offset, 5111181e19aSMax Reitz .bytes = nb_chunks * s->granularity, 5121181e19aSMax Reitz .is_pseudo_op = true, 5131181e19aSMax Reitz }; 5141181e19aSMax Reitz qemu_co_queue_init(&pseudo_op->waiting_requests); 5151181e19aSMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next); 5161181e19aSMax Reitz 517fb2ef791SEric Blake bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks); 518fb2ef791SEric Blake while (nb_chunks > 0 && offset < s->bdev_length) { 51931826642SEric Blake int ret; 5207cfd5275SEric Blake int64_t io_bytes; 521f3e4ce4aSEric Blake int64_t io_bytes_acct; 5224295c5fcSMax Reitz MirrorMethod mirror_method = MIRROR_METHOD_COPY; 523e5b43573SFam Zheng 524fb2ef791SEric Blake assert(!(offset % s->granularity)); 52531826642SEric Blake ret = bdrv_block_status_above(source, NULL, offset, 52631826642SEric Blake nb_chunks * s->granularity, 52731826642SEric Blake &io_bytes, NULL, NULL); 528e5b43573SFam Zheng if (ret < 0) { 529fb2ef791SEric Blake io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes); 5300965a41eSVladimir Sementsov-Ogievskiy } else if (ret & BDRV_BLOCK_DATA) { 531fb2ef791SEric Blake io_bytes = MIN(io_bytes, max_io_bytes); 532e5b43573SFam Zheng } 533e5b43573SFam Zheng 534fb2ef791SEric Blake io_bytes -= io_bytes % s->granularity; 535fb2ef791SEric Blake if (io_bytes < s->granularity) { 536fb2ef791SEric Blake io_bytes = s->granularity; 537e5b43573SFam Zheng } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) { 538fb2ef791SEric Blake int64_t target_offset; 5397cfd5275SEric Blake int64_t target_bytes; 540fb2ef791SEric Blake bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes, 541fb2ef791SEric Blake &target_offset, &target_bytes); 542fb2ef791SEric Blake if (target_offset == offset && 543fb2ef791SEric Blake target_bytes == io_bytes) { 544e5b43573SFam Zheng mirror_method = ret & BDRV_BLOCK_ZERO ? 545e5b43573SFam Zheng MIRROR_METHOD_ZERO : 546e5b43573SFam Zheng MIRROR_METHOD_DISCARD; 547e5b43573SFam Zheng } 548e5b43573SFam Zheng } 549e5b43573SFam Zheng 550cf56a3c6SDenis V. Lunev while (s->in_flight >= MAX_IN_FLIGHT) { 551fb2ef791SEric Blake trace_mirror_yield_in_flight(s, offset, s->in_flight); 5521181e19aSMax Reitz mirror_wait_for_free_in_flight_slot(s); 553cf56a3c6SDenis V. Lunev } 554cf56a3c6SDenis V. Lunev 555dbaa7b57SVladimir Sementsov-Ogievskiy if (s->ret < 0) { 5561181e19aSMax Reitz ret = 0; 5571181e19aSMax Reitz goto fail; 558dbaa7b57SVladimir Sementsov-Ogievskiy } 559dbaa7b57SVladimir Sementsov-Ogievskiy 560fb2ef791SEric Blake io_bytes = mirror_clip_bytes(s, offset, io_bytes); 5614295c5fcSMax Reitz io_bytes = mirror_perform(s, offset, io_bytes, mirror_method); 5624295c5fcSMax Reitz if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) { 563f3e4ce4aSEric Blake io_bytes_acct = 0; 5644b5004d9SDenis V. Lunev } else { 565fb2ef791SEric Blake io_bytes_acct = io_bytes; 5664b5004d9SDenis V. 
Lunev } 567fb2ef791SEric Blake assert(io_bytes); 568fb2ef791SEric Blake offset += io_bytes; 569fb2ef791SEric Blake nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity); 570dee81d51SKevin Wolf delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct); 571dcfb3bebSFam Zheng } 5721181e19aSMax Reitz 5731181e19aSMax Reitz ret = delay_ns; 5741181e19aSMax Reitz fail: 5751181e19aSMax Reitz QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next); 5761181e19aSMax Reitz qemu_co_queue_restart_all(&pseudo_op->waiting_requests); 5771181e19aSMax Reitz g_free(pseudo_op); 5781181e19aSMax Reitz 5791181e19aSMax Reitz return ret; 580893f7ebaSPaolo Bonzini } 581b952b558SPaolo Bonzini 582402a4741SPaolo Bonzini static void mirror_free_init(MirrorBlockJob *s) 583402a4741SPaolo Bonzini { 584402a4741SPaolo Bonzini int granularity = s->granularity; 585402a4741SPaolo Bonzini size_t buf_size = s->buf_size; 586402a4741SPaolo Bonzini uint8_t *buf = s->buf; 587402a4741SPaolo Bonzini 588402a4741SPaolo Bonzini assert(s->buf_free_count == 0); 589402a4741SPaolo Bonzini QSIMPLEQ_INIT(&s->buf_free); 590402a4741SPaolo Bonzini while (buf_size != 0) { 591402a4741SPaolo Bonzini MirrorBuffer *cur = (MirrorBuffer *)buf; 592402a4741SPaolo Bonzini QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next); 593402a4741SPaolo Bonzini s->buf_free_count++; 594402a4741SPaolo Bonzini buf_size -= granularity; 595402a4741SPaolo Bonzini buf += granularity; 596402a4741SPaolo Bonzini } 597402a4741SPaolo Bonzini } 598402a4741SPaolo Bonzini 599bae8196dSPaolo Bonzini /* This is also used for the .pause callback. There is no matching 600bae8196dSPaolo Bonzini * mirror_resume() because mirror_run() will begin iterating again 601bae8196dSPaolo Bonzini * when the job is resumed. 602bae8196dSPaolo Bonzini */ 603bae8196dSPaolo Bonzini static void mirror_wait_for_all_io(MirrorBlockJob *s) 604bd48bde8SPaolo Bonzini { 605bd48bde8SPaolo Bonzini while (s->in_flight > 0) { 6061181e19aSMax Reitz mirror_wait_for_free_in_flight_slot(s); 607bd48bde8SPaolo Bonzini } 608893f7ebaSPaolo Bonzini } 609893f7ebaSPaolo Bonzini 6105a7e7a0bSStefan Hajnoczi typedef struct { 6115a7e7a0bSStefan Hajnoczi int ret; 6125a7e7a0bSStefan Hajnoczi } MirrorExitData; 6135a7e7a0bSStefan Hajnoczi 6141908a559SKevin Wolf static void mirror_exit(Job *job, void *opaque) 6155a7e7a0bSStefan Hajnoczi { 6161908a559SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 6171908a559SKevin Wolf BlockJob *bjob = &s->common; 6185a7e7a0bSStefan Hajnoczi MirrorExitData *data = opaque; 619429076e8SMax Reitz MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque; 6205a7e7a0bSStefan Hajnoczi AioContext *replace_aio_context = NULL; 621138f9fffSMax Reitz BlockDriverState *src = s->mirror_top_bs->backing->bs; 622e253f4b8SKevin Wolf BlockDriverState *target_bs = blk_bs(s->target); 6234ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs = s->mirror_top_bs; 62412fa4af6SKevin Wolf Error *local_err = NULL; 6253f09bfbcSKevin Wolf 6262119882cSPaolo Bonzini bdrv_release_dirty_bitmap(src, s->dirty_bitmap); 6272119882cSPaolo Bonzini 6283f09bfbcSKevin Wolf /* Make sure that the source BDS doesn't go away before we called 6293d70ff53SKevin Wolf * job_completed(). 
*/ 6303f09bfbcSKevin Wolf bdrv_ref(src); 6314ef85a9cSKevin Wolf bdrv_ref(mirror_top_bs); 6327d9fcb39SKevin Wolf bdrv_ref(target_bs); 6337d9fcb39SKevin Wolf 6347d9fcb39SKevin Wolf /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before 6357d9fcb39SKevin Wolf * inserting target_bs at s->to_replace, where we might not be able to get 63663c8ef28SKevin Wolf * these permissions. 63763c8ef28SKevin Wolf * 63863c8ef28SKevin Wolf * Note that blk_unref() alone doesn't necessarily drop permissions because 63963c8ef28SKevin Wolf * we might be running nested inside mirror_drain(), which takes an extra 64063c8ef28SKevin Wolf * reference, so use an explicit blk_set_perm() first. */ 64163c8ef28SKevin Wolf blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort); 6427d9fcb39SKevin Wolf blk_unref(s->target); 6437d9fcb39SKevin Wolf s->target = NULL; 6444ef85a9cSKevin Wolf 6454ef85a9cSKevin Wolf /* We don't access the source any more. Dropping any WRITE/RESIZE is 6464ef85a9cSKevin Wolf * required before it could become a backing file of target_bs. */ 6474ef85a9cSKevin Wolf bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL, 6484ef85a9cSKevin Wolf &error_abort); 6494ef85a9cSKevin Wolf if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) { 6504ef85a9cSKevin Wolf BlockDriverState *backing = s->is_none_mode ? src : s->base; 6514ef85a9cSKevin Wolf if (backing_bs(target_bs) != backing) { 65212fa4af6SKevin Wolf bdrv_set_backing_hd(target_bs, backing, &local_err); 65312fa4af6SKevin Wolf if (local_err) { 65412fa4af6SKevin Wolf error_report_err(local_err); 65512fa4af6SKevin Wolf data->ret = -EPERM; 65612fa4af6SKevin Wolf } 6574ef85a9cSKevin Wolf } 6584ef85a9cSKevin Wolf } 6595a7e7a0bSStefan Hajnoczi 6605a7e7a0bSStefan Hajnoczi if (s->to_replace) { 6615a7e7a0bSStefan Hajnoczi replace_aio_context = bdrv_get_aio_context(s->to_replace); 6625a7e7a0bSStefan Hajnoczi aio_context_acquire(replace_aio_context); 6635a7e7a0bSStefan Hajnoczi } 6645a7e7a0bSStefan Hajnoczi 6655a7e7a0bSStefan Hajnoczi if (s->should_complete && data->ret == 0) { 666e253f4b8SKevin Wolf BlockDriverState *to_replace = src; 6675a7e7a0bSStefan Hajnoczi if (s->to_replace) { 6685a7e7a0bSStefan Hajnoczi to_replace = s->to_replace; 6695a7e7a0bSStefan Hajnoczi } 67040365552SKevin Wolf 671e253f4b8SKevin Wolf if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) { 672e253f4b8SKevin Wolf bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL); 6735a7e7a0bSStefan Hajnoczi } 674b8804815SKevin Wolf 675b8804815SKevin Wolf /* The mirror job has no requests in flight any more, but we need to 676b8804815SKevin Wolf * drain potential other users of the BDS before changing the graph. 
*/ 677e253f4b8SKevin Wolf bdrv_drained_begin(target_bs); 6785fe31c25SKevin Wolf bdrv_replace_node(to_replace, target_bs, &local_err); 679e253f4b8SKevin Wolf bdrv_drained_end(target_bs); 6805fe31c25SKevin Wolf if (local_err) { 6815fe31c25SKevin Wolf error_report_err(local_err); 6825fe31c25SKevin Wolf data->ret = -EPERM; 6835fe31c25SKevin Wolf } 6845a7e7a0bSStefan Hajnoczi } 6855a7e7a0bSStefan Hajnoczi if (s->to_replace) { 6865a7e7a0bSStefan Hajnoczi bdrv_op_unblock_all(s->to_replace, s->replace_blocker); 6875a7e7a0bSStefan Hajnoczi error_free(s->replace_blocker); 6885a7e7a0bSStefan Hajnoczi bdrv_unref(s->to_replace); 6895a7e7a0bSStefan Hajnoczi } 6905a7e7a0bSStefan Hajnoczi if (replace_aio_context) { 6915a7e7a0bSStefan Hajnoczi aio_context_release(replace_aio_context); 6925a7e7a0bSStefan Hajnoczi } 6935a7e7a0bSStefan Hajnoczi g_free(s->replaces); 6947d9fcb39SKevin Wolf bdrv_unref(target_bs); 6954ef85a9cSKevin Wolf 6964ef85a9cSKevin Wolf /* Remove the mirror filter driver from the graph. Before this, get rid of 6974ef85a9cSKevin Wolf * the blockers on the intermediate nodes so that the resulting state is 6980bf74767SKevin Wolf * valid. Also give up permissions on mirror_top_bs->backing, which might 6990bf74767SKevin Wolf * block the removal. */ 7001908a559SKevin Wolf block_job_remove_all_bdrv(bjob); 701c1cef672SFam Zheng bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL, 702c1cef672SFam Zheng &error_abort); 7035fe31c25SKevin Wolf bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort); 7044ef85a9cSKevin Wolf 7054ef85a9cSKevin Wolf /* We just changed the BDS the job BB refers to (with either or both of the 7065fe31c25SKevin Wolf * bdrv_replace_node() calls), so switch the BB back so the cleanup does 7075fe31c25SKevin Wolf * the right thing. We don't need any permissions any more now. */ 7081908a559SKevin Wolf blk_remove_bs(bjob->blk); 7091908a559SKevin Wolf blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); 7101908a559SKevin Wolf blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort); 7114ef85a9cSKevin Wolf 712429076e8SMax Reitz bs_opaque->job = NULL; 7131266c9b9SKevin Wolf job_completed(job, data->ret, NULL); 7144ef85a9cSKevin Wolf 7155a7e7a0bSStefan Hajnoczi g_free(data); 716176c3699SFam Zheng bdrv_drained_end(src); 7174ef85a9cSKevin Wolf bdrv_unref(mirror_top_bs); 7183f09bfbcSKevin Wolf bdrv_unref(src); 7195a7e7a0bSStefan Hajnoczi } 7205a7e7a0bSStefan Hajnoczi 72149efb1f5SDenis V. Lunev static void mirror_throttle(MirrorBlockJob *s) 72249efb1f5SDenis V. Lunev { 72349efb1f5SDenis V. Lunev int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 72449efb1f5SDenis V. Lunev 72518bb6928SKevin Wolf if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) { 72649efb1f5SDenis V. Lunev s->last_pause_ns = now; 7275d43e86eSKevin Wolf job_sleep_ns(&s->common.job, 0); 72849efb1f5SDenis V. Lunev } else { 729da01ff7fSKevin Wolf job_pause_point(&s->common.job); 73049efb1f5SDenis V. Lunev } 73149efb1f5SDenis V. Lunev } 73249efb1f5SDenis V. Lunev 733c0b363adSDenis V. Lunev static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) 734c0b363adSDenis V. Lunev { 73523ca459aSEric Blake int64_t offset; 736c0b363adSDenis V. Lunev BlockDriverState *base = s->base; 737138f9fffSMax Reitz BlockDriverState *bs = s->mirror_top_bs->backing->bs; 738c0b363adSDenis V. Lunev BlockDriverState *target_bs = blk_bs(s->target); 73923ca459aSEric Blake int ret; 74051b0a488SEric Blake int64_t count; 741c0b363adSDenis V. Lunev 742b7d5062cSDenis V. 
Lunev if (base == NULL && !bdrv_has_zero_init(target_bs)) { 743c7c2769cSDenis V. Lunev if (!bdrv_can_write_zeroes_with_unmap(target_bs)) { 744e0d7f73eSEric Blake bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length); 745b7d5062cSDenis V. Lunev return 0; 746b7d5062cSDenis V. Lunev } 747b7d5062cSDenis V. Lunev 74890ab48ebSAnton Nefedov s->initial_zeroing_ongoing = true; 74923ca459aSEric Blake for (offset = 0; offset < s->bdev_length; ) { 75023ca459aSEric Blake int bytes = MIN(s->bdev_length - offset, 75123ca459aSEric Blake QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); 752c7c2769cSDenis V. Lunev 753c7c2769cSDenis V. Lunev mirror_throttle(s); 754c7c2769cSDenis V. Lunev 755daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job)) { 75690ab48ebSAnton Nefedov s->initial_zeroing_ongoing = false; 757c7c2769cSDenis V. Lunev return 0; 758c7c2769cSDenis V. Lunev } 759c7c2769cSDenis V. Lunev 760c7c2769cSDenis V. Lunev if (s->in_flight >= MAX_IN_FLIGHT) { 76167adf4b3SEric Blake trace_mirror_yield(s, UINT64_MAX, s->buf_free_count, 76267adf4b3SEric Blake s->in_flight); 7631181e19aSMax Reitz mirror_wait_for_free_in_flight_slot(s); 764c7c2769cSDenis V. Lunev continue; 765c7c2769cSDenis V. Lunev } 766c7c2769cSDenis V. Lunev 7674295c5fcSMax Reitz mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO); 76823ca459aSEric Blake offset += bytes; 769c7c2769cSDenis V. Lunev } 770c7c2769cSDenis V. Lunev 771bae8196dSPaolo Bonzini mirror_wait_for_all_io(s); 77290ab48ebSAnton Nefedov s->initial_zeroing_ongoing = false; 773c7c2769cSDenis V. Lunev } 774c7c2769cSDenis V. Lunev 775c0b363adSDenis V. Lunev /* First part, loop on the sectors and initialize the dirty bitmap. */ 77623ca459aSEric Blake for (offset = 0; offset < s->bdev_length; ) { 777c0b363adSDenis V. Lunev /* Just to make sure we are not exceeding int limit. */ 77823ca459aSEric Blake int bytes = MIN(s->bdev_length - offset, 77923ca459aSEric Blake QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); 780c0b363adSDenis V. Lunev 781c0b363adSDenis V. Lunev mirror_throttle(s); 782c0b363adSDenis V. Lunev 783daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job)) { 784c0b363adSDenis V. Lunev return 0; 785c0b363adSDenis V. Lunev } 786c0b363adSDenis V. Lunev 78723ca459aSEric Blake ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count); 788c0b363adSDenis V. Lunev if (ret < 0) { 789c0b363adSDenis V. Lunev return ret; 790c0b363adSDenis V. Lunev } 791c0b363adSDenis V. Lunev 79223ca459aSEric Blake assert(count); 793b7d5062cSDenis V. Lunev if (ret == 1) { 79423ca459aSEric Blake bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count); 795c0b363adSDenis V. Lunev } 79623ca459aSEric Blake offset += count; 797c0b363adSDenis V. Lunev } 798c0b363adSDenis V. Lunev return 0; 799c0b363adSDenis V. Lunev } 800c0b363adSDenis V. Lunev 801bdffb31dSPaolo Bonzini /* Called when going out of the streaming phase to flush the bulk of the 802bdffb31dSPaolo Bonzini * data to the medium, or just before completing. 
803bdffb31dSPaolo Bonzini */ 804bdffb31dSPaolo Bonzini static int mirror_flush(MirrorBlockJob *s) 805bdffb31dSPaolo Bonzini { 806bdffb31dSPaolo Bonzini int ret = blk_flush(s->target); 807bdffb31dSPaolo Bonzini if (ret < 0) { 808bdffb31dSPaolo Bonzini if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) { 809bdffb31dSPaolo Bonzini s->ret = ret; 810bdffb31dSPaolo Bonzini } 811bdffb31dSPaolo Bonzini } 812bdffb31dSPaolo Bonzini return ret; 813bdffb31dSPaolo Bonzini } 814bdffb31dSPaolo Bonzini 815893f7ebaSPaolo Bonzini static void coroutine_fn mirror_run(void *opaque) 816893f7ebaSPaolo Bonzini { 817893f7ebaSPaolo Bonzini MirrorBlockJob *s = opaque; 8185a7e7a0bSStefan Hajnoczi MirrorExitData *data; 819138f9fffSMax Reitz BlockDriverState *bs = s->mirror_top_bs->backing->bs; 820e253f4b8SKevin Wolf BlockDriverState *target_bs = blk_bs(s->target); 8219a0cec66SPaolo Bonzini bool need_drain = true; 822c0b363adSDenis V. Lunev int64_t length; 823b812f671SPaolo Bonzini BlockDriverInfo bdi; 8241d33936eSJeff Cody char backing_filename[2]; /* we only need 2 characters because we are only 8251d33936eSJeff Cody checking for a NULL string */ 826893f7ebaSPaolo Bonzini int ret = 0; 827893f7ebaSPaolo Bonzini 828daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job)) { 829893f7ebaSPaolo Bonzini goto immediate_exit; 830893f7ebaSPaolo Bonzini } 831893f7ebaSPaolo Bonzini 832b21c7652SMax Reitz s->bdev_length = bdrv_getlength(bs); 833b21c7652SMax Reitz if (s->bdev_length < 0) { 834b21c7652SMax Reitz ret = s->bdev_length; 835373df5b1SFam Zheng goto immediate_exit; 836becc347eSKevin Wolf } 837becc347eSKevin Wolf 838becc347eSKevin Wolf /* Active commit must resize the base image if its size differs from the 839becc347eSKevin Wolf * active layer. */ 840becc347eSKevin Wolf if (s->base == blk_bs(s->target)) { 841becc347eSKevin Wolf int64_t base_length; 842becc347eSKevin Wolf 843becc347eSKevin Wolf base_length = blk_getlength(s->target); 844becc347eSKevin Wolf if (base_length < 0) { 845becc347eSKevin Wolf ret = base_length; 846becc347eSKevin Wolf goto immediate_exit; 847becc347eSKevin Wolf } 848becc347eSKevin Wolf 849becc347eSKevin Wolf if (s->bdev_length > base_length) { 8503a691c50SMax Reitz ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF, 8513a691c50SMax Reitz NULL); 852becc347eSKevin Wolf if (ret < 0) { 853becc347eSKevin Wolf goto immediate_exit; 854becc347eSKevin Wolf } 855becc347eSKevin Wolf } 856becc347eSKevin Wolf } 857becc347eSKevin Wolf 858becc347eSKevin Wolf if (s->bdev_length == 0) { 8592e1795b5SKevin Wolf /* Transition to the READY state and wait for complete. */ 8602e1795b5SKevin Wolf job_transition_to_ready(&s->common.job); 8619e48b025SFam Zheng s->synced = true; 862*d06107adSMax Reitz s->actively_synced = true; 863daa7f2f9SKevin Wolf while (!job_is_cancelled(&s->common.job) && !s->should_complete) { 864198c49ccSKevin Wolf job_yield(&s->common.job); 8659e48b025SFam Zheng } 866daa7f2f9SKevin Wolf s->common.job.cancelled = false; 8679e48b025SFam Zheng goto immediate_exit; 868893f7ebaSPaolo Bonzini } 869893f7ebaSPaolo Bonzini 870b21c7652SMax Reitz length = DIV_ROUND_UP(s->bdev_length, s->granularity); 871402a4741SPaolo Bonzini s->in_flight_bitmap = bitmap_new(length); 872402a4741SPaolo Bonzini 873b812f671SPaolo Bonzini /* If we have no backing file yet in the destination, we cannot let 874b812f671SPaolo Bonzini * the destination do COW. Instead, we copy sectors around the 875b812f671SPaolo Bonzini * dirty data if needed. We need a bitmap to do that. 
876b812f671SPaolo Bonzini */ 877e253f4b8SKevin Wolf bdrv_get_backing_filename(target_bs, backing_filename, 878b812f671SPaolo Bonzini sizeof(backing_filename)); 879e253f4b8SKevin Wolf if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) { 880b436982fSEric Blake s->target_cluster_size = bdi.cluster_size; 881b436982fSEric Blake } else { 882b436982fSEric Blake s->target_cluster_size = BDRV_SECTOR_SIZE; 883c3cc95bdSFam Zheng } 884b436982fSEric Blake if (backing_filename[0] && !target_bs->backing && 885b436982fSEric Blake s->granularity < s->target_cluster_size) { 886b436982fSEric Blake s->buf_size = MAX(s->buf_size, s->target_cluster_size); 887b812f671SPaolo Bonzini s->cow_bitmap = bitmap_new(length); 888b812f671SPaolo Bonzini } 889e253f4b8SKevin Wolf s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov); 890b812f671SPaolo Bonzini 8917504edf4SKevin Wolf s->buf = qemu_try_blockalign(bs, s->buf_size); 8927504edf4SKevin Wolf if (s->buf == NULL) { 8937504edf4SKevin Wolf ret = -ENOMEM; 8947504edf4SKevin Wolf goto immediate_exit; 8957504edf4SKevin Wolf } 8967504edf4SKevin Wolf 897402a4741SPaolo Bonzini mirror_free_init(s); 898893f7ebaSPaolo Bonzini 89949efb1f5SDenis V. Lunev s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 90003544a6eSFam Zheng if (!s->is_none_mode) { 901c0b363adSDenis V. Lunev ret = mirror_dirty_init(s); 902daa7f2f9SKevin Wolf if (ret < 0 || job_is_cancelled(&s->common.job)) { 9034c0cbd6fSFam Zheng goto immediate_exit; 9044c0cbd6fSFam Zheng } 905893f7ebaSPaolo Bonzini } 906893f7ebaSPaolo Bonzini 907dc162c8eSFam Zheng assert(!s->dbi); 908715a74d8SEric Blake s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap); 909893f7ebaSPaolo Bonzini for (;;) { 910cc8c9d6cSPaolo Bonzini uint64_t delay_ns = 0; 91149efb1f5SDenis V. Lunev int64_t cnt, delta; 912893f7ebaSPaolo Bonzini bool should_complete; 913893f7ebaSPaolo Bonzini 914*d06107adSMax Reitz /* Do not start passive operations while there are active 915*d06107adSMax Reitz * writes in progress */ 916*d06107adSMax Reitz while (s->in_active_write_counter) { 917*d06107adSMax Reitz mirror_wait_for_any_operation(s, true); 918*d06107adSMax Reitz } 919*d06107adSMax Reitz 920bd48bde8SPaolo Bonzini if (s->ret < 0) { 921bd48bde8SPaolo Bonzini ret = s->ret; 922893f7ebaSPaolo Bonzini goto immediate_exit; 923893f7ebaSPaolo Bonzini } 924bd48bde8SPaolo Bonzini 925da01ff7fSKevin Wolf job_pause_point(&s->common.job); 926565ac01fSStefan Hajnoczi 92720dca810SJohn Snow cnt = bdrv_get_dirty_count(s->dirty_bitmap); 92805df8a6aSKevin Wolf /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is 92905df8a6aSKevin Wolf * the number of bytes currently being processed; together those are 93005df8a6aSKevin Wolf * the current remaining operation length */ 93130a5c887SKevin Wolf job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt); 932bd48bde8SPaolo Bonzini 933bd48bde8SPaolo Bonzini /* Note that even when no rate limit is applied we need to yield 934a7282330SFam Zheng * periodically with no pending I/O so that bdrv_drain_all() returns. 93518bb6928SKevin Wolf * We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is 93618bb6928SKevin Wolf * an error, or when the source is clean, whichever comes first. */ 93749efb1f5SDenis V. Lunev delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; 93818bb6928SKevin Wolf if (delta < BLOCK_JOB_SLICE_TIME && 939bd48bde8SPaolo Bonzini s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 940cf56a3c6SDenis V. 
Lunev if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || 941402a4741SPaolo Bonzini (cnt == 0 && s->in_flight > 0)) { 9429a46dba7SEric Blake trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); 9431181e19aSMax Reitz mirror_wait_for_free_in_flight_slot(s); 944bd48bde8SPaolo Bonzini continue; 945bd48bde8SPaolo Bonzini } else if (cnt != 0) { 946cc8c9d6cSPaolo Bonzini delay_ns = mirror_iteration(s); 947893f7ebaSPaolo Bonzini } 948cc8c9d6cSPaolo Bonzini } 949893f7ebaSPaolo Bonzini 950893f7ebaSPaolo Bonzini should_complete = false; 951bd48bde8SPaolo Bonzini if (s->in_flight == 0 && cnt == 0) { 952893f7ebaSPaolo Bonzini trace_mirror_before_flush(s); 953bdffb31dSPaolo Bonzini if (!s->synced) { 954bdffb31dSPaolo Bonzini if (mirror_flush(s) < 0) { 955bdffb31dSPaolo Bonzini /* Go check s->ret. */ 956bdffb31dSPaolo Bonzini continue; 957893f7ebaSPaolo Bonzini } 958893f7ebaSPaolo Bonzini /* We're out of the streaming phase. From now on, if the job 959893f7ebaSPaolo Bonzini * is cancelled we will actually complete all pending I/O and 960893f7ebaSPaolo Bonzini * report completion. This way, block-job-cancel will leave 961893f7ebaSPaolo Bonzini * the target in a consistent state. 962893f7ebaSPaolo Bonzini */ 9632e1795b5SKevin Wolf job_transition_to_ready(&s->common.job); 964d63ffd87SPaolo Bonzini s->synced = true; 965*d06107adSMax Reitz if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) { 966*d06107adSMax Reitz s->actively_synced = true; 967*d06107adSMax Reitz } 968d63ffd87SPaolo Bonzini } 969d63ffd87SPaolo Bonzini 970d63ffd87SPaolo Bonzini should_complete = s->should_complete || 971daa7f2f9SKevin Wolf job_is_cancelled(&s->common.job); 97220dca810SJohn Snow cnt = bdrv_get_dirty_count(s->dirty_bitmap); 973893f7ebaSPaolo Bonzini } 974893f7ebaSPaolo Bonzini 975893f7ebaSPaolo Bonzini if (cnt == 0 && should_complete) { 976893f7ebaSPaolo Bonzini /* The dirty bitmap is not updated while operations are pending. 977893f7ebaSPaolo Bonzini * If we're about to exit, wait for pending operations before 978893f7ebaSPaolo Bonzini * calling bdrv_get_dirty_count(bs), or we may exit while the 979893f7ebaSPaolo Bonzini * source has dirty data to copy! 980893f7ebaSPaolo Bonzini * 981893f7ebaSPaolo Bonzini * Note that I/O can be submitted by the guest while 9829a0cec66SPaolo Bonzini * mirror_populate runs, so pause it now. Before deciding 9839a0cec66SPaolo Bonzini * whether to switch to target check one last time if I/O has 9849a0cec66SPaolo Bonzini * come in the meanwhile, and if not flush the data to disk. 985893f7ebaSPaolo Bonzini */ 9869a46dba7SEric Blake trace_mirror_before_drain(s, cnt); 9879a0cec66SPaolo Bonzini 9889a0cec66SPaolo Bonzini bdrv_drained_begin(bs); 98920dca810SJohn Snow cnt = bdrv_get_dirty_count(s->dirty_bitmap); 990bdffb31dSPaolo Bonzini if (cnt > 0 || mirror_flush(s) < 0) { 9919a0cec66SPaolo Bonzini bdrv_drained_end(bs); 9929a0cec66SPaolo Bonzini continue; 9939a0cec66SPaolo Bonzini } 9949a0cec66SPaolo Bonzini 9959a0cec66SPaolo Bonzini /* The two disks are in sync. Exit and report successful 9969a0cec66SPaolo Bonzini * completion. 
9979a0cec66SPaolo Bonzini */ 9989a0cec66SPaolo Bonzini assert(QLIST_EMPTY(&bs->tracked_requests)); 999daa7f2f9SKevin Wolf s->common.job.cancelled = false; 10009a0cec66SPaolo Bonzini need_drain = false; 10019a0cec66SPaolo Bonzini break; 1002893f7ebaSPaolo Bonzini } 1003893f7ebaSPaolo Bonzini 1004893f7ebaSPaolo Bonzini ret = 0; 1005ddc4115eSStefan Hajnoczi 1006ddc4115eSStefan Hajnoczi if (s->synced && !should_complete) { 100718bb6928SKevin Wolf delay_ns = (s->in_flight == 0 && 100818bb6928SKevin Wolf cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0); 1009ddc4115eSStefan Hajnoczi } 10109a46dba7SEric Blake trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); 10115d43e86eSKevin Wolf job_sleep_ns(&s->common.job, delay_ns); 1012daa7f2f9SKevin Wolf if (job_is_cancelled(&s->common.job) && 1013004e95dfSKevin Wolf (!s->synced || s->common.job.force_cancel)) 1014eb36639fSMax Reitz { 1015893f7ebaSPaolo Bonzini break; 1016893f7ebaSPaolo Bonzini } 101749efb1f5SDenis V. Lunev s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 1018893f7ebaSPaolo Bonzini } 1019893f7ebaSPaolo Bonzini 1020893f7ebaSPaolo Bonzini immediate_exit: 1021bd48bde8SPaolo Bonzini if (s->in_flight > 0) { 1022bd48bde8SPaolo Bonzini /* We get here only if something went wrong. Either the job failed, 1023bd48bde8SPaolo Bonzini * or it was cancelled prematurely so that we do not guarantee that 1024bd48bde8SPaolo Bonzini * the target is a copy of the source. 1025bd48bde8SPaolo Bonzini */ 1026004e95dfSKevin Wolf assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) && 1027daa7f2f9SKevin Wolf job_is_cancelled(&s->common.job))); 10289a0cec66SPaolo Bonzini assert(need_drain); 1029bae8196dSPaolo Bonzini mirror_wait_for_all_io(s); 1030bd48bde8SPaolo Bonzini } 1031bd48bde8SPaolo Bonzini 1032bd48bde8SPaolo Bonzini assert(s->in_flight == 0); 10337191bf31SMarkus Armbruster qemu_vfree(s->buf); 1034b812f671SPaolo Bonzini g_free(s->cow_bitmap); 1035402a4741SPaolo Bonzini g_free(s->in_flight_bitmap); 1036dc162c8eSFam Zheng bdrv_dirty_iter_free(s->dbi); 10375a7e7a0bSStefan Hajnoczi 10385a7e7a0bSStefan Hajnoczi data = g_malloc(sizeof(*data)); 10395a7e7a0bSStefan Hajnoczi data->ret = ret; 10409a0cec66SPaolo Bonzini 10419a0cec66SPaolo Bonzini if (need_drain) { 1042e253f4b8SKevin Wolf bdrv_drained_begin(bs); 10439a0cec66SPaolo Bonzini } 10441908a559SKevin Wolf job_defer_to_main_loop(&s->common.job, mirror_exit, data); 1045893f7ebaSPaolo Bonzini } 1046893f7ebaSPaolo Bonzini 10473453d972SKevin Wolf static void mirror_complete(Job *job, Error **errp) 1048d63ffd87SPaolo Bonzini { 10493453d972SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 10504ef85a9cSKevin Wolf BlockDriverState *target; 1051d63ffd87SPaolo Bonzini 1052274fcceeSMax Reitz target = blk_bs(s->target); 1053274fcceeSMax Reitz 1054d63ffd87SPaolo Bonzini if (!s->synced) { 10559df229c3SAlberto Garcia error_setg(errp, "The active block job '%s' cannot be completed", 10563453d972SKevin Wolf job->id); 1057d63ffd87SPaolo Bonzini return; 1058d63ffd87SPaolo Bonzini } 1059d63ffd87SPaolo Bonzini 1060274fcceeSMax Reitz if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) { 1061274fcceeSMax Reitz int ret; 1062274fcceeSMax Reitz 1063274fcceeSMax Reitz assert(!target->backing); 1064274fcceeSMax Reitz ret = bdrv_open_backing_file(target, NULL, "backing", errp); 1065274fcceeSMax Reitz if (ret < 0) { 1066274fcceeSMax Reitz return; 1067274fcceeSMax Reitz } 1068274fcceeSMax Reitz } 1069274fcceeSMax Reitz 107015d67298SChanglong Xie /* block all operations on to_replace bs */ 
107109158f00SBenoît Canet if (s->replaces) { 10725a7e7a0bSStefan Hajnoczi AioContext *replace_aio_context; 10735a7e7a0bSStefan Hajnoczi 1074e12f3784SWen Congyang s->to_replace = bdrv_find_node(s->replaces); 107509158f00SBenoît Canet if (!s->to_replace) { 1076e12f3784SWen Congyang error_setg(errp, "Node name '%s' not found", s->replaces); 107709158f00SBenoît Canet return; 107809158f00SBenoît Canet } 107909158f00SBenoît Canet 10805a7e7a0bSStefan Hajnoczi replace_aio_context = bdrv_get_aio_context(s->to_replace); 10815a7e7a0bSStefan Hajnoczi aio_context_acquire(replace_aio_context); 10825a7e7a0bSStefan Hajnoczi 10834ef85a9cSKevin Wolf /* TODO Translate this into permission system. Current definition of 10844ef85a9cSKevin Wolf * GRAPH_MOD would require to request it for the parents; they might 10854ef85a9cSKevin Wolf * not even be BlockDriverStates, however, so a BdrvChild can't address 10864ef85a9cSKevin Wolf * them. May need redefinition of GRAPH_MOD. */ 108709158f00SBenoît Canet error_setg(&s->replace_blocker, 108809158f00SBenoît Canet "block device is in use by block-job-complete"); 108909158f00SBenoît Canet bdrv_op_block_all(s->to_replace, s->replace_blocker); 109009158f00SBenoît Canet bdrv_ref(s->to_replace); 10915a7e7a0bSStefan Hajnoczi 10925a7e7a0bSStefan Hajnoczi aio_context_release(replace_aio_context); 109309158f00SBenoît Canet } 109409158f00SBenoît Canet 1095d63ffd87SPaolo Bonzini s->should_complete = true; 10963d70ff53SKevin Wolf job_enter(job); 1097d63ffd87SPaolo Bonzini } 1098d63ffd87SPaolo Bonzini 1099da01ff7fSKevin Wolf static void mirror_pause(Job *job) 1100565ac01fSStefan Hajnoczi { 1101da01ff7fSKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 1102565ac01fSStefan Hajnoczi 1103bae8196dSPaolo Bonzini mirror_wait_for_all_io(s); 1104565ac01fSStefan Hajnoczi } 1105565ac01fSStefan Hajnoczi 110689bd0305SKevin Wolf static bool mirror_drained_poll(BlockJob *job) 110789bd0305SKevin Wolf { 110889bd0305SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 110989bd0305SKevin Wolf return !!s->in_flight; 111089bd0305SKevin Wolf } 111189bd0305SKevin Wolf 1112565ac01fSStefan Hajnoczi static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context) 1113565ac01fSStefan Hajnoczi { 1114565ac01fSStefan Hajnoczi MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 1115565ac01fSStefan Hajnoczi 1116565ac01fSStefan Hajnoczi blk_set_aio_context(s->target, new_context); 1117565ac01fSStefan Hajnoczi } 1118565ac01fSStefan Hajnoczi 1119bae8196dSPaolo Bonzini static void mirror_drain(BlockJob *job) 1120bae8196dSPaolo Bonzini { 1121bae8196dSPaolo Bonzini MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 1122bae8196dSPaolo Bonzini 1123bae8196dSPaolo Bonzini /* Need to keep a reference in case blk_drain triggers execution 1124bae8196dSPaolo Bonzini * of mirror_complete... 
1125bae8196dSPaolo Bonzini */ 1126bae8196dSPaolo Bonzini if (s->target) { 1127bae8196dSPaolo Bonzini BlockBackend *target = s->target; 1128bae8196dSPaolo Bonzini blk_ref(target); 1129bae8196dSPaolo Bonzini blk_drain(target); 1130bae8196dSPaolo Bonzini blk_unref(target); 1131bae8196dSPaolo Bonzini } 1132bae8196dSPaolo Bonzini } 1133bae8196dSPaolo Bonzini 11343fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = { 113533e9e9bdSKevin Wolf .job_driver = { 1136893f7ebaSPaolo Bonzini .instance_size = sizeof(MirrorBlockJob), 11378e4c8700SKevin Wolf .job_type = JOB_TYPE_MIRROR, 113880fa2c75SKevin Wolf .free = block_job_free, 1139b15de828SKevin Wolf .user_resume = block_job_user_resume, 1140b69f777dSKevin Wolf .drain = block_job_drain, 1141a7815a76SJohn Snow .start = mirror_run, 1142565ac01fSStefan Hajnoczi .pause = mirror_pause, 1143da01ff7fSKevin Wolf .complete = mirror_complete, 11443453d972SKevin Wolf }, 114589bd0305SKevin Wolf .drained_poll = mirror_drained_poll, 1146565ac01fSStefan Hajnoczi .attached_aio_context = mirror_attached_aio_context, 1147bae8196dSPaolo Bonzini .drain = mirror_drain, 1148893f7ebaSPaolo Bonzini }; 1149893f7ebaSPaolo Bonzini 115003544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = { 115133e9e9bdSKevin Wolf .job_driver = { 115203544a6eSFam Zheng .instance_size = sizeof(MirrorBlockJob), 11538e4c8700SKevin Wolf .job_type = JOB_TYPE_COMMIT, 115480fa2c75SKevin Wolf .free = block_job_free, 1155b15de828SKevin Wolf .user_resume = block_job_user_resume, 1156b69f777dSKevin Wolf .drain = block_job_drain, 1157a7815a76SJohn Snow .start = mirror_run, 1158565ac01fSStefan Hajnoczi .pause = mirror_pause, 1159da01ff7fSKevin Wolf .complete = mirror_complete, 11603453d972SKevin Wolf }, 116189bd0305SKevin Wolf .drained_poll = mirror_drained_poll, 1162565ac01fSStefan Hajnoczi .attached_aio_context = mirror_attached_aio_context, 1163bae8196dSPaolo Bonzini .drain = mirror_drain, 116403544a6eSFam Zheng }; 116503544a6eSFam Zheng 1166*d06107adSMax Reitz static void do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, 1167*d06107adSMax Reitz uint64_t offset, uint64_t bytes, 1168*d06107adSMax Reitz QEMUIOVector *qiov, int flags) 1169*d06107adSMax Reitz { 1170*d06107adSMax Reitz BdrvDirtyBitmapIter *iter; 1171*d06107adSMax Reitz QEMUIOVector target_qiov; 1172*d06107adSMax Reitz uint64_t dirty_offset; 1173*d06107adSMax Reitz int dirty_bytes; 1174*d06107adSMax Reitz 1175*d06107adSMax Reitz if (qiov) { 1176*d06107adSMax Reitz qemu_iovec_init(&target_qiov, qiov->niov); 1177*d06107adSMax Reitz } 1178*d06107adSMax Reitz 1179*d06107adSMax Reitz iter = bdrv_dirty_iter_new(job->dirty_bitmap); 1180*d06107adSMax Reitz bdrv_set_dirty_iter(iter, offset); 1181*d06107adSMax Reitz 1182*d06107adSMax Reitz while (true) { 1183*d06107adSMax Reitz bool valid_area; 1184*d06107adSMax Reitz int ret; 1185*d06107adSMax Reitz 1186*d06107adSMax Reitz bdrv_dirty_bitmap_lock(job->dirty_bitmap); 1187*d06107adSMax Reitz valid_area = bdrv_dirty_iter_next_area(iter, offset + bytes, 1188*d06107adSMax Reitz &dirty_offset, &dirty_bytes); 1189*d06107adSMax Reitz if (!valid_area) { 1190*d06107adSMax Reitz bdrv_dirty_bitmap_unlock(job->dirty_bitmap); 1191*d06107adSMax Reitz break; 1192*d06107adSMax Reitz } 1193*d06107adSMax Reitz 1194*d06107adSMax Reitz bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap, 1195*d06107adSMax Reitz dirty_offset, dirty_bytes); 1196*d06107adSMax Reitz bdrv_dirty_bitmap_unlock(job->dirty_bitmap); 1197*d06107adSMax Reitz 1198*d06107adSMax Reitz 
job_progress_increase_remaining(&job->common.job, dirty_bytes); 1199*d06107adSMax Reitz 1200*d06107adSMax Reitz assert(dirty_offset - offset <= SIZE_MAX); 1201*d06107adSMax Reitz if (qiov) { 1202*d06107adSMax Reitz qemu_iovec_reset(&target_qiov); 1203*d06107adSMax Reitz qemu_iovec_concat(&target_qiov, qiov, 1204*d06107adSMax Reitz dirty_offset - offset, dirty_bytes); 1205*d06107adSMax Reitz } 1206*d06107adSMax Reitz 1207*d06107adSMax Reitz switch (method) { 1208*d06107adSMax Reitz case MIRROR_METHOD_COPY: 1209*d06107adSMax Reitz ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes, 1210*d06107adSMax Reitz qiov ? &target_qiov : NULL, flags); 1211*d06107adSMax Reitz break; 1212*d06107adSMax Reitz 1213*d06107adSMax Reitz case MIRROR_METHOD_ZERO: 1214*d06107adSMax Reitz assert(!qiov); 1215*d06107adSMax Reitz ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes, 1216*d06107adSMax Reitz flags); 1217*d06107adSMax Reitz break; 1218*d06107adSMax Reitz 1219*d06107adSMax Reitz case MIRROR_METHOD_DISCARD: 1220*d06107adSMax Reitz assert(!qiov); 1221*d06107adSMax Reitz ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes); 1222*d06107adSMax Reitz break; 1223*d06107adSMax Reitz 1224*d06107adSMax Reitz default: 1225*d06107adSMax Reitz abort(); 1226*d06107adSMax Reitz } 1227*d06107adSMax Reitz 1228*d06107adSMax Reitz if (ret >= 0) { 1229*d06107adSMax Reitz job_progress_update(&job->common.job, dirty_bytes); 1230*d06107adSMax Reitz } else { 1231*d06107adSMax Reitz BlockErrorAction action; 1232*d06107adSMax Reitz 1233*d06107adSMax Reitz bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_offset, dirty_bytes); 1234*d06107adSMax Reitz job->actively_synced = false; 1235*d06107adSMax Reitz 1236*d06107adSMax Reitz action = mirror_error_action(job, false, -ret); 1237*d06107adSMax Reitz if (action == BLOCK_ERROR_ACTION_REPORT) { 1238*d06107adSMax Reitz if (!job->ret) { 1239*d06107adSMax Reitz job->ret = ret; 1240*d06107adSMax Reitz } 1241*d06107adSMax Reitz break; 1242*d06107adSMax Reitz } 1243*d06107adSMax Reitz } 1244*d06107adSMax Reitz } 1245*d06107adSMax Reitz 1246*d06107adSMax Reitz bdrv_dirty_iter_free(iter); 1247*d06107adSMax Reitz if (qiov) { 1248*d06107adSMax Reitz qemu_iovec_destroy(&target_qiov); 1249*d06107adSMax Reitz } 1250*d06107adSMax Reitz } 1251*d06107adSMax Reitz 1252*d06107adSMax Reitz static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, 1253*d06107adSMax Reitz uint64_t offset, 1254*d06107adSMax Reitz uint64_t bytes) 1255*d06107adSMax Reitz { 1256*d06107adSMax Reitz MirrorOp *op; 1257*d06107adSMax Reitz uint64_t start_chunk = offset / s->granularity; 1258*d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); 1259*d06107adSMax Reitz 1260*d06107adSMax Reitz op = g_new(MirrorOp, 1); 1261*d06107adSMax Reitz *op = (MirrorOp){ 1262*d06107adSMax Reitz .s = s, 1263*d06107adSMax Reitz .offset = offset, 1264*d06107adSMax Reitz .bytes = bytes, 1265*d06107adSMax Reitz .is_active_write = true, 1266*d06107adSMax Reitz }; 1267*d06107adSMax Reitz qemu_co_queue_init(&op->waiting_requests); 1268*d06107adSMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); 1269*d06107adSMax Reitz 1270*d06107adSMax Reitz s->in_active_write_counter++; 1271*d06107adSMax Reitz 1272*d06107adSMax Reitz mirror_wait_on_conflicts(op, s, offset, bytes); 1273*d06107adSMax Reitz 1274*d06107adSMax Reitz bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1275*d06107adSMax Reitz 1276*d06107adSMax Reitz return op; 1277*d06107adSMax Reitz } 
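/*
 * The helper above and active_write_settle() below bracket every guest write
 * that is mirrored synchronously.  A rough sketch of how
 * bdrv_mirror_top_do_write() further down uses them for the copy case
 * (names as in this file):
 *
 *     op = active_write_prepare(s->job, offset, bytes);
 *     ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
 *     if (ret >= 0) {
 *         do_sync_target_write(s->job, MIRROR_METHOD_COPY,
 *                              offset, bytes, qiov, flags);
 *     }
 *     active_write_settle(op);
 *
 * active_write_prepare() waits for conflicting background operations and
 * marks the affected chunks in flight; active_write_settle() clears them
 * again and wakes up any requests waiting on this operation.
 */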
1278*d06107adSMax Reitz 1279*d06107adSMax Reitz static void coroutine_fn active_write_settle(MirrorOp *op) 1280*d06107adSMax Reitz { 1281*d06107adSMax Reitz uint64_t start_chunk = op->offset / op->s->granularity; 1282*d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes, 1283*d06107adSMax Reitz op->s->granularity); 1284*d06107adSMax Reitz 1285*d06107adSMax Reitz if (!--op->s->in_active_write_counter && op->s->actively_synced) { 1286*d06107adSMax Reitz BdrvChild *source = op->s->mirror_top_bs->backing; 1287*d06107adSMax Reitz 1288*d06107adSMax Reitz if (QLIST_FIRST(&source->bs->parents) == source && 1289*d06107adSMax Reitz QLIST_NEXT(source, next_parent) == NULL) 1290*d06107adSMax Reitz { 1291*d06107adSMax Reitz /* Assert that we are back in sync once all active write 1292*d06107adSMax Reitz * operations are settled. 1293*d06107adSMax Reitz * Note that we can only assert this if the mirror node 1294*d06107adSMax Reitz * is the source node's only parent. */ 1295*d06107adSMax Reitz assert(!bdrv_get_dirty_count(op->s->dirty_bitmap)); 1296*d06107adSMax Reitz } 1297*d06107adSMax Reitz } 1298*d06107adSMax Reitz bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1299*d06107adSMax Reitz QTAILQ_REMOVE(&op->s->ops_in_flight, op, next); 1300*d06107adSMax Reitz qemu_co_queue_restart_all(&op->waiting_requests); 1301*d06107adSMax Reitz g_free(op); 1302*d06107adSMax Reitz } 1303*d06107adSMax Reitz 13044ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, 13054ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 13064ef85a9cSKevin Wolf { 13074ef85a9cSKevin Wolf return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); 13084ef85a9cSKevin Wolf } 13094ef85a9cSKevin Wolf 1310*d06107adSMax Reitz static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, 1311*d06107adSMax Reitz MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, 1312*d06107adSMax Reitz int flags) 1313*d06107adSMax Reitz { 1314*d06107adSMax Reitz MirrorOp *op = NULL; 1315*d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1316*d06107adSMax Reitz int ret = 0; 1317*d06107adSMax Reitz bool copy_to_target; 1318*d06107adSMax Reitz 1319*d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1320*d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1321*d06107adSMax Reitz 1322*d06107adSMax Reitz if (copy_to_target) { 1323*d06107adSMax Reitz op = active_write_prepare(s->job, offset, bytes); 1324*d06107adSMax Reitz } 1325*d06107adSMax Reitz 1326*d06107adSMax Reitz switch (method) { 1327*d06107adSMax Reitz case MIRROR_METHOD_COPY: 1328*d06107adSMax Reitz ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); 1329*d06107adSMax Reitz break; 1330*d06107adSMax Reitz 1331*d06107adSMax Reitz case MIRROR_METHOD_ZERO: 1332*d06107adSMax Reitz ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); 1333*d06107adSMax Reitz break; 1334*d06107adSMax Reitz 1335*d06107adSMax Reitz case MIRROR_METHOD_DISCARD: 1336*d06107adSMax Reitz ret = bdrv_co_pdiscard(bs->backing->bs, offset, bytes); 1337*d06107adSMax Reitz break; 1338*d06107adSMax Reitz 1339*d06107adSMax Reitz default: 1340*d06107adSMax Reitz abort(); 1341*d06107adSMax Reitz } 1342*d06107adSMax Reitz 1343*d06107adSMax Reitz if (ret < 0) { 1344*d06107adSMax Reitz goto out; 1345*d06107adSMax Reitz } 1346*d06107adSMax Reitz 1347*d06107adSMax Reitz if (copy_to_target) { 1348*d06107adSMax Reitz do_sync_target_write(s->job, method, 
offset, bytes, qiov, flags); 1349*d06107adSMax Reitz } 1350*d06107adSMax Reitz 1351*d06107adSMax Reitz out: 1352*d06107adSMax Reitz if (copy_to_target) { 1353*d06107adSMax Reitz active_write_settle(op); 1354*d06107adSMax Reitz } 1355*d06107adSMax Reitz return ret; 1356*d06107adSMax Reitz } 1357*d06107adSMax Reitz 13584ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, 13594ef85a9cSKevin Wolf uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 13604ef85a9cSKevin Wolf { 1361*d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1362*d06107adSMax Reitz QEMUIOVector bounce_qiov; 1363*d06107adSMax Reitz void *bounce_buf; 1364*d06107adSMax Reitz int ret = 0; 1365*d06107adSMax Reitz bool copy_to_target; 1366*d06107adSMax Reitz 1367*d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1368*d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1369*d06107adSMax Reitz 1370*d06107adSMax Reitz if (copy_to_target) { 1371*d06107adSMax Reitz /* The guest might concurrently modify the data to write; but 1372*d06107adSMax Reitz * the data on source and destination must match, so we have 1373*d06107adSMax Reitz * to use a bounce buffer if we are going to write to the 1374*d06107adSMax Reitz * target now. */ 1375*d06107adSMax Reitz bounce_buf = qemu_blockalign(bs, bytes); 1376*d06107adSMax Reitz iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes); 1377*d06107adSMax Reitz 1378*d06107adSMax Reitz qemu_iovec_init(&bounce_qiov, 1); 1379*d06107adSMax Reitz qemu_iovec_add(&bounce_qiov, bounce_buf, bytes); 1380*d06107adSMax Reitz qiov = &bounce_qiov; 1381*d06107adSMax Reitz } 1382*d06107adSMax Reitz 1383*d06107adSMax Reitz ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov, 1384*d06107adSMax Reitz flags); 1385*d06107adSMax Reitz 1386*d06107adSMax Reitz if (copy_to_target) { 1387*d06107adSMax Reitz qemu_iovec_destroy(&bounce_qiov); 1388*d06107adSMax Reitz qemu_vfree(bounce_buf); 1389*d06107adSMax Reitz } 1390*d06107adSMax Reitz 1391*d06107adSMax Reitz return ret; 13924ef85a9cSKevin Wolf } 13934ef85a9cSKevin Wolf 13944ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) 13954ef85a9cSKevin Wolf { 1396ce960aa9SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) { 1397ce960aa9SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_append in mirror_start_job */ 1398ce960aa9SVladimir Sementsov-Ogievskiy return 0; 1399ce960aa9SVladimir Sementsov-Ogievskiy } 14004ef85a9cSKevin Wolf return bdrv_co_flush(bs->backing->bs); 14014ef85a9cSKevin Wolf } 14024ef85a9cSKevin Wolf 14034ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, 1404f5a5ca79SManos Pitsidianakis int64_t offset, int bytes, BdrvRequestFlags flags) 14054ef85a9cSKevin Wolf { 1406*d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL, 1407*d06107adSMax Reitz flags); 14084ef85a9cSKevin Wolf } 14094ef85a9cSKevin Wolf 14104ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, 1411f5a5ca79SManos Pitsidianakis int64_t offset, int bytes) 14124ef85a9cSKevin Wolf { 1413*d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes, 1414*d06107adSMax Reitz NULL, 0); 14154ef85a9cSKevin Wolf } 14164ef85a9cSKevin Wolf 1417fd4a6493SKevin Wolf static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts) 1418fd4a6493SKevin Wolf { 141918775ff3SVladimir 
Sementsov-Ogievskiy if (bs->backing == NULL) { 142018775ff3SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_attach_child in 142118775ff3SVladimir Sementsov-Ogievskiy * bdrv_set_backing_hd */ 142218775ff3SVladimir Sementsov-Ogievskiy return; 142318775ff3SVladimir Sementsov-Ogievskiy } 1424fd4a6493SKevin Wolf bdrv_refresh_filename(bs->backing->bs); 1425fd4a6493SKevin Wolf pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), 1426fd4a6493SKevin Wolf bs->backing->bs->filename); 1427fd4a6493SKevin Wolf } 1428fd4a6493SKevin Wolf 14294ef85a9cSKevin Wolf static void bdrv_mirror_top_close(BlockDriverState *bs) 14304ef85a9cSKevin Wolf { 14314ef85a9cSKevin Wolf } 14324ef85a9cSKevin Wolf 14334ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c, 14344ef85a9cSKevin Wolf const BdrvChildRole *role, 1435e0995dc3SKevin Wolf BlockReopenQueue *reopen_queue, 14364ef85a9cSKevin Wolf uint64_t perm, uint64_t shared, 14374ef85a9cSKevin Wolf uint64_t *nperm, uint64_t *nshared) 14384ef85a9cSKevin Wolf { 14394ef85a9cSKevin Wolf /* Must be able to forward guest writes to the real image */ 14404ef85a9cSKevin Wolf *nperm = 0; 14414ef85a9cSKevin Wolf if (perm & BLK_PERM_WRITE) { 14424ef85a9cSKevin Wolf *nperm |= BLK_PERM_WRITE; 14434ef85a9cSKevin Wolf } 14444ef85a9cSKevin Wolf 14454ef85a9cSKevin Wolf *nshared = BLK_PERM_ALL; 14464ef85a9cSKevin Wolf } 14474ef85a9cSKevin Wolf 14484ef85a9cSKevin Wolf /* Dummy node that provides consistent read to its users without requiring it 14494ef85a9cSKevin Wolf * from its backing file and that allows writes on the backing file chain. */ 14504ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = { 14514ef85a9cSKevin Wolf .format_name = "mirror_top", 14524ef85a9cSKevin Wolf .bdrv_co_preadv = bdrv_mirror_top_preadv, 14534ef85a9cSKevin Wolf .bdrv_co_pwritev = bdrv_mirror_top_pwritev, 14544ef85a9cSKevin Wolf .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes, 14554ef85a9cSKevin Wolf .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard, 14564ef85a9cSKevin Wolf .bdrv_co_flush = bdrv_mirror_top_flush, 14573e4d0e72SEric Blake .bdrv_co_block_status = bdrv_co_block_status_from_backing, 1458fd4a6493SKevin Wolf .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename, 14594ef85a9cSKevin Wolf .bdrv_close = bdrv_mirror_top_close, 14604ef85a9cSKevin Wolf .bdrv_child_perm = bdrv_mirror_top_child_perm, 14614ef85a9cSKevin Wolf }; 14624ef85a9cSKevin Wolf 146371aa9867SAlberto Garcia static void mirror_start_job(const char *job_id, BlockDriverState *bs, 146447970dfbSJohn Snow int creation_flags, BlockDriverState *target, 146547970dfbSJohn Snow const char *replaces, int64_t speed, 146647970dfbSJohn Snow uint32_t granularity, int64_t buf_size, 1467274fcceeSMax Reitz BlockMirrorBackingMode backing_mode, 146803544a6eSFam Zheng BlockdevOnError on_source_error, 1469b952b558SPaolo Bonzini BlockdevOnError on_target_error, 14700fc9f8eaSFam Zheng bool unmap, 1471097310b5SMarkus Armbruster BlockCompletionFunc *cb, 147251ccfa2dSFam Zheng void *opaque, 147303544a6eSFam Zheng const BlockJobDriver *driver, 1474b49f7eadSWen Congyang bool is_none_mode, BlockDriverState *base, 147551ccfa2dSFam Zheng bool auto_complete, const char *filter_node_name, 1476045a2f82SFam Zheng bool is_mirror, 147751ccfa2dSFam Zheng Error **errp) 1478893f7ebaSPaolo Bonzini { 1479893f7ebaSPaolo Bonzini MirrorBlockJob *s; 1480429076e8SMax Reitz MirrorBDSOpaque *bs_opaque; 14814ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs; 14824ef85a9cSKevin Wolf bool target_graph_mod; 
14834ef85a9cSKevin Wolf bool target_is_backing; 1484b2c2832cSKevin Wolf Error *local_err = NULL; 1485d7086422SKevin Wolf int ret; 1486893f7ebaSPaolo Bonzini 1487eee13dfeSPaolo Bonzini if (granularity == 0) { 1488341ebc2fSJohn Snow granularity = bdrv_get_default_bitmap_granularity(target); 1489eee13dfeSPaolo Bonzini } 1490eee13dfeSPaolo Bonzini 149131826642SEric Blake assert(is_power_of_2(granularity)); 1492eee13dfeSPaolo Bonzini 149348ac0a4dSWen Congyang if (buf_size < 0) { 149448ac0a4dSWen Congyang error_setg(errp, "Invalid parameter 'buf-size'"); 149548ac0a4dSWen Congyang return; 149648ac0a4dSWen Congyang } 149748ac0a4dSWen Congyang 149848ac0a4dSWen Congyang if (buf_size == 0) { 149948ac0a4dSWen Congyang buf_size = DEFAULT_MIRROR_BUF_SIZE; 150048ac0a4dSWen Congyang } 15015bc361b8SFam Zheng 15024ef85a9cSKevin Wolf /* In the case of active commit, add dummy driver to provide consistent 15034ef85a9cSKevin Wolf * reads on the top, while disabling it in the intermediate nodes, and make 15044ef85a9cSKevin Wolf * the backing chain writable. */ 15056cdbceb1SKevin Wolf mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, 15066cdbceb1SKevin Wolf BDRV_O_RDWR, errp); 15074ef85a9cSKevin Wolf if (mirror_top_bs == NULL) { 1508893f7ebaSPaolo Bonzini return; 1509893f7ebaSPaolo Bonzini } 1510d3c8c674SKevin Wolf if (!filter_node_name) { 1511d3c8c674SKevin Wolf mirror_top_bs->implicit = true; 1512d3c8c674SKevin Wolf } 15134ef85a9cSKevin Wolf mirror_top_bs->total_sectors = bs->total_sectors; 1514228345bfSMax Reitz mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED; 1515228345bfSMax Reitz mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED; 1516429076e8SMax Reitz bs_opaque = g_new0(MirrorBDSOpaque, 1); 1517429076e8SMax Reitz mirror_top_bs->opaque = bs_opaque; 151819dd29e8SFam Zheng bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs)); 1519893f7ebaSPaolo Bonzini 15204ef85a9cSKevin Wolf /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep 15217a25fcd0SMax Reitz * it alive until block_job_create() succeeds even if bs has no parent. 
*/
15224ef85a9cSKevin Wolf bdrv_ref(mirror_top_bs);
15234ef85a9cSKevin Wolf bdrv_drained_begin(bs);
1524b2c2832cSKevin Wolf bdrv_append(mirror_top_bs, bs, &local_err);
15254ef85a9cSKevin Wolf bdrv_drained_end(bs);
15264ef85a9cSKevin Wolf
1527b2c2832cSKevin Wolf if (local_err) {
1528b2c2832cSKevin Wolf bdrv_unref(mirror_top_bs);
1529b2c2832cSKevin Wolf error_propagate(errp, local_err);
1530b2c2832cSKevin Wolf return;
1531b2c2832cSKevin Wolf }
1532b2c2832cSKevin Wolf
15334ef85a9cSKevin Wolf /* Make sure that the source is not resized while the job is running */
153475859b94SJohn Snow s = block_job_create(job_id, driver, NULL, mirror_top_bs,
15354ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ,
15364ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
15374ef85a9cSKevin Wolf BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
15384ef85a9cSKevin Wolf creation_flags, cb, opaque, errp);
15394ef85a9cSKevin Wolf if (!s) {
15404ef85a9cSKevin Wolf goto fail;
15414ef85a9cSKevin Wolf }
1542429076e8SMax Reitz bs_opaque->job = s;
1543429076e8SMax Reitz
15447a25fcd0SMax Reitz /* The block job now has a reference to this node */
15457a25fcd0SMax Reitz bdrv_unref(mirror_top_bs);
15467a25fcd0SMax Reitz
15474ef85a9cSKevin Wolf s->mirror_top_bs = mirror_top_bs;
15484ef85a9cSKevin Wolf
15494ef85a9cSKevin Wolf /* No resize for the target either; while the mirror is still running, a
15504ef85a9cSKevin Wolf * consistent read isn't necessarily possible. We could possibly allow
15514ef85a9cSKevin Wolf * writes and graph modifications, though it would likely defeat the
15524ef85a9cSKevin Wolf * purpose of a mirror, so leave them blocked for now.
15534ef85a9cSKevin Wolf *
15544ef85a9cSKevin Wolf * In the case of active commit, things look a bit different, though,
15554ef85a9cSKevin Wolf * because the target is an already populated backing file in active use.
15564ef85a9cSKevin Wolf * We can allow anything except resize there. */
15574ef85a9cSKevin Wolf target_is_backing = bdrv_chain_contains(bs, target);
15584ef85a9cSKevin Wolf target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
15594ef85a9cSKevin Wolf s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
15604ef85a9cSKevin Wolf (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
15614ef85a9cSKevin Wolf BLK_PERM_WRITE_UNCHANGED |
15624ef85a9cSKevin Wolf (target_is_backing ? BLK_PERM_CONSISTENT_READ |
15634ef85a9cSKevin Wolf BLK_PERM_WRITE |
15644ef85a9cSKevin Wolf BLK_PERM_GRAPH_MOD : 0));
1565d7086422SKevin Wolf ret = blk_insert_bs(s->target, target, errp);
1566d7086422SKevin Wolf if (ret < 0) {
15674ef85a9cSKevin Wolf goto fail;
1568d7086422SKevin Wolf }
1569045a2f82SFam Zheng if (is_mirror) {
1570045a2f82SFam Zheng /* XXX: Mirror target could be an NBD server of target QEMU in the case
1571045a2f82SFam Zheng * of non-shared block migration. To allow migration completion, we
1572045a2f82SFam Zheng * have to allow "inactivate" of the target BB. When that happens, we
1573045a2f82SFam Zheng * know the job is drained, and the vcpus are stopped, so no write
1574045a2f82SFam Zheng * operation will be performed. Block layer already has assertions to
1575045a2f82SFam Zheng * ensure that. */
1576045a2f82SFam Zheng blk_set_force_allow_inactivate(s->target);
1577045a2f82SFam Zheng }
1578e253f4b8SKevin Wolf
157909158f00SBenoît Canet s->replaces = g_strdup(replaces);
1580b952b558SPaolo Bonzini s->on_source_error = on_source_error;
1581b952b558SPaolo Bonzini s->on_target_error = on_target_error;
158203544a6eSFam Zheng s->is_none_mode = is_none_mode;
1583274fcceeSMax Reitz s->backing_mode = backing_mode;
1584*d06107adSMax Reitz s->copy_mode = MIRROR_COPY_MODE_BACKGROUND;
15855bc361b8SFam Zheng s->base = base;
1586eee13dfeSPaolo Bonzini s->granularity = granularity;
158748ac0a4dSWen Congyang s->buf_size = ROUND_UP(buf_size, granularity);
15880fc9f8eaSFam Zheng s->unmap = unmap;
1589b49f7eadSWen Congyang if (auto_complete) {
1590b49f7eadSWen Congyang s->should_complete = true;
1591b49f7eadSWen Congyang }
1592b812f671SPaolo Bonzini
15930db6e54aSFam Zheng s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1594b8afb520SFam Zheng if (!s->dirty_bitmap) {
159588f9d1b3SKevin Wolf goto fail;
1596b8afb520SFam Zheng }
159710f3cd15SAlberto Garcia
15984ef85a9cSKevin Wolf /* Required permissions are already taken with blk_new() */
159976d554e2SKevin Wolf block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
160076d554e2SKevin Wolf &error_abort);
160176d554e2SKevin Wolf
1602f3ede4b0SAlberto Garcia /* In commit_active_start() all intermediate nodes disappear, so
1603f3ede4b0SAlberto Garcia * any jobs in them must be blocked */
16044ef85a9cSKevin Wolf if (target_is_backing) {
1605f3ede4b0SAlberto Garcia BlockDriverState *iter;
1606f3ede4b0SAlberto Garcia for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
16074ef85a9cSKevin Wolf /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
16084ef85a9cSKevin Wolf * ourselves at s->base (if writes are blocked for a node, they are
16094ef85a9cSKevin Wolf * also blocked for its backing file). The other option would be a
16104ef85a9cSKevin Wolf * second filter driver above s->base (== target).
*/ 16114ef85a9cSKevin Wolf ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, 16124ef85a9cSKevin Wolf BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE, 16134ef85a9cSKevin Wolf errp); 16144ef85a9cSKevin Wolf if (ret < 0) { 16154ef85a9cSKevin Wolf goto fail; 16164ef85a9cSKevin Wolf } 1617f3ede4b0SAlberto Garcia } 1618f3ede4b0SAlberto Garcia } 161910f3cd15SAlberto Garcia 162012aa4082SMax Reitz QTAILQ_INIT(&s->ops_in_flight); 162112aa4082SMax Reitz 16225ccac6f1SJohn Snow trace_mirror_start(bs, s, opaque); 1623da01ff7fSKevin Wolf job_start(&s->common.job); 16244ef85a9cSKevin Wolf return; 16254ef85a9cSKevin Wolf 16264ef85a9cSKevin Wolf fail: 16274ef85a9cSKevin Wolf if (s) { 16287a25fcd0SMax Reitz /* Make sure this BDS does not go away until we have completed the graph 16297a25fcd0SMax Reitz * changes below */ 16307a25fcd0SMax Reitz bdrv_ref(mirror_top_bs); 16317a25fcd0SMax Reitz 16324ef85a9cSKevin Wolf g_free(s->replaces); 16334ef85a9cSKevin Wolf blk_unref(s->target); 1634429076e8SMax Reitz bs_opaque->job = NULL; 16354ad35181SKevin Wolf job_early_fail(&s->common.job); 16364ef85a9cSKevin Wolf } 16374ef85a9cSKevin Wolf 1638c1cef672SFam Zheng bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL, 1639c1cef672SFam Zheng &error_abort); 16405fe31c25SKevin Wolf bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort); 16417a25fcd0SMax Reitz 16427a25fcd0SMax Reitz bdrv_unref(mirror_top_bs); 1643893f7ebaSPaolo Bonzini } 164403544a6eSFam Zheng 164571aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs, 164671aa9867SAlberto Garcia BlockDriverState *target, const char *replaces, 16475fba6c0eSJohn Snow int64_t speed, uint32_t granularity, int64_t buf_size, 1648274fcceeSMax Reitz MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, 1649274fcceeSMax Reitz BlockdevOnError on_source_error, 165003544a6eSFam Zheng BlockdevOnError on_target_error, 16516cdbceb1SKevin Wolf bool unmap, const char *filter_node_name, Error **errp) 165203544a6eSFam Zheng { 165303544a6eSFam Zheng bool is_none_mode; 165403544a6eSFam Zheng BlockDriverState *base; 165503544a6eSFam Zheng 16564b80ab2bSJohn Snow if (mode == MIRROR_SYNC_MODE_INCREMENTAL) { 16574b80ab2bSJohn Snow error_setg(errp, "Sync mode 'incremental' not supported"); 1658d58d8453SJohn Snow return; 1659d58d8453SJohn Snow } 166003544a6eSFam Zheng is_none_mode = mode == MIRROR_SYNC_MODE_NONE; 1661760e0063SKevin Wolf base = mode == MIRROR_SYNC_MODE_TOP ? 
backing_bs(bs) : NULL; 1662bb02b65cSKevin Wolf mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces, 1663274fcceeSMax Reitz speed, granularity, buf_size, backing_mode, 166451ccfa2dSFam Zheng on_source_error, on_target_error, unmap, NULL, NULL, 16656cdbceb1SKevin Wolf &mirror_job_driver, is_none_mode, base, false, 1666045a2f82SFam Zheng filter_node_name, true, errp); 166703544a6eSFam Zheng } 166803544a6eSFam Zheng 1669fd62c609SAlberto Garcia void commit_active_start(const char *job_id, BlockDriverState *bs, 167047970dfbSJohn Snow BlockDriverState *base, int creation_flags, 167147970dfbSJohn Snow int64_t speed, BlockdevOnError on_error, 16720db832f4SKevin Wolf const char *filter_node_name, 167378bbd910SFam Zheng BlockCompletionFunc *cb, void *opaque, 167478bbd910SFam Zheng bool auto_complete, Error **errp) 167503544a6eSFam Zheng { 16764da83585SJeff Cody int orig_base_flags; 1677cc67f4d1SJeff Cody Error *local_err = NULL; 16784da83585SJeff Cody 16794da83585SJeff Cody orig_base_flags = bdrv_get_flags(base); 16804da83585SJeff Cody 168120a63d2cSFam Zheng if (bdrv_reopen(base, bs->open_flags, errp)) { 168220a63d2cSFam Zheng return; 168320a63d2cSFam Zheng } 16844da83585SJeff Cody 168547970dfbSJohn Snow mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0, 168671aa9867SAlberto Garcia MIRROR_LEAVE_BACKING_CHAIN, 168751ccfa2dSFam Zheng on_error, on_error, true, cb, opaque, 16886cdbceb1SKevin Wolf &commit_active_job_driver, false, base, auto_complete, 1689045a2f82SFam Zheng filter_node_name, false, &local_err); 16900fb6395cSMarkus Armbruster if (local_err) { 1691cc67f4d1SJeff Cody error_propagate(errp, local_err); 16924da83585SJeff Cody goto error_restore_flags; 16934da83585SJeff Cody } 16944da83585SJeff Cody 16954da83585SJeff Cody return; 16964da83585SJeff Cody 16974da83585SJeff Cody error_restore_flags: 16984da83585SJeff Cody /* ignore error and errp for bdrv_reopen, because we want to propagate 16994da83585SJeff Cody * the original error */ 17004da83585SJeff Cody bdrv_reopen(base, orig_base_flags, NULL); 17014da83585SJeff Cody return; 170203544a6eSFam Zheng } 1703