/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;
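    /* Chunks of buf currently not in use sit on buf_free (populated by
     * mirror_free_init()); mirror_co_read() waits until buf_free_count
     * covers a request before building its I/O vector. */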

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
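            /* The chunk span [op_start_chunk, op_start_chunk + op_nb_chunks)
             * of this in-flight op, used in the overlap test below */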
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
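    /* Runs after the target write finished; on error, the range is
     * re-marked dirty below so that it will be copied again later. */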
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original.
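 *
 * Illustrative example (numbers assumed, not from any particular setup):
 * with a 64 KiB granularity and a 1 MiB target cluster size, a request
 * whose first or last chunk is not yet marked in cow_bitmap is widened
 * to 1 MiB boundaries, so the returned tail adjustment may be positive.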
 */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_in_flight &&
            op->is_active_write == active)
        {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.
 * This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.
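     *
     * The read goes through mirror_top_bs->backing, i.e. the source node;
     * mirror_read_complete() then writes the same data out to the target.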
     */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in
     * mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
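     * Any chunk that is re-dirtied during such a yield keeps its bit set
     * and will simply be copied again by a later iteration.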
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

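        /* A concurrent operation may have failed while we yielded above,
         * so check before launching more I/O. */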
        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
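 * (The s->prepared flag below makes the body run at most once, even if
 * both callbacks end up calling this function.)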
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ?
                                    src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
                                      &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
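 * A flush failure is fed through mirror_error_action(), just like a
 * failed write to the target.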
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for completion.
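         * A zero-length image leaves nothing to copy, so the job is
         * trivially in sync from the start.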
         */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                  delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD.
*/
115509158f00SBenoît Canet error_setg(&s->replace_blocker,
115609158f00SBenoît Canet "block device is in use by block-job-complete");
115709158f00SBenoît Canet bdrv_op_block_all(s->to_replace, s->replace_blocker);
115809158f00SBenoît Canet bdrv_ref(s->to_replace);
11595a7e7a0bSStefan Hajnoczi
11605a7e7a0bSStefan Hajnoczi aio_context_release(replace_aio_context);
116109158f00SBenoît Canet }
116209158f00SBenoît Canet
1163d63ffd87SPaolo Bonzini s->should_complete = true;
116400769414SMax Reitz
116500769414SMax Reitz /* If the job is paused, it will be re-entered when it is resumed */
116600769414SMax Reitz if (!job->paused) {
11673d70ff53SKevin Wolf job_enter(job);
1168d63ffd87SPaolo Bonzini }
116900769414SMax Reitz }
1170d63ffd87SPaolo Bonzini
1171537c3d4fSStefan Hajnoczi static void coroutine_fn mirror_pause(Job *job)
1172565ac01fSStefan Hajnoczi {
1173da01ff7fSKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
1174565ac01fSStefan Hajnoczi
1175bae8196dSPaolo Bonzini mirror_wait_for_all_io(s);
1176565ac01fSStefan Hajnoczi }
1177565ac01fSStefan Hajnoczi
117889bd0305SKevin Wolf static bool mirror_drained_poll(BlockJob *job)
117989bd0305SKevin Wolf {
118089bd0305SKevin Wolf MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
11815e771752SSergio Lopez
11825e771752SSergio Lopez /* If the job is neither paused nor cancelled, we can't be sure that it
11835e771752SSergio Lopez * won't issue more requests. We make an exception if we've reached this
11845e771752SSergio Lopez * point from one of our own drain sections, to avoid a deadlock waiting
11855e771752SSergio Lopez * for ourselves.
11865e771752SSergio Lopez */
118720ad4d20SHanna Reitz if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) {
11885e771752SSergio Lopez return true;
11895e771752SSergio Lopez }
11905e771752SSergio Lopez
119189bd0305SKevin Wolf return !!s->in_flight;
119289bd0305SKevin Wolf }
119389bd0305SKevin Wolf
119473895f38SHanna Reitz static bool mirror_cancel(Job *job, bool force)
1195521ff8b7SVladimir Sementsov-Ogievskiy {
1196521ff8b7SVladimir Sementsov-Ogievskiy MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
1197521ff8b7SVladimir Sementsov-Ogievskiy BlockDriverState *target = blk_bs(s->target);
1198521ff8b7SVladimir Sementsov-Ogievskiy
119973895f38SHanna Reitz /*
120073895f38SHanna Reitz * Before the job is READY, we treat any cancellation like a
120173895f38SHanna Reitz * force-cancellation.
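 * (After READY, a non-force cancel is the documented way to finish the
 * job with the target as a point-in-time copy of the source, so it
 * must not abort in-flight target requests.)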
120273895f38SHanna Reitz */ 120373895f38SHanna Reitz force = force || !job_is_ready(job); 120473895f38SHanna Reitz 120573895f38SHanna Reitz if (force) { 1206521ff8b7SVladimir Sementsov-Ogievskiy bdrv_cancel_in_flight(target); 1207521ff8b7SVladimir Sementsov-Ogievskiy } 120873895f38SHanna Reitz return force; 120973895f38SHanna Reitz } 121073895f38SHanna Reitz 121173895f38SHanna Reitz static bool commit_active_cancel(Job *job, bool force) 121273895f38SHanna Reitz { 121373895f38SHanna Reitz /* Same as above in mirror_cancel() */ 121473895f38SHanna Reitz return force || !job_is_ready(job); 12159c785cd7SVladimir Sementsov-Ogievskiy } 1216521ff8b7SVladimir Sementsov-Ogievskiy 12173fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = { 121833e9e9bdSKevin Wolf .job_driver = { 1219893f7ebaSPaolo Bonzini .instance_size = sizeof(MirrorBlockJob), 12208e4c8700SKevin Wolf .job_type = JOB_TYPE_MIRROR, 122180fa2c75SKevin Wolf .free = block_job_free, 1222b15de828SKevin Wolf .user_resume = block_job_user_resume, 1223f67432a2SJohn Snow .run = mirror_run, 1224737efc1eSJohn Snow .prepare = mirror_prepare, 1225737efc1eSJohn Snow .abort = mirror_abort, 1226565ac01fSStefan Hajnoczi .pause = mirror_pause, 1227da01ff7fSKevin Wolf .complete = mirror_complete, 1228521ff8b7SVladimir Sementsov-Ogievskiy .cancel = mirror_cancel, 12293453d972SKevin Wolf }, 123089bd0305SKevin Wolf .drained_poll = mirror_drained_poll, 1231893f7ebaSPaolo Bonzini }; 1232893f7ebaSPaolo Bonzini 123303544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = { 123433e9e9bdSKevin Wolf .job_driver = { 123503544a6eSFam Zheng .instance_size = sizeof(MirrorBlockJob), 12368e4c8700SKevin Wolf .job_type = JOB_TYPE_COMMIT, 123780fa2c75SKevin Wolf .free = block_job_free, 1238b15de828SKevin Wolf .user_resume = block_job_user_resume, 1239f67432a2SJohn Snow .run = mirror_run, 1240737efc1eSJohn Snow .prepare = mirror_prepare, 1241737efc1eSJohn Snow .abort = mirror_abort, 1242565ac01fSStefan Hajnoczi .pause = mirror_pause, 1243da01ff7fSKevin Wolf .complete = mirror_complete, 124473895f38SHanna Reitz .cancel = commit_active_cancel, 12453453d972SKevin Wolf }, 124689bd0305SKevin Wolf .drained_poll = mirror_drained_poll, 124703544a6eSFam Zheng }; 124803544a6eSFam Zheng 1249537c3d4fSStefan Hajnoczi static void coroutine_fn 1250537c3d4fSStefan Hajnoczi do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, 1251d06107adSMax Reitz uint64_t offset, uint64_t bytes, 1252d06107adSMax Reitz QEMUIOVector *qiov, int flags) 1253d06107adSMax Reitz { 1254d06107adSMax Reitz int ret; 1255dbdf699cSVladimir Sementsov-Ogievskiy size_t qiov_offset = 0; 1256dbdf699cSVladimir Sementsov-Ogievskiy int64_t bitmap_offset, bitmap_end; 1257d06107adSMax Reitz 1258dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset, job->granularity) && 1259dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset)) 1260dbdf699cSVladimir Sementsov-Ogievskiy { 1261dbdf699cSVladimir Sementsov-Ogievskiy /* 1262dbdf699cSVladimir Sementsov-Ogievskiy * Dirty unaligned padding: ignore it. 1263dbdf699cSVladimir Sementsov-Ogievskiy * 1264dbdf699cSVladimir Sementsov-Ogievskiy * Reasoning: 1265dbdf699cSVladimir Sementsov-Ogievskiy * 1. If we copy it, we can't reset corresponding bit in 1266dbdf699cSVladimir Sementsov-Ogievskiy * dirty_bitmap as there may be some "dirty" bytes still not 1267dbdf699cSVladimir Sementsov-Ogievskiy * copied. 1268dbdf699cSVladimir Sementsov-Ogievskiy * 2. 
It's already dirty; skipping it does not make the mirror
1269dbdf699cSVladimir Sementsov-Ogievskiy * progress diverge.
1270dbdf699cSVladimir Sementsov-Ogievskiy *
1271dbdf699cSVladimir Sementsov-Ogievskiy * Note that, because of this, a guest write may contribute nothing
1272dbdf699cSVladimir Sementsov-Ogievskiy * to mirror convergence; but that is not bad, as the background
1273dbdf699cSVladimir Sementsov-Ogievskiy * mirroring process keeps running. If, under bad circumstances (high
1274dbdf699cSVladimir Sementsov-Ogievskiy * guest I/O load), the background process starves, we will not
1275dbdf699cSVladimir Sementsov-Ogievskiy * converge anyway, even if every write contributed, as the guest is
1276dbdf699cSVladimir Sementsov-Ogievskiy * not guaranteed to rewrite the whole disk.
1277dbdf699cSVladimir Sementsov-Ogievskiy */
1278dbdf699cSVladimir Sementsov-Ogievskiy qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
1279dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= qiov_offset) {
1280dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */
1281dbdf699cSVladimir Sementsov-Ogievskiy return;
1282dbdf699cSVladimir Sementsov-Ogievskiy }
1283dbdf699cSVladimir Sementsov-Ogievskiy offset += qiov_offset;
1284dbdf699cSVladimir Sementsov-Ogievskiy bytes -= qiov_offset;
1285dbdf699cSVladimir Sementsov-Ogievskiy }
1286dbdf699cSVladimir Sementsov-Ogievskiy
1287dbdf699cSVladimir Sementsov-Ogievskiy if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
1288dbdf699cSVladimir Sementsov-Ogievskiy bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
1289dbdf699cSVladimir Sementsov-Ogievskiy {
1290dbdf699cSVladimir Sementsov-Ogievskiy uint64_t tail = (offset + bytes) % job->granularity;
1291dbdf699cSVladimir Sementsov-Ogievskiy
1292dbdf699cSVladimir Sementsov-Ogievskiy if (bytes <= tail) {
1293dbdf699cSVladimir Sementsov-Ogievskiy /* nothing to do after shrink */
1294dbdf699cSVladimir Sementsov-Ogievskiy return;
1295dbdf699cSVladimir Sementsov-Ogievskiy }
1296dbdf699cSVladimir Sementsov-Ogievskiy bytes -= tail;
1297dbdf699cSVladimir Sementsov-Ogievskiy }
1298dbdf699cSVladimir Sementsov-Ogievskiy
1299dbdf699cSVladimir Sementsov-Ogievskiy /*
1300dbdf699cSVladimir Sementsov-Ogievskiy * Tails are either clean or shrunk, so for bitmap resetting
1301dbdf699cSVladimir Sementsov-Ogievskiy * we can safely align the range down.
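 * E.g. (hypothetical numbers, assuming both edge chunks were dirty):
 * with a 64k granularity, a guest write to [60k, 200k) is clipped to
 * [64k, 192k) above, so bitmap_offset = 64k and bitmap_end = 192k, and
 * only chunks that the write fully rewrote have their bits reset.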
1302dbdf699cSVladimir Sementsov-Ogievskiy */
1303dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
1304dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
1305dbdf699cSVladimir Sementsov-Ogievskiy if (bitmap_offset < bitmap_end) {
1306dbdf699cSVladimir Sementsov-Ogievskiy bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
1307dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset);
1308dbdf699cSVladimir Sementsov-Ogievskiy }
1309d06107adSMax Reitz
13105c511ac3SVladimir Sementsov-Ogievskiy job_progress_increase_remaining(&job->common.job, bytes);
1311d06107adSMax Reitz
1312d06107adSMax Reitz switch (method) {
1313d06107adSMax Reitz case MIRROR_METHOD_COPY:
1314dbdf699cSVladimir Sementsov-Ogievskiy ret = blk_co_pwritev_part(job->target, offset, bytes,
1315dbdf699cSVladimir Sementsov-Ogievskiy qiov, qiov_offset, flags);
1316d06107adSMax Reitz break;
1317d06107adSMax Reitz
1318d06107adSMax Reitz case MIRROR_METHOD_ZERO:
1319d06107adSMax Reitz assert(!qiov);
13205c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
1321d06107adSMax Reitz break;
1322d06107adSMax Reitz
1323d06107adSMax Reitz case MIRROR_METHOD_DISCARD:
1324d06107adSMax Reitz assert(!qiov);
13255c511ac3SVladimir Sementsov-Ogievskiy ret = blk_co_pdiscard(job->target, offset, bytes);
1326d06107adSMax Reitz break;
1327d06107adSMax Reitz
1328d06107adSMax Reitz default:
1329d06107adSMax Reitz abort();
1330d06107adSMax Reitz }
1331d06107adSMax Reitz
1332d06107adSMax Reitz if (ret >= 0) {
13335c511ac3SVladimir Sementsov-Ogievskiy job_progress_update(&job->common.job, bytes);
1334d06107adSMax Reitz } else {
1335d06107adSMax Reitz BlockErrorAction action;
1336d06107adSMax Reitz
1337dbdf699cSVladimir Sementsov-Ogievskiy /*
1338dbdf699cSVladimir Sementsov-Ogievskiy * We failed, so we should mark the whole area dirty, aligned up.
1339dbdf699cSVladimir Sementsov-Ogievskiy * Note that we don't care about shrunk tails, if any: they were dirty
1340dbdf699cSVladimir Sementsov-Ogievskiy * at function start, and they must still be dirty, as we've locked
1341dbdf699cSVladimir Sementsov-Ogievskiy * the region for the in-flight op.
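 * Conversely, aligning up may re-dirty a clean edge chunk that the
 * failed request only partially covered; that is merely conservative,
 * as the background job will simply copy that chunk again.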
1342dbdf699cSVladimir Sementsov-Ogievskiy */ 1343dbdf699cSVladimir Sementsov-Ogievskiy bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity); 1344dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity); 1345dbdf699cSVladimir Sementsov-Ogievskiy bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset, 1346dbdf699cSVladimir Sementsov-Ogievskiy bitmap_end - bitmap_offset); 1347d06107adSMax Reitz job->actively_synced = false; 1348d06107adSMax Reitz 1349d06107adSMax Reitz action = mirror_error_action(job, false, -ret); 1350d06107adSMax Reitz if (action == BLOCK_ERROR_ACTION_REPORT) { 1351d06107adSMax Reitz if (!job->ret) { 1352d06107adSMax Reitz job->ret = ret; 1353d06107adSMax Reitz } 1354d06107adSMax Reitz } 1355d06107adSMax Reitz } 1356d06107adSMax Reitz } 1357d06107adSMax Reitz 1358d06107adSMax Reitz static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, 1359d06107adSMax Reitz uint64_t offset, 1360d06107adSMax Reitz uint64_t bytes) 1361d06107adSMax Reitz { 1362d06107adSMax Reitz MirrorOp *op; 1363d06107adSMax Reitz uint64_t start_chunk = offset / s->granularity; 1364d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); 1365d06107adSMax Reitz 1366d06107adSMax Reitz op = g_new(MirrorOp, 1); 1367d06107adSMax Reitz *op = (MirrorOp){ 1368d06107adSMax Reitz .s = s, 1369d06107adSMax Reitz .offset = offset, 1370d06107adSMax Reitz .bytes = bytes, 1371d06107adSMax Reitz .is_active_write = true, 1372ce8cabbdSKevin Wolf .is_in_flight = true, 1373ead3f1bfSVladimir Sementsov-Ogievskiy .co = qemu_coroutine_self(), 1374d06107adSMax Reitz }; 1375d06107adSMax Reitz qemu_co_queue_init(&op->waiting_requests); 1376d06107adSMax Reitz QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); 1377d06107adSMax Reitz 1378d06107adSMax Reitz s->in_active_write_counter++; 1379d06107adSMax Reitz 1380d06107adSMax Reitz mirror_wait_on_conflicts(op, s, offset, bytes); 1381d06107adSMax Reitz 1382d06107adSMax Reitz bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1383d06107adSMax Reitz 1384d06107adSMax Reitz return op; 1385d06107adSMax Reitz } 1386d06107adSMax Reitz 1387d06107adSMax Reitz static void coroutine_fn active_write_settle(MirrorOp *op) 1388d06107adSMax Reitz { 1389d06107adSMax Reitz uint64_t start_chunk = op->offset / op->s->granularity; 1390d06107adSMax Reitz uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes, 1391d06107adSMax Reitz op->s->granularity); 1392d06107adSMax Reitz 1393d06107adSMax Reitz if (!--op->s->in_active_write_counter && op->s->actively_synced) { 1394d06107adSMax Reitz BdrvChild *source = op->s->mirror_top_bs->backing; 1395d06107adSMax Reitz 1396d06107adSMax Reitz if (QLIST_FIRST(&source->bs->parents) == source && 1397d06107adSMax Reitz QLIST_NEXT(source, next_parent) == NULL) 1398d06107adSMax Reitz { 1399d06107adSMax Reitz /* Assert that we are back in sync once all active write 1400d06107adSMax Reitz * operations are settled. 1401d06107adSMax Reitz * Note that we can only assert this if the mirror node 1402d06107adSMax Reitz * is the source node's only parent. 
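 * (With additional parents, writes could reach the source without
 * passing through this filter, so the dirty bitmap could
 * legitimately be non-empty here.)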
*/ 1403d06107adSMax Reitz assert(!bdrv_get_dirty_count(op->s->dirty_bitmap)); 1404d06107adSMax Reitz } 1405d06107adSMax Reitz } 1406d06107adSMax Reitz bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); 1407d06107adSMax Reitz QTAILQ_REMOVE(&op->s->ops_in_flight, op, next); 1408d06107adSMax Reitz qemu_co_queue_restart_all(&op->waiting_requests); 1409d06107adSMax Reitz g_free(op); 1410d06107adSMax Reitz } 1411d06107adSMax Reitz 14124ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, 1413f7ef38ddSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) 14144ef85a9cSKevin Wolf { 14154ef85a9cSKevin Wolf return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); 14164ef85a9cSKevin Wolf } 14174ef85a9cSKevin Wolf 1418d06107adSMax Reitz static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, 1419d06107adSMax Reitz MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, 1420d06107adSMax Reitz int flags) 1421d06107adSMax Reitz { 1422d06107adSMax Reitz MirrorOp *op = NULL; 1423d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1424d06107adSMax Reitz int ret = 0; 1425d06107adSMax Reitz bool copy_to_target; 1426d06107adSMax Reitz 1427d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1428*9b230ef9SHanna Reitz !job_is_cancelled(&s->job->common.job) && 1429d06107adSMax Reitz s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1430d06107adSMax Reitz 1431d06107adSMax Reitz if (copy_to_target) { 1432d06107adSMax Reitz op = active_write_prepare(s->job, offset, bytes); 1433d06107adSMax Reitz } 1434d06107adSMax Reitz 1435d06107adSMax Reitz switch (method) { 1436d06107adSMax Reitz case MIRROR_METHOD_COPY: 1437d06107adSMax Reitz ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); 1438d06107adSMax Reitz break; 1439d06107adSMax Reitz 1440d06107adSMax Reitz case MIRROR_METHOD_ZERO: 1441d06107adSMax Reitz ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); 1442d06107adSMax Reitz break; 1443d06107adSMax Reitz 1444d06107adSMax Reitz case MIRROR_METHOD_DISCARD: 14450b9fd3f4SFam Zheng ret = bdrv_co_pdiscard(bs->backing, offset, bytes); 1446d06107adSMax Reitz break; 1447d06107adSMax Reitz 1448d06107adSMax Reitz default: 1449d06107adSMax Reitz abort(); 1450d06107adSMax Reitz } 1451d06107adSMax Reitz 1452d06107adSMax Reitz if (ret < 0) { 1453d06107adSMax Reitz goto out; 1454d06107adSMax Reitz } 1455d06107adSMax Reitz 1456d06107adSMax Reitz if (copy_to_target) { 1457d06107adSMax Reitz do_sync_target_write(s->job, method, offset, bytes, qiov, flags); 1458d06107adSMax Reitz } 1459d06107adSMax Reitz 1460d06107adSMax Reitz out: 1461d06107adSMax Reitz if (copy_to_target) { 1462d06107adSMax Reitz active_write_settle(op); 1463d06107adSMax Reitz } 1464d06107adSMax Reitz return ret; 1465d06107adSMax Reitz } 1466d06107adSMax Reitz 14674ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, 1468e75abedaSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) 14694ef85a9cSKevin Wolf { 1470d06107adSMax Reitz MirrorBDSOpaque *s = bs->opaque; 1471d06107adSMax Reitz QEMUIOVector bounce_qiov; 1472d06107adSMax Reitz void *bounce_buf; 1473d06107adSMax Reitz int ret = 0; 1474d06107adSMax Reitz bool copy_to_target; 1475d06107adSMax Reitz 1476d06107adSMax Reitz copy_to_target = s->job->ret >= 0 && 1477*9b230ef9SHanna Reitz !job_is_cancelled(&s->job->common.job) && 1478d06107adSMax Reitz 
s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; 1479d06107adSMax Reitz 1480d06107adSMax Reitz if (copy_to_target) { 1481d06107adSMax Reitz /* The guest might concurrently modify the data to write; but 1482d06107adSMax Reitz * the data on source and destination must match, so we have 1483d06107adSMax Reitz * to use a bounce buffer if we are going to write to the 1484d06107adSMax Reitz * target now. */ 1485d06107adSMax Reitz bounce_buf = qemu_blockalign(bs, bytes); 1486d06107adSMax Reitz iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes); 1487d06107adSMax Reitz 1488d06107adSMax Reitz qemu_iovec_init(&bounce_qiov, 1); 1489d06107adSMax Reitz qemu_iovec_add(&bounce_qiov, bounce_buf, bytes); 1490d06107adSMax Reitz qiov = &bounce_qiov; 1491d06107adSMax Reitz } 1492d06107adSMax Reitz 1493d06107adSMax Reitz ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov, 1494d06107adSMax Reitz flags); 1495d06107adSMax Reitz 1496d06107adSMax Reitz if (copy_to_target) { 1497d06107adSMax Reitz qemu_iovec_destroy(&bounce_qiov); 1498d06107adSMax Reitz qemu_vfree(bounce_buf); 1499d06107adSMax Reitz } 1500d06107adSMax Reitz 1501d06107adSMax Reitz return ret; 15024ef85a9cSKevin Wolf } 15034ef85a9cSKevin Wolf 15044ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) 15054ef85a9cSKevin Wolf { 1506ce960aa9SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) { 1507ce960aa9SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_append in mirror_start_job */ 1508ce960aa9SVladimir Sementsov-Ogievskiy return 0; 1509ce960aa9SVladimir Sementsov-Ogievskiy } 15104ef85a9cSKevin Wolf return bdrv_co_flush(bs->backing->bs); 15114ef85a9cSKevin Wolf } 15124ef85a9cSKevin Wolf 15134ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, 1514f34b2bcfSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, BdrvRequestFlags flags) 15154ef85a9cSKevin Wolf { 1516d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL, 1517d06107adSMax Reitz flags); 15184ef85a9cSKevin Wolf } 15194ef85a9cSKevin Wolf 15204ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, 15210c802287SVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes) 15224ef85a9cSKevin Wolf { 1523d06107adSMax Reitz return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes, 1524d06107adSMax Reitz NULL, 0); 15254ef85a9cSKevin Wolf } 15264ef85a9cSKevin Wolf 1527998b3a1eSMax Reitz static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs) 1528fd4a6493SKevin Wolf { 152918775ff3SVladimir Sementsov-Ogievskiy if (bs->backing == NULL) { 153018775ff3SVladimir Sementsov-Ogievskiy /* we can be here after failed bdrv_attach_child in 153118775ff3SVladimir Sementsov-Ogievskiy * bdrv_set_backing_hd */ 153218775ff3SVladimir Sementsov-Ogievskiy return; 153318775ff3SVladimir Sementsov-Ogievskiy } 1534fd4a6493SKevin Wolf pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), 1535fd4a6493SKevin Wolf bs->backing->bs->filename); 1536fd4a6493SKevin Wolf } 1537fd4a6493SKevin Wolf 15384ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c, 1539bf8e925eSMax Reitz BdrvChildRole role, 1540e0995dc3SKevin Wolf BlockReopenQueue *reopen_queue, 15414ef85a9cSKevin Wolf uint64_t perm, uint64_t shared, 15424ef85a9cSKevin Wolf uint64_t *nperm, uint64_t *nshared) 15434ef85a9cSKevin Wolf { 1544f94dc3b4SMax Reitz MirrorBDSOpaque *s = bs->opaque; 1545f94dc3b4SMax 
Reitz 1546f94dc3b4SMax Reitz if (s->stop) { 1547f94dc3b4SMax Reitz /* 1548f94dc3b4SMax Reitz * If the job is to be stopped, we do not need to forward 1549f94dc3b4SMax Reitz * anything to the real image. 1550f94dc3b4SMax Reitz */ 1551f94dc3b4SMax Reitz *nperm = 0; 1552f94dc3b4SMax Reitz *nshared = BLK_PERM_ALL; 1553f94dc3b4SMax Reitz return; 1554f94dc3b4SMax Reitz } 1555f94dc3b4SMax Reitz 155653431b90SMax Reitz bdrv_default_perms(bs, c, role, reopen_queue, 155753431b90SMax Reitz perm, shared, nperm, nshared); 15584ef85a9cSKevin Wolf 155953431b90SMax Reitz if (s->is_commit) { 156053431b90SMax Reitz /* 156153431b90SMax Reitz * For commit jobs, we cannot take CONSISTENT_READ, because 156253431b90SMax Reitz * that permission is unshared for everything above the base 156353431b90SMax Reitz * node (except for filters on the base node). 156453431b90SMax Reitz * We also have to force-share the WRITE permission, or 156553431b90SMax Reitz * otherwise we would block ourselves at the base node (if 156653431b90SMax Reitz * writes are blocked for a node, they are also blocked for 156753431b90SMax Reitz * its backing file). 156853431b90SMax Reitz * (We could also share RESIZE, because it may be needed for 156953431b90SMax Reitz * the target if its size is less than the top node's; but 157053431b90SMax Reitz * bdrv_default_perms_for_cow() automatically shares RESIZE 157153431b90SMax Reitz * for backing nodes if WRITE is shared, so there is no need 157253431b90SMax Reitz * to do it here.) 157353431b90SMax Reitz */ 157453431b90SMax Reitz *nperm &= ~BLK_PERM_CONSISTENT_READ; 157553431b90SMax Reitz *nshared |= BLK_PERM_WRITE; 157653431b90SMax Reitz } 15774ef85a9cSKevin Wolf } 15784ef85a9cSKevin Wolf 15794ef85a9cSKevin Wolf /* Dummy node that provides consistent read to its users without requiring it 15804ef85a9cSKevin Wolf * from its backing file and that allows writes on the backing file chain. 
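 * (The filter is also the interception point that makes
 * MIRROR_COPY_MODE_WRITE_BLOCKING possible: guest writes pass through
 * bdrv_mirror_top_pwritev() above before being mirrored to the target.)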
*/ 15814ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = { 15824ef85a9cSKevin Wolf .format_name = "mirror_top", 15834ef85a9cSKevin Wolf .bdrv_co_preadv = bdrv_mirror_top_preadv, 15844ef85a9cSKevin Wolf .bdrv_co_pwritev = bdrv_mirror_top_pwritev, 15854ef85a9cSKevin Wolf .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes, 15864ef85a9cSKevin Wolf .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard, 15874ef85a9cSKevin Wolf .bdrv_co_flush = bdrv_mirror_top_flush, 1588fd4a6493SKevin Wolf .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename, 15894ef85a9cSKevin Wolf .bdrv_child_perm = bdrv_mirror_top_child_perm, 15906540fd15SMax Reitz 15916540fd15SMax Reitz .is_filter = true, 15924ef85a9cSKevin Wolf }; 15934ef85a9cSKevin Wolf 1594cc19f177SVladimir Sementsov-Ogievskiy static BlockJob *mirror_start_job( 1595cc19f177SVladimir Sementsov-Ogievskiy const char *job_id, BlockDriverState *bs, 159647970dfbSJohn Snow int creation_flags, BlockDriverState *target, 159747970dfbSJohn Snow const char *replaces, int64_t speed, 159847970dfbSJohn Snow uint32_t granularity, int64_t buf_size, 1599274fcceeSMax Reitz BlockMirrorBackingMode backing_mode, 1600cdf3bc93SMax Reitz bool zero_target, 160103544a6eSFam Zheng BlockdevOnError on_source_error, 1602b952b558SPaolo Bonzini BlockdevOnError on_target_error, 16030fc9f8eaSFam Zheng bool unmap, 1604097310b5SMarkus Armbruster BlockCompletionFunc *cb, 160551ccfa2dSFam Zheng void *opaque, 160603544a6eSFam Zheng const BlockJobDriver *driver, 1607b49f7eadSWen Congyang bool is_none_mode, BlockDriverState *base, 160851ccfa2dSFam Zheng bool auto_complete, const char *filter_node_name, 1609481debaaSMax Reitz bool is_mirror, MirrorCopyMode copy_mode, 161051ccfa2dSFam Zheng Error **errp) 1611893f7ebaSPaolo Bonzini { 1612893f7ebaSPaolo Bonzini MirrorBlockJob *s; 1613429076e8SMax Reitz MirrorBDSOpaque *bs_opaque; 16144ef85a9cSKevin Wolf BlockDriverState *mirror_top_bs; 16154ef85a9cSKevin Wolf bool target_is_backing; 16163f072a7fSMax Reitz uint64_t target_perms, target_shared_perms; 1617d7086422SKevin Wolf int ret; 1618893f7ebaSPaolo Bonzini 1619eee13dfeSPaolo Bonzini if (granularity == 0) { 1620341ebc2fSJohn Snow granularity = bdrv_get_default_bitmap_granularity(target); 1621eee13dfeSPaolo Bonzini } 1622eee13dfeSPaolo Bonzini 162331826642SEric Blake assert(is_power_of_2(granularity)); 1624eee13dfeSPaolo Bonzini 162548ac0a4dSWen Congyang if (buf_size < 0) { 162648ac0a4dSWen Congyang error_setg(errp, "Invalid parameter 'buf-size'"); 1627cc19f177SVladimir Sementsov-Ogievskiy return NULL; 162848ac0a4dSWen Congyang } 162948ac0a4dSWen Congyang 163048ac0a4dSWen Congyang if (buf_size == 0) { 163148ac0a4dSWen Congyang buf_size = DEFAULT_MIRROR_BUF_SIZE; 163248ac0a4dSWen Congyang } 16335bc361b8SFam Zheng 16343f072a7fSMax Reitz if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) { 163586fae10cSKevin Wolf error_setg(errp, "Can't mirror node into itself"); 1636cc19f177SVladimir Sementsov-Ogievskiy return NULL; 163786fae10cSKevin Wolf } 163886fae10cSKevin Wolf 163953431b90SMax Reitz target_is_backing = bdrv_chain_contains(bs, target); 164053431b90SMax Reitz 16414ef85a9cSKevin Wolf /* In the case of active commit, add dummy driver to provide consistent 16424ef85a9cSKevin Wolf * reads on the top, while disabling it in the intermediate nodes, and make 16434ef85a9cSKevin Wolf * the backing chain writable. 
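 * The resulting graph is roughly (sketch):
 *
 *     former parents of bs (guest device, ...)
 *                    |
 *          mirror_top_bs (this filter)
 *                    |
 *                   bs ---backing chain---> base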
*/ 16446cdbceb1SKevin Wolf mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, 16456cdbceb1SKevin Wolf BDRV_O_RDWR, errp); 16464ef85a9cSKevin Wolf if (mirror_top_bs == NULL) { 1647cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1648893f7ebaSPaolo Bonzini } 1649d3c8c674SKevin Wolf if (!filter_node_name) { 1650d3c8c674SKevin Wolf mirror_top_bs->implicit = true; 1651d3c8c674SKevin Wolf } 1652e5182c1cSMax Reitz 1653e5182c1cSMax Reitz /* So that we can always drop this node */ 1654e5182c1cSMax Reitz mirror_top_bs->never_freeze = true; 1655e5182c1cSMax Reitz 16564ef85a9cSKevin Wolf mirror_top_bs->total_sectors = bs->total_sectors; 1657228345bfSMax Reitz mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED; 165880f5c33fSKevin Wolf mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | 165980f5c33fSKevin Wolf BDRV_REQ_NO_FALLBACK; 1660429076e8SMax Reitz bs_opaque = g_new0(MirrorBDSOpaque, 1); 1661429076e8SMax Reitz mirror_top_bs->opaque = bs_opaque; 1662893f7ebaSPaolo Bonzini 166353431b90SMax Reitz bs_opaque->is_commit = target_is_backing; 166453431b90SMax Reitz 16654ef85a9cSKevin Wolf bdrv_drained_begin(bs); 1666934aee14SVladimir Sementsov-Ogievskiy ret = bdrv_append(mirror_top_bs, bs, errp); 16674ef85a9cSKevin Wolf bdrv_drained_end(bs); 16684ef85a9cSKevin Wolf 1669934aee14SVladimir Sementsov-Ogievskiy if (ret < 0) { 1670b2c2832cSKevin Wolf bdrv_unref(mirror_top_bs); 1671cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1672b2c2832cSKevin Wolf } 1673b2c2832cSKevin Wolf 16744ef85a9cSKevin Wolf /* Make sure that the source is not resized while the job is running */ 167575859b94SJohn Snow s = block_job_create(job_id, driver, NULL, mirror_top_bs, 16764ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ, 16774ef85a9cSKevin Wolf BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | 16784ef85a9cSKevin Wolf BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed, 16794ef85a9cSKevin Wolf creation_flags, cb, opaque, errp); 16804ef85a9cSKevin Wolf if (!s) { 16814ef85a9cSKevin Wolf goto fail; 16824ef85a9cSKevin Wolf } 1683429076e8SMax Reitz bs_opaque->job = s; 1684429076e8SMax Reitz 16857a25fcd0SMax Reitz /* The block job now has a reference to this node */ 16867a25fcd0SMax Reitz bdrv_unref(mirror_top_bs); 16877a25fcd0SMax Reitz 16884ef85a9cSKevin Wolf s->mirror_top_bs = mirror_top_bs; 16894ef85a9cSKevin Wolf 16904ef85a9cSKevin Wolf /* No resize for the target either; while the mirror is still running, a 16914ef85a9cSKevin Wolf * consistent read isn't necessarily possible. We could possibly allow 16924ef85a9cSKevin Wolf * writes and graph modifications, though it would likely defeat the 16934ef85a9cSKevin Wolf * purpose of a mirror, so leave them blocked for now. 16944ef85a9cSKevin Wolf * 16954ef85a9cSKevin Wolf * In the case of active commit, things look a bit different, though, 16964ef85a9cSKevin Wolf * because the target is an already populated backing file in active use. 
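 * (The guest keeps reading from it through the overlay that is being
 * committed, so other users may access it concurrently.)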
16974ef85a9cSKevin Wolf * We can allow anything except resize there. */
16983f072a7fSMax Reitz
16993f072a7fSMax Reitz target_perms = BLK_PERM_WRITE;
17003f072a7fSMax Reitz target_shared_perms = BLK_PERM_WRITE_UNCHANGED;
17013f072a7fSMax Reitz
17023f072a7fSMax Reitz if (target_is_backing) {
17033f072a7fSMax Reitz int64_t bs_size, target_size;
17043f072a7fSMax Reitz bs_size = bdrv_getlength(bs);
17053f072a7fSMax Reitz if (bs_size < 0) {
17063f072a7fSMax Reitz error_setg_errno(errp, -bs_size,
17073f072a7fSMax Reitz "Could not inquire top image size");
17083f072a7fSMax Reitz goto fail;
17093f072a7fSMax Reitz }
17103f072a7fSMax Reitz
17113f072a7fSMax Reitz target_size = bdrv_getlength(target);
17123f072a7fSMax Reitz if (target_size < 0) {
17133f072a7fSMax Reitz error_setg_errno(errp, -target_size,
17143f072a7fSMax Reitz "Could not inquire base image size");
17153f072a7fSMax Reitz goto fail;
17163f072a7fSMax Reitz }
17173f072a7fSMax Reitz
17183f072a7fSMax Reitz if (target_size < bs_size) {
17193f072a7fSMax Reitz target_perms |= BLK_PERM_RESIZE;
17203f072a7fSMax Reitz }
17213f072a7fSMax Reitz
17223f072a7fSMax Reitz target_shared_perms |= BLK_PERM_CONSISTENT_READ
17233f072a7fSMax Reitz | BLK_PERM_WRITE
17243f072a7fSMax Reitz | BLK_PERM_GRAPH_MOD;
17253f072a7fSMax Reitz } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
17263f072a7fSMax Reitz /*
17273f072a7fSMax Reitz * We may want to allow this in the future, but it would
17283f072a7fSMax Reitz * require taking some extra care.
17293f072a7fSMax Reitz */
17303f072a7fSMax Reitz error_setg(errp, "Cannot mirror to a filter on top of a node in the "
17313f072a7fSMax Reitz "source's backing chain");
17323f072a7fSMax Reitz goto fail;
17333f072a7fSMax Reitz }
17343f072a7fSMax Reitz
17353f072a7fSMax Reitz if (backing_mode != MIRROR_LEAVE_BACKING_CHAIN) {
17363f072a7fSMax Reitz target_perms |= BLK_PERM_GRAPH_MOD;
17373f072a7fSMax Reitz }
17383f072a7fSMax Reitz
1739d861ab3aSKevin Wolf s->target = blk_new(s->common.job.aio_context,
17403f072a7fSMax Reitz target_perms, target_shared_perms);
1741d7086422SKevin Wolf ret = blk_insert_bs(s->target, target, errp);
1742d7086422SKevin Wolf if (ret < 0) {
17434ef85a9cSKevin Wolf goto fail;
1744d7086422SKevin Wolf }
1745045a2f82SFam Zheng if (is_mirror) {
1746045a2f82SFam Zheng /* XXX: Mirror target could be an NBD server of the target QEMU in the
1747045a2f82SFam Zheng * case of non-shared block migration. To allow migration completion,
1748045a2f82SFam Zheng * we have to allow "inactivate" of the target BB. When that happens,
1749045a2f82SFam Zheng * we know the job is drained, and the vcpus are stopped, so no write
1750045a2f82SFam Zheng * operation will be performed. Block layer already has assertions to
1751045a2f82SFam Zheng * ensure that.
*/ 1752045a2f82SFam Zheng blk_set_force_allow_inactivate(s->target); 1753045a2f82SFam Zheng } 17549ff7f0dfSKevin Wolf blk_set_allow_aio_context_change(s->target, true); 1755cf312932SKevin Wolf blk_set_disable_request_queuing(s->target, true); 1756e253f4b8SKevin Wolf 175709158f00SBenoît Canet s->replaces = g_strdup(replaces); 1758b952b558SPaolo Bonzini s->on_source_error = on_source_error; 1759b952b558SPaolo Bonzini s->on_target_error = on_target_error; 176003544a6eSFam Zheng s->is_none_mode = is_none_mode; 1761274fcceeSMax Reitz s->backing_mode = backing_mode; 1762cdf3bc93SMax Reitz s->zero_target = zero_target; 1763481debaaSMax Reitz s->copy_mode = copy_mode; 17645bc361b8SFam Zheng s->base = base; 17653f072a7fSMax Reitz s->base_overlay = bdrv_find_overlay(bs, base); 1766eee13dfeSPaolo Bonzini s->granularity = granularity; 176748ac0a4dSWen Congyang s->buf_size = ROUND_UP(buf_size, granularity); 17680fc9f8eaSFam Zheng s->unmap = unmap; 1769b49f7eadSWen Congyang if (auto_complete) { 1770b49f7eadSWen Congyang s->should_complete = true; 1771b49f7eadSWen Congyang } 1772b812f671SPaolo Bonzini 17730db6e54aSFam Zheng s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); 1774b8afb520SFam Zheng if (!s->dirty_bitmap) { 177588f9d1b3SKevin Wolf goto fail; 1776b8afb520SFam Zheng } 1777dbdf699cSVladimir Sementsov-Ogievskiy if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) { 1778dbdf699cSVladimir Sementsov-Ogievskiy bdrv_disable_dirty_bitmap(s->dirty_bitmap); 1779dbdf699cSVladimir Sementsov-Ogievskiy } 178010f3cd15SAlberto Garcia 178167b24427SAlberto Garcia ret = block_job_add_bdrv(&s->common, "source", bs, 0, 178267b24427SAlberto Garcia BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | 178367b24427SAlberto Garcia BLK_PERM_CONSISTENT_READ, 178467b24427SAlberto Garcia errp); 178567b24427SAlberto Garcia if (ret < 0) { 178667b24427SAlberto Garcia goto fail; 178767b24427SAlberto Garcia } 178867b24427SAlberto Garcia 17894ef85a9cSKevin Wolf /* Required permissions are already taken with blk_new() */ 179076d554e2SKevin Wolf block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL, 179176d554e2SKevin Wolf &error_abort); 179276d554e2SKevin Wolf 1793f3ede4b0SAlberto Garcia /* In commit_active_start() all intermediate nodes disappear, so 1794f3ede4b0SAlberto Garcia * any jobs in them must be blocked */ 17954ef85a9cSKevin Wolf if (target_is_backing) { 17963f072a7fSMax Reitz BlockDriverState *iter, *filtered_target; 17973f072a7fSMax Reitz uint64_t iter_shared_perms; 17983f072a7fSMax Reitz 17993f072a7fSMax Reitz /* 18003f072a7fSMax Reitz * The topmost node with 18013f072a7fSMax Reitz * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target) 18023f072a7fSMax Reitz */ 18033f072a7fSMax Reitz filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target)); 18043f072a7fSMax Reitz 18053f072a7fSMax Reitz assert(bdrv_skip_filters(filtered_target) == 18063f072a7fSMax Reitz bdrv_skip_filters(target)); 18073f072a7fSMax Reitz 18083f072a7fSMax Reitz /* 18093f072a7fSMax Reitz * XXX BLK_PERM_WRITE needs to be allowed so we don't block 18104ef85a9cSKevin Wolf * ourselves at s->base (if writes are blocked for a node, they are 18114ef85a9cSKevin Wolf * also blocked for its backing file). The other options would be a 18123f072a7fSMax Reitz * second filter driver above s->base (== target). 
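 * (The loop below therefore shares WRITE on every intermediate node,
 * and additionally CONSISTENT_READ once it reaches the filters sitting
 * directly on top of the base.)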
18133f072a7fSMax Reitz */ 18143f072a7fSMax Reitz iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE; 18153f072a7fSMax Reitz 18163f072a7fSMax Reitz for (iter = bdrv_filter_or_cow_bs(bs); iter != target; 18173f072a7fSMax Reitz iter = bdrv_filter_or_cow_bs(iter)) 18183f072a7fSMax Reitz { 18193f072a7fSMax Reitz if (iter == filtered_target) { 18203f072a7fSMax Reitz /* 18213f072a7fSMax Reitz * From here on, all nodes are filters on the base. 18223f072a7fSMax Reitz * This allows us to share BLK_PERM_CONSISTENT_READ. 18233f072a7fSMax Reitz */ 18243f072a7fSMax Reitz iter_shared_perms |= BLK_PERM_CONSISTENT_READ; 18253f072a7fSMax Reitz } 18263f072a7fSMax Reitz 18274ef85a9cSKevin Wolf ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, 18283f072a7fSMax Reitz iter_shared_perms, errp); 18294ef85a9cSKevin Wolf if (ret < 0) { 18304ef85a9cSKevin Wolf goto fail; 18314ef85a9cSKevin Wolf } 1832f3ede4b0SAlberto Garcia } 1833ef53dc09SAlberto Garcia 1834ef53dc09SAlberto Garcia if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) { 1835ef53dc09SAlberto Garcia goto fail; 1836ef53dc09SAlberto Garcia } 1837f3ede4b0SAlberto Garcia } 183810f3cd15SAlberto Garcia 183912aa4082SMax Reitz QTAILQ_INIT(&s->ops_in_flight); 184012aa4082SMax Reitz 18415ccac6f1SJohn Snow trace_mirror_start(bs, s, opaque); 1842da01ff7fSKevin Wolf job_start(&s->common.job); 1843cc19f177SVladimir Sementsov-Ogievskiy 1844cc19f177SVladimir Sementsov-Ogievskiy return &s->common; 18454ef85a9cSKevin Wolf 18464ef85a9cSKevin Wolf fail: 18474ef85a9cSKevin Wolf if (s) { 18487a25fcd0SMax Reitz /* Make sure this BDS does not go away until we have completed the graph 18497a25fcd0SMax Reitz * changes below */ 18507a25fcd0SMax Reitz bdrv_ref(mirror_top_bs); 18517a25fcd0SMax Reitz 18524ef85a9cSKevin Wolf g_free(s->replaces); 18534ef85a9cSKevin Wolf blk_unref(s->target); 1854429076e8SMax Reitz bs_opaque->job = NULL; 1855e917e2cbSAlberto Garcia if (s->dirty_bitmap) { 18565deb6cbdSVladimir Sementsov-Ogievskiy bdrv_release_dirty_bitmap(s->dirty_bitmap); 1857e917e2cbSAlberto Garcia } 18584ad35181SKevin Wolf job_early_fail(&s->common.job); 18594ef85a9cSKevin Wolf } 18604ef85a9cSKevin Wolf 1861f94dc3b4SMax Reitz bs_opaque->stop = true; 1862f94dc3b4SMax Reitz bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, 1863c1cef672SFam Zheng &error_abort); 18643f072a7fSMax Reitz bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); 18657a25fcd0SMax Reitz 18667a25fcd0SMax Reitz bdrv_unref(mirror_top_bs); 1867cc19f177SVladimir Sementsov-Ogievskiy 1868cc19f177SVladimir Sementsov-Ogievskiy return NULL; 1869893f7ebaSPaolo Bonzini } 187003544a6eSFam Zheng 187171aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs, 187271aa9867SAlberto Garcia BlockDriverState *target, const char *replaces, 1873a1999b33SJohn Snow int creation_flags, int64_t speed, 1874a1999b33SJohn Snow uint32_t granularity, int64_t buf_size, 1875274fcceeSMax Reitz MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, 1876cdf3bc93SMax Reitz bool zero_target, 1877274fcceeSMax Reitz BlockdevOnError on_source_error, 187803544a6eSFam Zheng BlockdevOnError on_target_error, 1879481debaaSMax Reitz bool unmap, const char *filter_node_name, 1880481debaaSMax Reitz MirrorCopyMode copy_mode, Error **errp) 188103544a6eSFam Zheng { 188203544a6eSFam Zheng bool is_none_mode; 188303544a6eSFam Zheng BlockDriverState *base; 188403544a6eSFam Zheng 1885c8b56501SJohn Snow if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) || 1886c8b56501SJohn Snow 
(mode == MIRROR_SYNC_MODE_BITMAP)) { 1887c8b56501SJohn Snow error_setg(errp, "Sync mode '%s' not supported", 1888c8b56501SJohn Snow MirrorSyncMode_str(mode)); 1889d58d8453SJohn Snow return; 1890d58d8453SJohn Snow } 189103544a6eSFam Zheng is_none_mode = mode == MIRROR_SYNC_MODE_NONE; 18923f072a7fSMax Reitz base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL; 1893a1999b33SJohn Snow mirror_start_job(job_id, bs, creation_flags, target, replaces, 1894cdf3bc93SMax Reitz speed, granularity, buf_size, backing_mode, zero_target, 189551ccfa2dSFam Zheng on_source_error, on_target_error, unmap, NULL, NULL, 18966cdbceb1SKevin Wolf &mirror_job_driver, is_none_mode, base, false, 1897481debaaSMax Reitz filter_node_name, true, copy_mode, errp); 189803544a6eSFam Zheng } 189903544a6eSFam Zheng 1900cc19f177SVladimir Sementsov-Ogievskiy BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, 190147970dfbSJohn Snow BlockDriverState *base, int creation_flags, 190247970dfbSJohn Snow int64_t speed, BlockdevOnError on_error, 19030db832f4SKevin Wolf const char *filter_node_name, 190478bbd910SFam Zheng BlockCompletionFunc *cb, void *opaque, 190578bbd910SFam Zheng bool auto_complete, Error **errp) 190603544a6eSFam Zheng { 19071ba79388SAlberto Garcia bool base_read_only; 1908eb5becc1SVladimir Sementsov-Ogievskiy BlockJob *job; 19094da83585SJeff Cody 19101ba79388SAlberto Garcia base_read_only = bdrv_is_read_only(base); 19114da83585SJeff Cody 19121ba79388SAlberto Garcia if (base_read_only) { 19131ba79388SAlberto Garcia if (bdrv_reopen_set_read_only(base, false, errp) < 0) { 1914cc19f177SVladimir Sementsov-Ogievskiy return NULL; 191520a63d2cSFam Zheng } 19161ba79388SAlberto Garcia } 19174da83585SJeff Cody 1918eb5becc1SVladimir Sementsov-Ogievskiy job = mirror_start_job( 1919cc19f177SVladimir Sementsov-Ogievskiy job_id, bs, creation_flags, base, NULL, speed, 0, 0, 1920cdf3bc93SMax Reitz MIRROR_LEAVE_BACKING_CHAIN, false, 192151ccfa2dSFam Zheng on_error, on_error, true, cb, opaque, 19226cdbceb1SKevin Wolf &commit_active_job_driver, false, base, auto_complete, 1923481debaaSMax Reitz filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND, 1924eb5becc1SVladimir Sementsov-Ogievskiy errp); 1925eb5becc1SVladimir Sementsov-Ogievskiy if (!job) { 19264da83585SJeff Cody goto error_restore_flags; 19274da83585SJeff Cody } 19284da83585SJeff Cody 1929eb5becc1SVladimir Sementsov-Ogievskiy return job; 19304da83585SJeff Cody 19314da83585SJeff Cody error_restore_flags: 19324da83585SJeff Cody /* ignore error and errp for bdrv_reopen, because we want to propagate 19334da83585SJeff Cody * the original error */ 19341ba79388SAlberto Garcia if (base_read_only) { 19351ba79388SAlberto Garcia bdrv_reopen_set_read_only(base, true, NULL); 19361ba79388SAlberto Garcia } 1937cc19f177SVladimir Sementsov-Ogievskiy return NULL; 193803544a6eSFam Zheng } 1939
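
/*
 * Usage sketch (hypothetical QMP session; device and file names are
 * examples): mirror_start() backs the drive-mirror and blockdev-mirror
 * commands, while commit_active_start() backs block-commit of the
 * active layer.
 *
 *   -> { "execute": "drive-mirror",
 *        "arguments": { "device": "drive0", "target": "/tmp/target.qcow2",
 *                       "sync": "full", "format": "qcow2" } }
 *   <- { "return": {} }
 *
 * Once the job emits BLOCK_JOB_READY, block-job-complete pivots the
 * device to the target by way of mirror_complete() above.
 */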