xref: /qemu/block/mirror.c (revision 782d97efec66d743f87f28f1d040cdfacc380b1e)
1893f7ebaSPaolo Bonzini /*
2893f7ebaSPaolo Bonzini  * Image mirroring
3893f7ebaSPaolo Bonzini  *
4893f7ebaSPaolo Bonzini  * Copyright Red Hat, Inc. 2012
5893f7ebaSPaolo Bonzini  *
6893f7ebaSPaolo Bonzini  * Authors:
7893f7ebaSPaolo Bonzini  *  Paolo Bonzini  <pbonzini@redhat.com>
8893f7ebaSPaolo Bonzini  *
9893f7ebaSPaolo Bonzini  * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10893f7ebaSPaolo Bonzini  * See the COPYING.LIB file in the top-level directory.
11893f7ebaSPaolo Bonzini  *
12893f7ebaSPaolo Bonzini  */
13893f7ebaSPaolo Bonzini 
1480c71a24SPeter Maydell #include "qemu/osdep.h"
15fd4a6493SKevin Wolf #include "qemu/cutils.h"
16893f7ebaSPaolo Bonzini #include "trace.h"
17c87621eaSJohn Snow #include "block/blockjob_int.h"
18737e150eSPaolo Bonzini #include "block/block_int.h"
19373340b2SMax Reitz #include "sysemu/block-backend.h"
20da34e65cSMarkus Armbruster #include "qapi/error.h"
21cc7a8ea7SMarkus Armbruster #include "qapi/qmp/qerror.h"
22893f7ebaSPaolo Bonzini #include "qemu/ratelimit.h"
23b812f671SPaolo Bonzini #include "qemu/bitmap.h"
24893f7ebaSPaolo Bonzini 
25893f7ebaSPaolo Bonzini #define SLICE_TIME    100000000ULL /* ns */
26402a4741SPaolo Bonzini #define MAX_IN_FLIGHT 16
27b436982fSEric Blake #define MAX_IO_BYTES (1 << 20) /* 1 MiB */
28b436982fSEric Blake #define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
29402a4741SPaolo Bonzini 
30402a4741SPaolo Bonzini /* The mirroring buffer is a list of granularity-sized chunks.
31402a4741SPaolo Bonzini  * Free chunks are tracked in a free list.
32402a4741SPaolo Bonzini  */
33402a4741SPaolo Bonzini typedef struct MirrorBuffer {
34402a4741SPaolo Bonzini     QSIMPLEQ_ENTRY(MirrorBuffer) next;
35402a4741SPaolo Bonzini } MirrorBuffer;
36893f7ebaSPaolo Bonzini 
37893f7ebaSPaolo Bonzini typedef struct MirrorBlockJob {
38893f7ebaSPaolo Bonzini     BlockJob common;
39893f7ebaSPaolo Bonzini     RateLimit limit;
40e253f4b8SKevin Wolf     BlockBackend *target;
414ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs;
424ef85a9cSKevin Wolf     BlockDriverState *source;
435bc361b8SFam Zheng     BlockDriverState *base;
444ef85a9cSKevin Wolf 
4509158f00SBenoît Canet     /* The name of the graph node to replace */
4609158f00SBenoît Canet     char *replaces;
4709158f00SBenoît Canet     /* The BDS to replace */
4809158f00SBenoît Canet     BlockDriverState *to_replace;
4909158f00SBenoît Canet     /* Used to block operations on the drive-mirror-replace target */
5009158f00SBenoît Canet     Error *replace_blocker;
5103544a6eSFam Zheng     bool is_none_mode;
52274fcceeSMax Reitz     BlockMirrorBackingMode backing_mode;
53b952b558SPaolo Bonzini     BlockdevOnError on_source_error, on_target_error;
54d63ffd87SPaolo Bonzini     bool synced;
55d63ffd87SPaolo Bonzini     bool should_complete;
56eee13dfeSPaolo Bonzini     int64_t granularity;
57b812f671SPaolo Bonzini     size_t buf_size;
58b21c7652SMax Reitz     int64_t bdev_length;
59b812f671SPaolo Bonzini     unsigned long *cow_bitmap;
60e4654d2dSFam Zheng     BdrvDirtyBitmap *dirty_bitmap;
61dc162c8eSFam Zheng     BdrvDirtyBitmapIter *dbi;
62893f7ebaSPaolo Bonzini     uint8_t *buf;
63402a4741SPaolo Bonzini     QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
64402a4741SPaolo Bonzini     int buf_free_count;
65bd48bde8SPaolo Bonzini 
6649efb1f5SDenis V. Lunev     uint64_t last_pause_ns;
67402a4741SPaolo Bonzini     unsigned long *in_flight_bitmap;
68bd48bde8SPaolo Bonzini     int in_flight;
69b436982fSEric Blake     int64_t bytes_in_flight;
70bd48bde8SPaolo Bonzini     int ret;
710fc9f8eaSFam Zheng     bool unmap;
72e424aff5SKevin Wolf     bool waiting_for_io;
73b436982fSEric Blake     int target_cluster_size;
74e5b43573SFam Zheng     int max_iov;
7590ab48ebSAnton Nefedov     bool initial_zeroing_ongoing;
76893f7ebaSPaolo Bonzini } MirrorBlockJob;
77893f7ebaSPaolo Bonzini 
78bd48bde8SPaolo Bonzini typedef struct MirrorOp {
79bd48bde8SPaolo Bonzini     MirrorBlockJob *s;
80bd48bde8SPaolo Bonzini     QEMUIOVector qiov;
81b436982fSEric Blake     int64_t offset;
82b436982fSEric Blake     uint64_t bytes;
83bd48bde8SPaolo Bonzini } MirrorOp;
84bd48bde8SPaolo Bonzini 
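/* Map an I/O error to the action configured for source (read) or target (write)
 * errors.  Any error also clears s->synced, so the job reports readiness again
 * only once the target has caught up. */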
85b952b558SPaolo Bonzini static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
86b952b558SPaolo Bonzini                                             int error)
87b952b558SPaolo Bonzini {
88b952b558SPaolo Bonzini     s->synced = false;
89b952b558SPaolo Bonzini     if (read) {
9081e254dcSKevin Wolf         return block_job_error_action(&s->common, s->on_source_error,
9181e254dcSKevin Wolf                                       true, error);
92b952b558SPaolo Bonzini     } else {
9381e254dcSKevin Wolf         return block_job_error_action(&s->common, s->on_target_error,
9481e254dcSKevin Wolf                                       false, error);
95b952b558SPaolo Bonzini     }
96b952b558SPaolo Bonzini }
97b952b558SPaolo Bonzini 
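/* Retire one in-flight operation: return its buffer chunks to the free list,
 * clear its bits in the in-flight bitmap, update the COW bitmap and progress
 * accounting on success, and wake the job coroutine if it is waiting for I/O. */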
98bd48bde8SPaolo Bonzini static void mirror_iteration_done(MirrorOp *op, int ret)
99bd48bde8SPaolo Bonzini {
100bd48bde8SPaolo Bonzini     MirrorBlockJob *s = op->s;
101402a4741SPaolo Bonzini     struct iovec *iov;
102bd48bde8SPaolo Bonzini     int64_t chunk_num;
103b436982fSEric Blake     int i, nb_chunks;
104bd48bde8SPaolo Bonzini 
105b436982fSEric Blake     trace_mirror_iteration_done(s, op->offset, op->bytes, ret);
106bd48bde8SPaolo Bonzini 
107bd48bde8SPaolo Bonzini     s->in_flight--;
108b436982fSEric Blake     s->bytes_in_flight -= op->bytes;
109402a4741SPaolo Bonzini     iov = op->qiov.iov;
110402a4741SPaolo Bonzini     for (i = 0; i < op->qiov.niov; i++) {
111402a4741SPaolo Bonzini         MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
112402a4741SPaolo Bonzini         QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
113402a4741SPaolo Bonzini         s->buf_free_count++;
114402a4741SPaolo Bonzini     }
115402a4741SPaolo Bonzini 
116b436982fSEric Blake     chunk_num = op->offset / s->granularity;
117b436982fSEric Blake     nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
118402a4741SPaolo Bonzini     bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
119b21c7652SMax Reitz     if (ret >= 0) {
120b21c7652SMax Reitz         if (s->cow_bitmap) {
121bd48bde8SPaolo Bonzini             bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
122bd48bde8SPaolo Bonzini         }
12390ab48ebSAnton Nefedov         if (!s->initial_zeroing_ongoing) {
124b436982fSEric Blake             s->common.offset += op->bytes;
125b21c7652SMax Reitz         }
12690ab48ebSAnton Nefedov     }
1276df3bf8eSZhang Min     qemu_iovec_destroy(&op->qiov);
128c84b3192SPaolo Bonzini     g_free(op);
1297b770c72SStefan Hajnoczi 
130e424aff5SKevin Wolf     if (s->waiting_for_io) {
1310b8b8753SPaolo Bonzini         qemu_coroutine_enter(s->common.co);
132bd48bde8SPaolo Bonzini     }
1337b770c72SStefan Hajnoczi }
134bd48bde8SPaolo Bonzini 
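/* Completion callback for target writes, write-zeroes and discards: on failure
 * the affected range is marked dirty again and the configured error policy is
 * applied before the operation is retired. */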
135bd48bde8SPaolo Bonzini static void mirror_write_complete(void *opaque, int ret)
136bd48bde8SPaolo Bonzini {
137bd48bde8SPaolo Bonzini     MirrorOp *op = opaque;
138bd48bde8SPaolo Bonzini     MirrorBlockJob *s = op->s;
139b9e413ddSPaolo Bonzini 
140b9e413ddSPaolo Bonzini     aio_context_acquire(blk_get_aio_context(s->common.blk));
141bd48bde8SPaolo Bonzini     if (ret < 0) {
142bd48bde8SPaolo Bonzini         BlockErrorAction action;
143bd48bde8SPaolo Bonzini 
144b436982fSEric Blake         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
145b436982fSEric Blake                               op->bytes >> BDRV_SECTOR_BITS);
146bd48bde8SPaolo Bonzini         action = mirror_error_action(s, false, -ret);
147a589569fSWenchao Xia         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
148bd48bde8SPaolo Bonzini             s->ret = ret;
149bd48bde8SPaolo Bonzini         }
150bd48bde8SPaolo Bonzini     }
151bd48bde8SPaolo Bonzini     mirror_iteration_done(op, ret);
152b9e413ddSPaolo Bonzini     aio_context_release(blk_get_aio_context(s->common.blk));
153bd48bde8SPaolo Bonzini }
154bd48bde8SPaolo Bonzini 
155bd48bde8SPaolo Bonzini static void mirror_read_complete(void *opaque, int ret)
156bd48bde8SPaolo Bonzini {
157bd48bde8SPaolo Bonzini     MirrorOp *op = opaque;
158bd48bde8SPaolo Bonzini     MirrorBlockJob *s = op->s;
159b9e413ddSPaolo Bonzini 
160b9e413ddSPaolo Bonzini     aio_context_acquire(blk_get_aio_context(s->common.blk));
161bd48bde8SPaolo Bonzini     if (ret < 0) {
162bd48bde8SPaolo Bonzini         BlockErrorAction action;
163bd48bde8SPaolo Bonzini 
164b436982fSEric Blake         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
165b436982fSEric Blake                               op->bytes >> BDRV_SECTOR_BITS);
166bd48bde8SPaolo Bonzini         action = mirror_error_action(s, true, -ret);
167a589569fSWenchao Xia         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
168bd48bde8SPaolo Bonzini             s->ret = ret;
169bd48bde8SPaolo Bonzini         }
170bd48bde8SPaolo Bonzini 
171bd48bde8SPaolo Bonzini         mirror_iteration_done(op, ret);
172b9e413ddSPaolo Bonzini     } else {
173b436982fSEric Blake         blk_aio_pwritev(s->target, op->offset, &op->qiov,
17473698c30SEric Blake                         0, mirror_write_complete, op);
175bd48bde8SPaolo Bonzini     }
176b9e413ddSPaolo Bonzini     aio_context_release(blk_get_aio_context(s->common.blk));
177b9e413ddSPaolo Bonzini }
178bd48bde8SPaolo Bonzini 
179*782d97efSEric Blake /* Clip bytes relative to offset to not exceed end-of-file */
180*782d97efSEric Blake static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
181*782d97efSEric Blake                                         int64_t offset,
182*782d97efSEric Blake                                         int64_t bytes)
183*782d97efSEric Blake {
184*782d97efSEric Blake     return MIN(bytes, s->bdev_length - offset);
185*782d97efSEric Blake }
186*782d97efSEric Blake 
187*782d97efSEric Blake /* Clip nb_sectors relative to sector_num to not exceed end-of-file */
188931e5260SEric Blake static inline int mirror_clip_sectors(MirrorBlockJob *s,
1894150ae60SFam Zheng                                       int64_t sector_num,
190931e5260SEric Blake                                       int nb_sectors)
1914150ae60SFam Zheng {
192931e5260SEric Blake     return MIN(nb_sectors,
1934150ae60SFam Zheng                s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
1944150ae60SFam Zheng }
1954150ae60SFam Zheng 
196*782d97efSEric Blake /* Round offset and/or bytes to target cluster if COW is needed, and
197*782d97efSEric Blake  * return the offset of the adjusted tail relative to the original tail. */
198*782d97efSEric Blake static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
199*782d97efSEric Blake                             unsigned int *bytes)
200893f7ebaSPaolo Bonzini {
201e5b43573SFam Zheng     bool need_cow;
202e5b43573SFam Zheng     int ret = 0;
203*782d97efSEric Blake     int64_t align_offset = *offset;
204*782d97efSEric Blake     unsigned int align_bytes = *bytes;
205*782d97efSEric Blake     int max_bytes = s->granularity * s->max_iov;
206893f7ebaSPaolo Bonzini 
207*782d97efSEric Blake     need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
208*782d97efSEric Blake     need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
209e5b43573SFam Zheng                           s->cow_bitmap);
210e5b43573SFam Zheng     if (need_cow) {
211*782d97efSEric Blake         bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
212*782d97efSEric Blake                                &align_offset, &align_bytes);
2138f0720ecSPaolo Bonzini     }
2148f0720ecSPaolo Bonzini 
215*782d97efSEric Blake     if (align_bytes > max_bytes) {
216*782d97efSEric Blake         align_bytes = max_bytes;
217e5b43573SFam Zheng         if (need_cow) {
218*782d97efSEric Blake             align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
219e5b43573SFam Zheng         }
220e5b43573SFam Zheng     }
221*782d97efSEric Blake     /* Clipping may result in align_bytes unaligned to chunk boundary, but
2224150ae60SFam Zheng      * that doesn't matter because it's already the end of the source image. */
223*782d97efSEric Blake     align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);
224402a4741SPaolo Bonzini 
225*782d97efSEric Blake     ret = align_offset + align_bytes - (*offset + *bytes);
226*782d97efSEric Blake     *offset = align_offset;
227*782d97efSEric Blake     *bytes = align_bytes;
228e5b43573SFam Zheng     assert(ret >= 0);
229e5b43573SFam Zheng     return ret;
230e5b43573SFam Zheng }
231e5b43573SFam Zheng 
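/* Yield until mirror_iteration_done() re-enters the coroutine, i.e. until at
 * least one in-flight operation completes. */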
23221cd917fSFam Zheng static inline void mirror_wait_for_io(MirrorBlockJob *s)
23321cd917fSFam Zheng {
23421cd917fSFam Zheng     assert(!s->waiting_for_io);
23521cd917fSFam Zheng     s->waiting_for_io = true;
23621cd917fSFam Zheng     qemu_coroutine_yield();
23721cd917fSFam Zheng     s->waiting_for_io = false;
23821cd917fSFam Zheng }
23921cd917fSFam Zheng 
240e5b43573SFam Zheng /* Submit async read while handling COW.
24117612955SJohn Snow  * Returns: The number of sectors copied after and including sector_num,
24217612955SJohn Snow  *          excluding any sectors copied prior to sector_num due to alignment.
24317612955SJohn Snow  *          This will be nb_sectors if no alignment is necessary, or
244e5b43573SFam Zheng  *          (new_end - sector_num) if the tail is rounded up or down due to
245e5b43573SFam Zheng  *          alignment or buffer limit.
246402a4741SPaolo Bonzini  */
247e5b43573SFam Zheng static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
248e5b43573SFam Zheng                           int nb_sectors)
249e5b43573SFam Zheng {
250e253f4b8SKevin Wolf     BlockBackend *source = s->common.blk;
251e5b43573SFam Zheng     int sectors_per_chunk, nb_chunks;
25217612955SJohn Snow     int ret;
253e5b43573SFam Zheng     MirrorOp *op;
254e4808881SJohn Snow     int max_sectors;
255402a4741SPaolo Bonzini 
256e5b43573SFam Zheng     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
257e4808881SJohn Snow     max_sectors = sectors_per_chunk * s->max_iov;
258e5b43573SFam Zheng 
259e5b43573SFam Zheng     /* We can only handle as much as buf_size at a time. */
260e5b43573SFam Zheng     nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
261e4808881SJohn Snow     nb_sectors = MIN(max_sectors, nb_sectors);
262e5b43573SFam Zheng     assert(nb_sectors);
263*782d97efSEric Blake     assert(nb_sectors < BDRV_REQUEST_MAX_SECTORS);
26417612955SJohn Snow     ret = nb_sectors;
265e5b43573SFam Zheng 
266e5b43573SFam Zheng     if (s->cow_bitmap) {
267*782d97efSEric Blake         int64_t offset = sector_num * BDRV_SECTOR_SIZE;
268*782d97efSEric Blake         unsigned int bytes = nb_sectors * BDRV_SECTOR_SIZE;
269*782d97efSEric Blake         int gap;
270*782d97efSEric Blake 
271*782d97efSEric Blake         gap = mirror_cow_align(s, &offset, &bytes);
272*782d97efSEric Blake         sector_num = offset / BDRV_SECTOR_SIZE;
273*782d97efSEric Blake         nb_sectors = bytes / BDRV_SECTOR_SIZE;
274*782d97efSEric Blake         ret += gap / BDRV_SECTOR_SIZE;
275e5b43573SFam Zheng     }
276e5b43573SFam Zheng     assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
277e5b43573SFam Zheng     /* The sector range must be aligned to the granularity because:
278e5b43573SFam Zheng      * 1) Caller passes in aligned values;
279e5b43573SFam Zheng      * 2) mirror_cow_align is used only when target cluster is larger. */
280e5b43573SFam Zheng     assert(!(sector_num % sectors_per_chunk));
2814150ae60SFam Zheng     nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);
282e5b43573SFam Zheng 
283e5b43573SFam Zheng     while (s->buf_free_count < nb_chunks) {
2845cb1a49eSEric Blake         trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
2855cb1a49eSEric Blake                                      s->in_flight);
28621cd917fSFam Zheng         mirror_wait_for_io(s);
287b812f671SPaolo Bonzini     }
288b812f671SPaolo Bonzini 
289bd48bde8SPaolo Bonzini     /* Allocate a MirrorOp that is used as an AIO callback.  */
290c84b3192SPaolo Bonzini     op = g_new(MirrorOp, 1);
291bd48bde8SPaolo Bonzini     op->s = s;
292b436982fSEric Blake     op->offset = sector_num * BDRV_SECTOR_SIZE;
293b436982fSEric Blake     op->bytes = nb_sectors * BDRV_SECTOR_SIZE;
294402a4741SPaolo Bonzini 
295402a4741SPaolo Bonzini     /* Now make a QEMUIOVector taking enough granularity-sized chunks
296402a4741SPaolo Bonzini      * from s->buf_free.
297402a4741SPaolo Bonzini      */
298402a4741SPaolo Bonzini     qemu_iovec_init(&op->qiov, nb_chunks);
299402a4741SPaolo Bonzini     while (nb_chunks-- > 0) {
300402a4741SPaolo Bonzini         MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
301e5b43573SFam Zheng         size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;
3025a0f6fd5SKevin Wolf 
303402a4741SPaolo Bonzini         QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
304402a4741SPaolo Bonzini         s->buf_free_count--;
3055a0f6fd5SKevin Wolf         qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
306402a4741SPaolo Bonzini     }
307402a4741SPaolo Bonzini 
308893f7ebaSPaolo Bonzini     /* Copy the dirty cluster.  */
309bd48bde8SPaolo Bonzini     s->in_flight++;
310b436982fSEric Blake     s->bytes_in_flight += nb_sectors * BDRV_SECTOR_SIZE;
3115cb1a49eSEric Blake     trace_mirror_one_iteration(s, sector_num * BDRV_SECTOR_SIZE,
3125cb1a49eSEric Blake                                nb_sectors * BDRV_SECTOR_SIZE);
313dcfb3bebSFam Zheng 
31473698c30SEric Blake     blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
315bd48bde8SPaolo Bonzini                    mirror_read_complete, op);
316e5b43573SFam Zheng     return ret;
317e5b43573SFam Zheng }
318e5b43573SFam Zheng 
319e5b43573SFam Zheng static void mirror_do_zero_or_discard(MirrorBlockJob *s,
320e6f24193SEric Blake                                       int64_t offset,
321e6f24193SEric Blake                                       uint64_t bytes,
322e5b43573SFam Zheng                                       bool is_discard)
323e5b43573SFam Zheng {
324e5b43573SFam Zheng     MirrorOp *op;
325e5b43573SFam Zheng 
326e5b43573SFam Zheng     /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
327e5b43573SFam Zheng      * so the freeing in mirror_iteration_done is a no-op. */
328e5b43573SFam Zheng     op = g_new0(MirrorOp, 1);
329e5b43573SFam Zheng     op->s = s;
330e6f24193SEric Blake     op->offset = offset;
331e6f24193SEric Blake     op->bytes = bytes;
332e5b43573SFam Zheng 
333e5b43573SFam Zheng     s->in_flight++;
334e6f24193SEric Blake     s->bytes_in_flight += bytes;
335e5b43573SFam Zheng     if (is_discard) {
336e6f24193SEric Blake         blk_aio_pdiscard(s->target, offset,
337b436982fSEric Blake                          op->bytes, mirror_write_complete, op);
338e5b43573SFam Zheng     } else {
339e6f24193SEric Blake         blk_aio_pwrite_zeroes(s->target, offset,
340b436982fSEric Blake                               op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
341dcfb3bebSFam Zheng                               mirror_write_complete, op);
342e5b43573SFam Zheng     }
343e5b43573SFam Zheng }
344e5b43573SFam Zheng 
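/* Copy one batch of consecutive dirty chunks: pick the next dirty chunk from
 * the bitmap iterator, extend it with following dirty chunks up to the buffer
 * size, and mirror the range with a copy, write-zeroes or discard depending on
 * the block status.  Returns the delay (in ns) requested by the rate limiter. */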
345e5b43573SFam Zheng static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
346e5b43573SFam Zheng {
3474ef85a9cSKevin Wolf     BlockDriverState *source = s->source;
3489c83625bSMax Reitz     int64_t sector_num, first_chunk;
349e5b43573SFam Zheng     uint64_t delay_ns = 0;
350e5b43573SFam Zheng     /* At least the first dirty chunk is mirrored in one iteration. */
351e5b43573SFam Zheng     int nb_chunks = 1;
352e5b43573SFam Zheng     int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
353e5b43573SFam Zheng     int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
3544b5004d9SDenis V. Lunev     bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
355b436982fSEric Blake     int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);
356e5b43573SFam Zheng 
357b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_lock(s->dirty_bitmap);
358dc162c8eSFam Zheng     sector_num = bdrv_dirty_iter_next(s->dbi);
359e5b43573SFam Zheng     if (sector_num < 0) {
360dc162c8eSFam Zheng         bdrv_set_dirty_iter(s->dbi, 0);
361dc162c8eSFam Zheng         sector_num = bdrv_dirty_iter_next(s->dbi);
3625cb1a49eSEric Blake         trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
3635cb1a49eSEric Blake                                   BDRV_SECTOR_SIZE);
364e5b43573SFam Zheng         assert(sector_num >= 0);
365e5b43573SFam Zheng     }
366b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
367e5b43573SFam Zheng 
3689c83625bSMax Reitz     first_chunk = sector_num / sectors_per_chunk;
3699c83625bSMax Reitz     while (test_bit(first_chunk, s->in_flight_bitmap)) {
3705cb1a49eSEric Blake         trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
3715cb1a49eSEric Blake                                      s->in_flight);
3729c83625bSMax Reitz         mirror_wait_for_io(s);
3739c83625bSMax Reitz     }
3749c83625bSMax Reitz 
375565ac01fSStefan Hajnoczi     block_job_pause_point(&s->common);
376565ac01fSStefan Hajnoczi 
377e5b43573SFam Zheng     /* Find the number of consecutive dirty chunks following the first dirty
378e5b43573SFam Zheng      * one, and wait for in-flight requests in them. */
379b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_lock(s->dirty_bitmap);
380e5b43573SFam Zheng     while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
381dc162c8eSFam Zheng         int64_t next_dirty;
382e5b43573SFam Zheng         int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
383e5b43573SFam Zheng         int64_t next_chunk = next_sector / sectors_per_chunk;
384e5b43573SFam Zheng         if (next_sector >= end ||
385b64bd51eSPaolo Bonzini             !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_sector)) {
386e5b43573SFam Zheng             break;
387e5b43573SFam Zheng         }
388e5b43573SFam Zheng         if (test_bit(next_chunk, s->in_flight_bitmap)) {
389e5b43573SFam Zheng             break;
390e5b43573SFam Zheng         }
3919c83625bSMax Reitz 
392dc162c8eSFam Zheng         next_dirty = bdrv_dirty_iter_next(s->dbi);
393dc162c8eSFam Zheng         if (next_dirty > next_sector || next_dirty < 0) {
394f27a2742SMax Reitz             /* The bitmap iterator's cache is stale, refresh it */
395dc162c8eSFam Zheng             bdrv_set_dirty_iter(s->dbi, next_sector);
396dc162c8eSFam Zheng             next_dirty = bdrv_dirty_iter_next(s->dbi);
397f27a2742SMax Reitz         }
398dc162c8eSFam Zheng         assert(next_dirty == next_sector);
399e5b43573SFam Zheng         nb_chunks++;
400e5b43573SFam Zheng     }
401e5b43573SFam Zheng 
402e5b43573SFam Zheng     /* Clear dirty bits before querying the block status, because
403e5b43573SFam Zheng      * calling bdrv_get_block_status_above could yield - if some blocks are
404e5b43573SFam Zheng      * marked dirty in this window, we need to know.
405e5b43573SFam Zheng      */
406b64bd51eSPaolo Bonzini     bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, sector_num,
407e5b43573SFam Zheng                                   nb_chunks * sectors_per_chunk);
408b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
409b64bd51eSPaolo Bonzini 
410e5b43573SFam Zheng     bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
411e5b43573SFam Zheng     while (nb_chunks > 0 && sector_num < end) {
41239c11580SJohn Snow         int64_t ret;
413f3e4ce4aSEric Blake         int io_sectors;
414f3e4ce4aSEric Blake         int64_t io_bytes_acct;
415e5b43573SFam Zheng         BlockDriverState *file;
416e5b43573SFam Zheng         enum MirrorMethod {
417e5b43573SFam Zheng             MIRROR_METHOD_COPY,
418e5b43573SFam Zheng             MIRROR_METHOD_ZERO,
419e5b43573SFam Zheng             MIRROR_METHOD_DISCARD
420e5b43573SFam Zheng         } mirror_method = MIRROR_METHOD_COPY;
421e5b43573SFam Zheng 
422e5b43573SFam Zheng         assert(!(sector_num % sectors_per_chunk));
423e5b43573SFam Zheng         ret = bdrv_get_block_status_above(source, NULL, sector_num,
424e5b43573SFam Zheng                                           nb_chunks * sectors_per_chunk,
425e5b43573SFam Zheng                                           &io_sectors, &file);
426e5b43573SFam Zheng         if (ret < 0) {
427b436982fSEric Blake             io_sectors = MIN(nb_chunks * sectors_per_chunk,
428b436982fSEric Blake                              max_io_bytes >> BDRV_SECTOR_BITS);
4290965a41eSVladimir Sementsov-Ogievskiy         } else if (ret & BDRV_BLOCK_DATA) {
430b436982fSEric Blake             io_sectors = MIN(io_sectors, max_io_bytes >> BDRV_SECTOR_BITS);
431e5b43573SFam Zheng         }
432e5b43573SFam Zheng 
433e5b43573SFam Zheng         io_sectors -= io_sectors % sectors_per_chunk;
434e5b43573SFam Zheng         if (io_sectors < sectors_per_chunk) {
435e5b43573SFam Zheng             io_sectors = sectors_per_chunk;
436e5b43573SFam Zheng         } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
437e5b43573SFam Zheng             int64_t target_sector_num;
438e5b43573SFam Zheng             int target_nb_sectors;
439244483e6SKevin Wolf             bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
440244483e6SKevin Wolf                                            io_sectors,  &target_sector_num,
441244483e6SKevin Wolf                                            &target_nb_sectors);
442e5b43573SFam Zheng             if (target_sector_num == sector_num &&
443e5b43573SFam Zheng                 target_nb_sectors == io_sectors) {
444e5b43573SFam Zheng                 mirror_method = ret & BDRV_BLOCK_ZERO ?
445e5b43573SFam Zheng                                     MIRROR_METHOD_ZERO :
446e5b43573SFam Zheng                                     MIRROR_METHOD_DISCARD;
447e5b43573SFam Zheng             }
448e5b43573SFam Zheng         }
449e5b43573SFam Zheng 
450cf56a3c6SDenis V. Lunev         while (s->in_flight >= MAX_IN_FLIGHT) {
4515cb1a49eSEric Blake             trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
4525cb1a49eSEric Blake                                          s->in_flight);
453cf56a3c6SDenis V. Lunev             mirror_wait_for_io(s);
454cf56a3c6SDenis V. Lunev         }
455cf56a3c6SDenis V. Lunev 
456dbaa7b57SVladimir Sementsov-Ogievskiy         if (s->ret < 0) {
457dbaa7b57SVladimir Sementsov-Ogievskiy             return 0;
458dbaa7b57SVladimir Sementsov-Ogievskiy         }
459dbaa7b57SVladimir Sementsov-Ogievskiy 
460931e5260SEric Blake         io_sectors = mirror_clip_sectors(s, sector_num, io_sectors);
461e5b43573SFam Zheng         switch (mirror_method) {
462e5b43573SFam Zheng         case MIRROR_METHOD_COPY:
463e5b43573SFam Zheng             io_sectors = mirror_do_read(s, sector_num, io_sectors);
464f3e4ce4aSEric Blake             io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
465e5b43573SFam Zheng             break;
466e5b43573SFam Zheng         case MIRROR_METHOD_ZERO:
467e5b43573SFam Zheng         case MIRROR_METHOD_DISCARD:
468e6f24193SEric Blake             mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
469e6f24193SEric Blake                                       io_sectors * BDRV_SECTOR_SIZE,
4704b5004d9SDenis V. Lunev                                       mirror_method == MIRROR_METHOD_DISCARD);
4714b5004d9SDenis V. Lunev             if (write_zeroes_ok) {
472f3e4ce4aSEric Blake                 io_bytes_acct = 0;
4734b5004d9SDenis V. Lunev             } else {
474f3e4ce4aSEric Blake                 io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
4754b5004d9SDenis V. Lunev             }
476e5b43573SFam Zheng             break;
477e5b43573SFam Zheng         default:
478e5b43573SFam Zheng             abort();
479e5b43573SFam Zheng         }
480e5b43573SFam Zheng         assert(io_sectors);
481e5b43573SFam Zheng         sector_num += io_sectors;
4824150ae60SFam Zheng         nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
483f14a39ccSSascha Silbe         if (s->common.speed) {
484f3e4ce4aSEric Blake             delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
485f14a39ccSSascha Silbe         }
486dcfb3bebSFam Zheng     }
487cc8c9d6cSPaolo Bonzini     return delay_ns;
488893f7ebaSPaolo Bonzini }
489b952b558SPaolo Bonzini 
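/* Carve the contiguous transfer buffer into granularity-sized chunks and put
 * them all on the free list. */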
490402a4741SPaolo Bonzini static void mirror_free_init(MirrorBlockJob *s)
491402a4741SPaolo Bonzini {
492402a4741SPaolo Bonzini     int granularity = s->granularity;
493402a4741SPaolo Bonzini     size_t buf_size = s->buf_size;
494402a4741SPaolo Bonzini     uint8_t *buf = s->buf;
495402a4741SPaolo Bonzini 
496402a4741SPaolo Bonzini     assert(s->buf_free_count == 0);
497402a4741SPaolo Bonzini     QSIMPLEQ_INIT(&s->buf_free);
498402a4741SPaolo Bonzini     while (buf_size != 0) {
499402a4741SPaolo Bonzini         MirrorBuffer *cur = (MirrorBuffer *)buf;
500402a4741SPaolo Bonzini         QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
501402a4741SPaolo Bonzini         s->buf_free_count++;
502402a4741SPaolo Bonzini         buf_size -= granularity;
503402a4741SPaolo Bonzini         buf += granularity;
504402a4741SPaolo Bonzini     }
505402a4741SPaolo Bonzini }
506402a4741SPaolo Bonzini 
507bae8196dSPaolo Bonzini /* This is also used for the .pause callback. There is no matching
508bae8196dSPaolo Bonzini  * mirror_resume() because mirror_run() will begin iterating again
509bae8196dSPaolo Bonzini  * when the job is resumed.
510bae8196dSPaolo Bonzini  */
511bae8196dSPaolo Bonzini static void mirror_wait_for_all_io(MirrorBlockJob *s)
512bd48bde8SPaolo Bonzini {
513bd48bde8SPaolo Bonzini     while (s->in_flight > 0) {
51421cd917fSFam Zheng         mirror_wait_for_io(s);
515bd48bde8SPaolo Bonzini     }
516893f7ebaSPaolo Bonzini }
517893f7ebaSPaolo Bonzini 
5185a7e7a0bSStefan Hajnoczi typedef struct {
5195a7e7a0bSStefan Hajnoczi     int ret;
5205a7e7a0bSStefan Hajnoczi } MirrorExitData;
5215a7e7a0bSStefan Hajnoczi 
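/* Runs in the main loop after mirror_run() has finished: fixes up the target's
 * backing chain if requested, replaces the source (or the node named by
 * "replaces") with the target on success, and removes the mirror_top filter
 * from the graph before completing the job. */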
5225a7e7a0bSStefan Hajnoczi static void mirror_exit(BlockJob *job, void *opaque)
5235a7e7a0bSStefan Hajnoczi {
5245a7e7a0bSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
5255a7e7a0bSStefan Hajnoczi     MirrorExitData *data = opaque;
5265a7e7a0bSStefan Hajnoczi     AioContext *replace_aio_context = NULL;
5274ef85a9cSKevin Wolf     BlockDriverState *src = s->source;
528e253f4b8SKevin Wolf     BlockDriverState *target_bs = blk_bs(s->target);
5294ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs = s->mirror_top_bs;
53012fa4af6SKevin Wolf     Error *local_err = NULL;
5313f09bfbcSKevin Wolf 
5322119882cSPaolo Bonzini     bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
5332119882cSPaolo Bonzini 
5343f09bfbcSKevin Wolf     /* Make sure that the source BDS doesn't go away until we have called
5353f09bfbcSKevin Wolf      * block_job_completed(). */
5363f09bfbcSKevin Wolf     bdrv_ref(src);
5374ef85a9cSKevin Wolf     bdrv_ref(mirror_top_bs);
5387d9fcb39SKevin Wolf     bdrv_ref(target_bs);
5397d9fcb39SKevin Wolf 
5407d9fcb39SKevin Wolf     /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
5417d9fcb39SKevin Wolf      * inserting target_bs at s->to_replace, where we might not be able to get
54263c8ef28SKevin Wolf      * these permissions.
54363c8ef28SKevin Wolf      *
54463c8ef28SKevin Wolf      * Note that blk_unref() alone doesn't necessarily drop permissions because
54563c8ef28SKevin Wolf      * we might be running nested inside mirror_drain(), which takes an extra
54663c8ef28SKevin Wolf      * reference, so use an explicit blk_set_perm() first. */
54763c8ef28SKevin Wolf     blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
5487d9fcb39SKevin Wolf     blk_unref(s->target);
5497d9fcb39SKevin Wolf     s->target = NULL;
5504ef85a9cSKevin Wolf 
5514ef85a9cSKevin Wolf     /* We don't access the source any more. Dropping any WRITE/RESIZE is
5524ef85a9cSKevin Wolf      * required before it can become a backing file of target_bs. */
5534ef85a9cSKevin Wolf     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
5544ef85a9cSKevin Wolf                             &error_abort);
5554ef85a9cSKevin Wolf     if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
5564ef85a9cSKevin Wolf         BlockDriverState *backing = s->is_none_mode ? src : s->base;
5574ef85a9cSKevin Wolf         if (backing_bs(target_bs) != backing) {
55812fa4af6SKevin Wolf             bdrv_set_backing_hd(target_bs, backing, &local_err);
55912fa4af6SKevin Wolf             if (local_err) {
56012fa4af6SKevin Wolf                 error_report_err(local_err);
56112fa4af6SKevin Wolf                 data->ret = -EPERM;
56212fa4af6SKevin Wolf             }
5634ef85a9cSKevin Wolf         }
5644ef85a9cSKevin Wolf     }
5655a7e7a0bSStefan Hajnoczi 
5665a7e7a0bSStefan Hajnoczi     if (s->to_replace) {
5675a7e7a0bSStefan Hajnoczi         replace_aio_context = bdrv_get_aio_context(s->to_replace);
5685a7e7a0bSStefan Hajnoczi         aio_context_acquire(replace_aio_context);
5695a7e7a0bSStefan Hajnoczi     }
5705a7e7a0bSStefan Hajnoczi 
5715a7e7a0bSStefan Hajnoczi     if (s->should_complete && data->ret == 0) {
572e253f4b8SKevin Wolf         BlockDriverState *to_replace = src;
5735a7e7a0bSStefan Hajnoczi         if (s->to_replace) {
5745a7e7a0bSStefan Hajnoczi             to_replace = s->to_replace;
5755a7e7a0bSStefan Hajnoczi         }
57640365552SKevin Wolf 
577e253f4b8SKevin Wolf         if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
578e253f4b8SKevin Wolf             bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
5795a7e7a0bSStefan Hajnoczi         }
580b8804815SKevin Wolf 
581b8804815SKevin Wolf         /* The mirror job has no requests in flight any more, but we need to
582b8804815SKevin Wolf          * drain any other potential users of the BDS before changing the graph. */
583e253f4b8SKevin Wolf         bdrv_drained_begin(target_bs);
5845fe31c25SKevin Wolf         bdrv_replace_node(to_replace, target_bs, &local_err);
585e253f4b8SKevin Wolf         bdrv_drained_end(target_bs);
5865fe31c25SKevin Wolf         if (local_err) {
5875fe31c25SKevin Wolf             error_report_err(local_err);
5885fe31c25SKevin Wolf             data->ret = -EPERM;
5895fe31c25SKevin Wolf         }
5905a7e7a0bSStefan Hajnoczi     }
5915a7e7a0bSStefan Hajnoczi     if (s->to_replace) {
5925a7e7a0bSStefan Hajnoczi         bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
5935a7e7a0bSStefan Hajnoczi         error_free(s->replace_blocker);
5945a7e7a0bSStefan Hajnoczi         bdrv_unref(s->to_replace);
5955a7e7a0bSStefan Hajnoczi     }
5965a7e7a0bSStefan Hajnoczi     if (replace_aio_context) {
5975a7e7a0bSStefan Hajnoczi         aio_context_release(replace_aio_context);
5985a7e7a0bSStefan Hajnoczi     }
5995a7e7a0bSStefan Hajnoczi     g_free(s->replaces);
6007d9fcb39SKevin Wolf     bdrv_unref(target_bs);
6014ef85a9cSKevin Wolf 
6024ef85a9cSKevin Wolf     /* Remove the mirror filter driver from the graph. Before this, get rid of
6034ef85a9cSKevin Wolf      * the blockers on the intermediate nodes so that the resulting state is
6040bf74767SKevin Wolf      * valid. Also give up permissions on mirror_top_bs->backing, which might
6050bf74767SKevin Wolf      * block the removal. */
6064ef85a9cSKevin Wolf     block_job_remove_all_bdrv(job);
607c1cef672SFam Zheng     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
608c1cef672SFam Zheng                             &error_abort);
6095fe31c25SKevin Wolf     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
6104ef85a9cSKevin Wolf 
6114ef85a9cSKevin Wolf     /* We just changed the BDS the job BB refers to (with either or both of the
6125fe31c25SKevin Wolf      * bdrv_replace_node() calls), so switch the BB back so the cleanup does
6135fe31c25SKevin Wolf      * the right thing. We don't need any permissions any more. */
6144ef85a9cSKevin Wolf     blk_remove_bs(job->blk);
6154ef85a9cSKevin Wolf     blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
6164ef85a9cSKevin Wolf     blk_insert_bs(job->blk, mirror_top_bs, &error_abort);
6174ef85a9cSKevin Wolf 
6185a7e7a0bSStefan Hajnoczi     block_job_completed(&s->common, data->ret);
6194ef85a9cSKevin Wolf 
6205a7e7a0bSStefan Hajnoczi     g_free(data);
621176c3699SFam Zheng     bdrv_drained_end(src);
6224ef85a9cSKevin Wolf     bdrv_unref(mirror_top_bs);
6233f09bfbcSKevin Wolf     bdrv_unref(src);
6245a7e7a0bSStefan Hajnoczi }
6255a7e7a0bSStefan Hajnoczi 
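/* Give up the CPU at least once per SLICE_TIME so that long-running loops stay
 * responsive to pause and cancel requests. */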
62649efb1f5SDenis V. Lunev static void mirror_throttle(MirrorBlockJob *s)
62749efb1f5SDenis V. Lunev {
62849efb1f5SDenis V. Lunev     int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
62949efb1f5SDenis V. Lunev 
63049efb1f5SDenis V. Lunev     if (now - s->last_pause_ns > SLICE_TIME) {
63149efb1f5SDenis V. Lunev         s->last_pause_ns = now;
63249efb1f5SDenis V. Lunev         block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
63349efb1f5SDenis V. Lunev     } else {
63449efb1f5SDenis V. Lunev         block_job_pause_point(&s->common);
63549efb1f5SDenis V. Lunev     }
63649efb1f5SDenis V. Lunev }
63749efb1f5SDenis V. Lunev 
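/* Prepare for the bulk copy: when mirroring without a base image onto a target
 * that does not start out zeroed, either mark the whole bitmap dirty or zero
 * the target explicitly; then seed the dirty bitmap from the source's
 * allocation status above the base. */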
638c0b363adSDenis V. Lunev static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
639c0b363adSDenis V. Lunev {
640c0b363adSDenis V. Lunev     int64_t sector_num, end;
641c0b363adSDenis V. Lunev     BlockDriverState *base = s->base;
6424ef85a9cSKevin Wolf     BlockDriverState *bs = s->source;
643c0b363adSDenis V. Lunev     BlockDriverState *target_bs = blk_bs(s->target);
644c0b363adSDenis V. Lunev     int ret, n;
645c0b363adSDenis V. Lunev 
646c0b363adSDenis V. Lunev     end = s->bdev_length / BDRV_SECTOR_SIZE;
647c0b363adSDenis V. Lunev 
648b7d5062cSDenis V. Lunev     if (base == NULL && !bdrv_has_zero_init(target_bs)) {
649c7c2769cSDenis V. Lunev         if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
650b7d5062cSDenis V. Lunev             bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
651b7d5062cSDenis V. Lunev             return 0;
652b7d5062cSDenis V. Lunev         }
653b7d5062cSDenis V. Lunev 
65490ab48ebSAnton Nefedov         s->initial_zeroing_ongoing = true;
655c7c2769cSDenis V. Lunev         for (sector_num = 0; sector_num < end; ) {
656c7c2769cSDenis V. Lunev             int nb_sectors = MIN(end - sector_num,
657c7c2769cSDenis V. Lunev                 QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);
658c7c2769cSDenis V. Lunev 
659c7c2769cSDenis V. Lunev             mirror_throttle(s);
660c7c2769cSDenis V. Lunev 
661c7c2769cSDenis V. Lunev             if (block_job_is_cancelled(&s->common)) {
66290ab48ebSAnton Nefedov                 s->initial_zeroing_ongoing = false;
663c7c2769cSDenis V. Lunev                 return 0;
664c7c2769cSDenis V. Lunev             }
665c7c2769cSDenis V. Lunev 
666c7c2769cSDenis V. Lunev             if (s->in_flight >= MAX_IN_FLIGHT) {
66767adf4b3SEric Blake                 trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
66867adf4b3SEric Blake                                    s->in_flight);
669c7c2769cSDenis V. Lunev                 mirror_wait_for_io(s);
670c7c2769cSDenis V. Lunev                 continue;
671c7c2769cSDenis V. Lunev             }
672c7c2769cSDenis V. Lunev 
673e6f24193SEric Blake             mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
674e6f24193SEric Blake                                       nb_sectors * BDRV_SECTOR_SIZE, false);
675c7c2769cSDenis V. Lunev             sector_num += nb_sectors;
676c7c2769cSDenis V. Lunev         }
677c7c2769cSDenis V. Lunev 
678bae8196dSPaolo Bonzini         mirror_wait_for_all_io(s);
67990ab48ebSAnton Nefedov         s->initial_zeroing_ongoing = false;
680c7c2769cSDenis V. Lunev     }
681c7c2769cSDenis V. Lunev 
682c0b363adSDenis V. Lunev     /* First part, loop on the sectors and initialize the dirty bitmap.  */
683c0b363adSDenis V. Lunev     for (sector_num = 0; sector_num < end; ) {
684c0b363adSDenis V. Lunev         /* Just to make sure we are not exceeding the int limit. */
685c0b363adSDenis V. Lunev         int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
686c0b363adSDenis V. Lunev                              end - sector_num);
687c0b363adSDenis V. Lunev 
688c0b363adSDenis V. Lunev         mirror_throttle(s);
689c0b363adSDenis V. Lunev 
690c0b363adSDenis V. Lunev         if (block_job_is_cancelled(&s->common)) {
691c0b363adSDenis V. Lunev             return 0;
692c0b363adSDenis V. Lunev         }
693c0b363adSDenis V. Lunev 
694c0b363adSDenis V. Lunev         ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
695c0b363adSDenis V. Lunev         if (ret < 0) {
696c0b363adSDenis V. Lunev             return ret;
697c0b363adSDenis V. Lunev         }
698c0b363adSDenis V. Lunev 
699c0b363adSDenis V. Lunev         assert(n > 0);
700b7d5062cSDenis V. Lunev         if (ret == 1) {
701c0b363adSDenis V. Lunev             bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
702c0b363adSDenis V. Lunev         }
703c0b363adSDenis V. Lunev         sector_num += n;
704c0b363adSDenis V. Lunev     }
705c0b363adSDenis V. Lunev     return 0;
706c0b363adSDenis V. Lunev }
707c0b363adSDenis V. Lunev 
708bdffb31dSPaolo Bonzini /* Called when going out of the streaming phase to flush the bulk of the
709bdffb31dSPaolo Bonzini  * data to the medium, or just before completing.
710bdffb31dSPaolo Bonzini  */
711bdffb31dSPaolo Bonzini static int mirror_flush(MirrorBlockJob *s)
712bdffb31dSPaolo Bonzini {
713bdffb31dSPaolo Bonzini     int ret = blk_flush(s->target);
714bdffb31dSPaolo Bonzini     if (ret < 0) {
715bdffb31dSPaolo Bonzini         if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
716bdffb31dSPaolo Bonzini             s->ret = ret;
717bdffb31dSPaolo Bonzini         }
718bdffb31dSPaolo Bonzini     }
719bdffb31dSPaolo Bonzini     return ret;
720bdffb31dSPaolo Bonzini }
721bdffb31dSPaolo Bonzini 
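/* Main loop of the mirror coroutine: performs the initial copy driven by the
 * dirty bitmap, then keeps copying newly dirtied areas until the job is
 * cancelled or completed via mirror_complete(), and finally defers cleanup to
 * mirror_exit() in the main loop. */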
722893f7ebaSPaolo Bonzini static void coroutine_fn mirror_run(void *opaque)
723893f7ebaSPaolo Bonzini {
724893f7ebaSPaolo Bonzini     MirrorBlockJob *s = opaque;
7255a7e7a0bSStefan Hajnoczi     MirrorExitData *data;
7264ef85a9cSKevin Wolf     BlockDriverState *bs = s->source;
727e253f4b8SKevin Wolf     BlockDriverState *target_bs = blk_bs(s->target);
7289a0cec66SPaolo Bonzini     bool need_drain = true;
729c0b363adSDenis V. Lunev     int64_t length;
730b812f671SPaolo Bonzini     BlockDriverInfo bdi;
7311d33936eSJeff Cody     char backing_filename[2]; /* we only need 2 characters because we are only
7321d33936eSJeff Cody                                  checking for an empty string */
733893f7ebaSPaolo Bonzini     int ret = 0;
734893f7ebaSPaolo Bonzini 
735893f7ebaSPaolo Bonzini     if (block_job_is_cancelled(&s->common)) {
736893f7ebaSPaolo Bonzini         goto immediate_exit;
737893f7ebaSPaolo Bonzini     }
738893f7ebaSPaolo Bonzini 
739b21c7652SMax Reitz     s->bdev_length = bdrv_getlength(bs);
740b21c7652SMax Reitz     if (s->bdev_length < 0) {
741b21c7652SMax Reitz         ret = s->bdev_length;
742373df5b1SFam Zheng         goto immediate_exit;
743becc347eSKevin Wolf     }
744becc347eSKevin Wolf 
745becc347eSKevin Wolf     /* Active commit must resize the base image if its size differs from the
746becc347eSKevin Wolf      * active layer. */
747becc347eSKevin Wolf     if (s->base == blk_bs(s->target)) {
748becc347eSKevin Wolf         int64_t base_length;
749becc347eSKevin Wolf 
750becc347eSKevin Wolf         base_length = blk_getlength(s->target);
751becc347eSKevin Wolf         if (base_length < 0) {
752becc347eSKevin Wolf             ret = base_length;
753becc347eSKevin Wolf             goto immediate_exit;
754becc347eSKevin Wolf         }
755becc347eSKevin Wolf 
756becc347eSKevin Wolf         if (s->bdev_length > base_length) {
757ed3d2ec9SMax Reitz             ret = blk_truncate(s->target, s->bdev_length, NULL);
758becc347eSKevin Wolf             if (ret < 0) {
759becc347eSKevin Wolf                 goto immediate_exit;
760becc347eSKevin Wolf             }
761becc347eSKevin Wolf         }
762becc347eSKevin Wolf     }
763becc347eSKevin Wolf 
764becc347eSKevin Wolf     if (s->bdev_length == 0) {
7659e48b025SFam Zheng         /* Report BLOCK_JOB_READY and wait for complete. */
7669e48b025SFam Zheng         block_job_event_ready(&s->common);
7679e48b025SFam Zheng         s->synced = true;
7689e48b025SFam Zheng         while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
7699e48b025SFam Zheng             block_job_yield(&s->common);
7709e48b025SFam Zheng         }
7719e48b025SFam Zheng         s->common.cancelled = false;
7729e48b025SFam Zheng         goto immediate_exit;
773893f7ebaSPaolo Bonzini     }
774893f7ebaSPaolo Bonzini 
775b21c7652SMax Reitz     length = DIV_ROUND_UP(s->bdev_length, s->granularity);
776402a4741SPaolo Bonzini     s->in_flight_bitmap = bitmap_new(length);
777402a4741SPaolo Bonzini 
778b812f671SPaolo Bonzini     /* If we have no backing file yet in the destination, we cannot let
779b812f671SPaolo Bonzini      * the destination do COW.  Instead, we copy sectors around the
780b812f671SPaolo Bonzini      * dirty data if needed.  We need a bitmap to do that.
781b812f671SPaolo Bonzini      */
782e253f4b8SKevin Wolf     bdrv_get_backing_filename(target_bs, backing_filename,
783b812f671SPaolo Bonzini                               sizeof(backing_filename));
784e253f4b8SKevin Wolf     if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
785b436982fSEric Blake         s->target_cluster_size = bdi.cluster_size;
786b436982fSEric Blake     } else {
787b436982fSEric Blake         s->target_cluster_size = BDRV_SECTOR_SIZE;
788c3cc95bdSFam Zheng     }
789b436982fSEric Blake     if (backing_filename[0] && !target_bs->backing &&
790b436982fSEric Blake         s->granularity < s->target_cluster_size) {
791b436982fSEric Blake         s->buf_size = MAX(s->buf_size, s->target_cluster_size);
792b812f671SPaolo Bonzini         s->cow_bitmap = bitmap_new(length);
793b812f671SPaolo Bonzini     }
794e253f4b8SKevin Wolf     s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
795b812f671SPaolo Bonzini 
7967504edf4SKevin Wolf     s->buf = qemu_try_blockalign(bs, s->buf_size);
7977504edf4SKevin Wolf     if (s->buf == NULL) {
7987504edf4SKevin Wolf         ret = -ENOMEM;
7997504edf4SKevin Wolf         goto immediate_exit;
8007504edf4SKevin Wolf     }
8017504edf4SKevin Wolf 
802402a4741SPaolo Bonzini     mirror_free_init(s);
803893f7ebaSPaolo Bonzini 
80449efb1f5SDenis V. Lunev     s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
80503544a6eSFam Zheng     if (!s->is_none_mode) {
806c0b363adSDenis V. Lunev         ret = mirror_dirty_init(s);
807c0b363adSDenis V. Lunev         if (ret < 0 || block_job_is_cancelled(&s->common)) {
8084c0cbd6fSFam Zheng             goto immediate_exit;
8094c0cbd6fSFam Zheng         }
810893f7ebaSPaolo Bonzini     }
811893f7ebaSPaolo Bonzini 
812dc162c8eSFam Zheng     assert(!s->dbi);
813dc162c8eSFam Zheng     s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
814893f7ebaSPaolo Bonzini     for (;;) {
815cc8c9d6cSPaolo Bonzini         uint64_t delay_ns = 0;
81649efb1f5SDenis V. Lunev         int64_t cnt, delta;
817893f7ebaSPaolo Bonzini         bool should_complete;
818893f7ebaSPaolo Bonzini 
819bd48bde8SPaolo Bonzini         if (s->ret < 0) {
820bd48bde8SPaolo Bonzini             ret = s->ret;
821893f7ebaSPaolo Bonzini             goto immediate_exit;
822893f7ebaSPaolo Bonzini         }
823bd48bde8SPaolo Bonzini 
824565ac01fSStefan Hajnoczi         block_job_pause_point(&s->common);
825565ac01fSStefan Hajnoczi 
82620dca810SJohn Snow         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
827b21c7652SMax Reitz         /* s->common.offset contains the number of bytes already processed so
828b21c7652SMax Reitz          * far, cnt is the number of dirty sectors remaining and
829b436982fSEric Blake          * s->bytes_in_flight is the number of bytes currently being
830b21c7652SMax Reitz          * processed; together those are the current total operation length */
831b436982fSEric Blake         s->common.len = s->common.offset + s->bytes_in_flight +
832b436982fSEric Blake             cnt * BDRV_SECTOR_SIZE;
833bd48bde8SPaolo Bonzini 
834bd48bde8SPaolo Bonzini         /* Note that even when no rate limit is applied we need to yield
835a7282330SFam Zheng          * periodically with no pending I/O so that bdrv_drain_all() returns.
836bd48bde8SPaolo Bonzini          * We do so every SLICE_TIME nanoseconds, or when there is an error,
837bd48bde8SPaolo Bonzini          * or when the source is clean, whichever comes first.
838bd48bde8SPaolo Bonzini          */
83949efb1f5SDenis V. Lunev         delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
84049efb1f5SDenis V. Lunev         if (delta < SLICE_TIME &&
841bd48bde8SPaolo Bonzini             s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
842cf56a3c6SDenis V. Lunev             if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
843402a4741SPaolo Bonzini                 (cnt == 0 && s->in_flight > 0)) {
8445cb1a49eSEric Blake                 trace_mirror_yield(s, cnt * BDRV_SECTOR_SIZE,
8455cb1a49eSEric Blake                                    s->buf_free_count, s->in_flight);
84621cd917fSFam Zheng                 mirror_wait_for_io(s);
847bd48bde8SPaolo Bonzini                 continue;
848bd48bde8SPaolo Bonzini             } else if (cnt != 0) {
849cc8c9d6cSPaolo Bonzini                 delay_ns = mirror_iteration(s);
850893f7ebaSPaolo Bonzini             }
851cc8c9d6cSPaolo Bonzini         }
852893f7ebaSPaolo Bonzini 
853893f7ebaSPaolo Bonzini         should_complete = false;
854bd48bde8SPaolo Bonzini         if (s->in_flight == 0 && cnt == 0) {
855893f7ebaSPaolo Bonzini             trace_mirror_before_flush(s);
856bdffb31dSPaolo Bonzini             if (!s->synced) {
857bdffb31dSPaolo Bonzini                 if (mirror_flush(s) < 0) {
858bdffb31dSPaolo Bonzini                     /* Go check s->ret.  */
859bdffb31dSPaolo Bonzini                     continue;
860893f7ebaSPaolo Bonzini                 }
861893f7ebaSPaolo Bonzini                 /* We're out of the streaming phase.  From now on, if the job
862893f7ebaSPaolo Bonzini                  * is cancelled we will actually complete all pending I/O and
863893f7ebaSPaolo Bonzini                  * report completion.  This way, block-job-cancel will leave
864893f7ebaSPaolo Bonzini                  * the target in a consistent state.
865893f7ebaSPaolo Bonzini                  */
866bcada37bSWenchao Xia                 block_job_event_ready(&s->common);
867d63ffd87SPaolo Bonzini                 s->synced = true;
868d63ffd87SPaolo Bonzini             }
869d63ffd87SPaolo Bonzini 
870d63ffd87SPaolo Bonzini             should_complete = s->should_complete ||
871d63ffd87SPaolo Bonzini                 block_job_is_cancelled(&s->common);
87220dca810SJohn Snow             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
873893f7ebaSPaolo Bonzini         }
874893f7ebaSPaolo Bonzini 
875893f7ebaSPaolo Bonzini         if (cnt == 0 && should_complete) {
876893f7ebaSPaolo Bonzini             /* The dirty bitmap is not updated while operations are pending.
877893f7ebaSPaolo Bonzini              * If we're about to exit, wait for pending operations before
878893f7ebaSPaolo Bonzini              * calling bdrv_get_dirty_count(bs), or we may exit while the
879893f7ebaSPaolo Bonzini              * source has dirty data to copy!
880893f7ebaSPaolo Bonzini              *
881893f7ebaSPaolo Bonzini              * Note that I/O can be submitted by the guest while
8829a0cec66SPaolo Bonzini              * mirror_populate runs, so pause it now.  Before deciding
8839a0cec66SPaolo Bonzini              * whether to switch to the target, check one last time if I/O has
8849a0cec66SPaolo Bonzini              * arrived in the meantime, and if not, flush the data to disk.
885893f7ebaSPaolo Bonzini              */
8865cb1a49eSEric Blake             trace_mirror_before_drain(s, cnt * BDRV_SECTOR_SIZE);
8879a0cec66SPaolo Bonzini 
8889a0cec66SPaolo Bonzini             bdrv_drained_begin(bs);
88920dca810SJohn Snow             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
890bdffb31dSPaolo Bonzini             if (cnt > 0 || mirror_flush(s) < 0) {
8919a0cec66SPaolo Bonzini                 bdrv_drained_end(bs);
8929a0cec66SPaolo Bonzini                 continue;
8939a0cec66SPaolo Bonzini             }
8949a0cec66SPaolo Bonzini 
8959a0cec66SPaolo Bonzini             /* The two disks are in sync.  Exit and report successful
8969a0cec66SPaolo Bonzini              * completion.
8979a0cec66SPaolo Bonzini              */
8989a0cec66SPaolo Bonzini             assert(QLIST_EMPTY(&bs->tracked_requests));
8999a0cec66SPaolo Bonzini             s->common.cancelled = false;
9009a0cec66SPaolo Bonzini             need_drain = false;
9019a0cec66SPaolo Bonzini             break;
902893f7ebaSPaolo Bonzini         }
903893f7ebaSPaolo Bonzini 
904893f7ebaSPaolo Bonzini         ret = 0;
9055cb1a49eSEric Blake         trace_mirror_before_sleep(s, cnt * BDRV_SECTOR_SIZE,
9065cb1a49eSEric Blake                                   s->synced, delay_ns);
907d63ffd87SPaolo Bonzini         if (!s->synced) {
9087483d1e5SAlex Bligh             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
909893f7ebaSPaolo Bonzini             if (block_job_is_cancelled(&s->common)) {
910893f7ebaSPaolo Bonzini                 break;
911893f7ebaSPaolo Bonzini             }
912893f7ebaSPaolo Bonzini         } else if (!should_complete) {
913bd48bde8SPaolo Bonzini             delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
9147483d1e5SAlex Bligh             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
915893f7ebaSPaolo Bonzini         }
91649efb1f5SDenis V. Lunev         s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
917893f7ebaSPaolo Bonzini     }
918893f7ebaSPaolo Bonzini 
919893f7ebaSPaolo Bonzini immediate_exit:
920bd48bde8SPaolo Bonzini     if (s->in_flight > 0) {
921bd48bde8SPaolo Bonzini         /* We get here only if something went wrong.  Either the job failed,
922bd48bde8SPaolo Bonzini          * or it was cancelled prematurely so that we do not guarantee that
923bd48bde8SPaolo Bonzini          * the target is a copy of the source.
924bd48bde8SPaolo Bonzini          */
925bd48bde8SPaolo Bonzini         assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
9269a0cec66SPaolo Bonzini         assert(need_drain);
927bae8196dSPaolo Bonzini         mirror_wait_for_all_io(s);
928bd48bde8SPaolo Bonzini     }
929bd48bde8SPaolo Bonzini 
930bd48bde8SPaolo Bonzini     assert(s->in_flight == 0);
9317191bf31SMarkus Armbruster     qemu_vfree(s->buf);
932b812f671SPaolo Bonzini     g_free(s->cow_bitmap);
933402a4741SPaolo Bonzini     g_free(s->in_flight_bitmap);
934dc162c8eSFam Zheng     bdrv_dirty_iter_free(s->dbi);
9355a7e7a0bSStefan Hajnoczi 
9365a7e7a0bSStefan Hajnoczi     data = g_malloc(sizeof(*data));
9375a7e7a0bSStefan Hajnoczi     data->ret = ret;
9389a0cec66SPaolo Bonzini 
9399a0cec66SPaolo Bonzini     if (need_drain) {
940e253f4b8SKevin Wolf         bdrv_drained_begin(bs);
9419a0cec66SPaolo Bonzini     }
9425a7e7a0bSStefan Hajnoczi     block_job_defer_to_main_loop(&s->common, mirror_exit, data);
943893f7ebaSPaolo Bonzini }
944893f7ebaSPaolo Bonzini 
945893f7ebaSPaolo Bonzini static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
946893f7ebaSPaolo Bonzini {
947893f7ebaSPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
948893f7ebaSPaolo Bonzini 
949893f7ebaSPaolo Bonzini     if (speed < 0) {
950c6bd8c70SMarkus Armbruster         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
951893f7ebaSPaolo Bonzini         return;
952893f7ebaSPaolo Bonzini     }
953f3e4ce4aSEric Blake     ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
954893f7ebaSPaolo Bonzini }
955893f7ebaSPaolo Bonzini 
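/* Handler for block-job-complete: optionally opens the target's backing chain,
 * resolves and blocks the node to be replaced, and then asks mirror_run() to
 * finish by setting should_complete. */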
956d63ffd87SPaolo Bonzini static void mirror_complete(BlockJob *job, Error **errp)
957d63ffd87SPaolo Bonzini {
958d63ffd87SPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
9594ef85a9cSKevin Wolf     BlockDriverState *target;
960d63ffd87SPaolo Bonzini 
961274fcceeSMax Reitz     target = blk_bs(s->target);
962274fcceeSMax Reitz 
963d63ffd87SPaolo Bonzini     if (!s->synced) {
9649df229c3SAlberto Garcia         error_setg(errp, "The active block job '%s' cannot be completed",
9659df229c3SAlberto Garcia                    job->id);
966d63ffd87SPaolo Bonzini         return;
967d63ffd87SPaolo Bonzini     }
968d63ffd87SPaolo Bonzini 
969274fcceeSMax Reitz     if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
970274fcceeSMax Reitz         int ret;
971274fcceeSMax Reitz 
972274fcceeSMax Reitz         assert(!target->backing);
973274fcceeSMax Reitz         ret = bdrv_open_backing_file(target, NULL, "backing", errp);
974274fcceeSMax Reitz         if (ret < 0) {
975274fcceeSMax Reitz             return;
976274fcceeSMax Reitz         }
977274fcceeSMax Reitz     }
978274fcceeSMax Reitz 
97915d67298SChanglong Xie     /* block all operations on to_replace bs */
98009158f00SBenoît Canet     if (s->replaces) {
9815a7e7a0bSStefan Hajnoczi         AioContext *replace_aio_context;
9825a7e7a0bSStefan Hajnoczi 
983e12f3784SWen Congyang         s->to_replace = bdrv_find_node(s->replaces);
98409158f00SBenoît Canet         if (!s->to_replace) {
985e12f3784SWen Congyang             error_setg(errp, "Node name '%s' not found", s->replaces);
98609158f00SBenoît Canet             return;
98709158f00SBenoît Canet         }
98809158f00SBenoît Canet 
9895a7e7a0bSStefan Hajnoczi         replace_aio_context = bdrv_get_aio_context(s->to_replace);
9905a7e7a0bSStefan Hajnoczi         aio_context_acquire(replace_aio_context);
9915a7e7a0bSStefan Hajnoczi 
9924ef85a9cSKevin Wolf         /* TODO Translate this into the permission system. The current
9934ef85a9cSKevin Wolf          * definition of GRAPH_MOD would require requesting it for the parents;
9944ef85a9cSKevin Wolf          * they might not even be BlockDriverStates, however, so a BdrvChild
9954ef85a9cSKevin Wolf          * can't address them. GRAPH_MOD may need to be redefined. */
99609158f00SBenoît Canet         error_setg(&s->replace_blocker,
99709158f00SBenoît Canet                    "block device is in use by block-job-complete");
99809158f00SBenoît Canet         bdrv_op_block_all(s->to_replace, s->replace_blocker);
99909158f00SBenoît Canet         bdrv_ref(s->to_replace);
10005a7e7a0bSStefan Hajnoczi 
10015a7e7a0bSStefan Hajnoczi         aio_context_release(replace_aio_context);
100209158f00SBenoît Canet     }
100309158f00SBenoît Canet 
1004d63ffd87SPaolo Bonzini     s->should_complete = true;
1005751ebd76SFam Zheng     block_job_enter(&s->common);
1006d63ffd87SPaolo Bonzini }
1007d63ffd87SPaolo Bonzini 
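/* Pause callback: wait for all in-flight requests so the job's state is
 * quiescent while it is paused. */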
1008bae8196dSPaolo Bonzini static void mirror_pause(BlockJob *job)
1009565ac01fSStefan Hajnoczi {
1010565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1011565ac01fSStefan Hajnoczi 
1012bae8196dSPaolo Bonzini     mirror_wait_for_all_io(s);
1013565ac01fSStefan Hajnoczi }
1014565ac01fSStefan Hajnoczi 
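/* The job's main node has been moved to a new AioContext; move the target
 * BlockBackend along with it. */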
1015565ac01fSStefan Hajnoczi static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
1016565ac01fSStefan Hajnoczi {
1017565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1018565ac01fSStefan Hajnoczi 
1019565ac01fSStefan Hajnoczi     blk_set_aio_context(s->target, new_context);
1020565ac01fSStefan Hajnoczi }
1021565ac01fSStefan Hajnoczi 
1022bae8196dSPaolo Bonzini static void mirror_drain(BlockJob *job)
1023bae8196dSPaolo Bonzini {
1024bae8196dSPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1025bae8196dSPaolo Bonzini 
1026bae8196dSPaolo Bonzini     /* Need to keep a reference in case blk_drain triggers execution
1027bae8196dSPaolo Bonzini      * of mirror_complete...
1028bae8196dSPaolo Bonzini      */
1029bae8196dSPaolo Bonzini     if (s->target) {
1030bae8196dSPaolo Bonzini         BlockBackend *target = s->target;
1031bae8196dSPaolo Bonzini         blk_ref(target);
1032bae8196dSPaolo Bonzini         blk_drain(target);
1033bae8196dSPaolo Bonzini         blk_unref(target);
1034bae8196dSPaolo Bonzini     }
1035bae8196dSPaolo Bonzini }
1036bae8196dSPaolo Bonzini 
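/* The two job drivers below share every callback; commit_active_job_driver
 * reuses the mirror machinery for active commit and differs only in the job
 * type it reports. */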
10373fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = {
1038893f7ebaSPaolo Bonzini     .instance_size          = sizeof(MirrorBlockJob),
103979e14bf7SFam Zheng     .job_type               = BLOCK_JOB_TYPE_MIRROR,
1040893f7ebaSPaolo Bonzini     .set_speed              = mirror_set_speed,
1041a7815a76SJohn Snow     .start                  = mirror_run,
1042d63ffd87SPaolo Bonzini     .complete               = mirror_complete,
1043565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1044565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1045bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
1046893f7ebaSPaolo Bonzini };
1047893f7ebaSPaolo Bonzini 
104803544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = {
104903544a6eSFam Zheng     .instance_size          = sizeof(MirrorBlockJob),
105003544a6eSFam Zheng     .job_type               = BLOCK_JOB_TYPE_COMMIT,
105103544a6eSFam Zheng     .set_speed              = mirror_set_speed,
1052a7815a76SJohn Snow     .start                  = mirror_run,
105303544a6eSFam Zheng     .complete               = mirror_complete,
1054565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1055565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1056bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
105703544a6eSFam Zheng };
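/* The callbacks below implement the bdrv_mirror_top filter node: every
 * request is forwarded unmodified to bs->backing (the source), so the filter
 * is transparent to the guest while giving the job a node of its own to hold
 * permissions on. */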
105803544a6eSFam Zheng 
10594ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
10604ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10614ef85a9cSKevin Wolf {
10624ef85a9cSKevin Wolf     return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
10634ef85a9cSKevin Wolf }
10644ef85a9cSKevin Wolf 
10654ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
10664ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10674ef85a9cSKevin Wolf {
10684ef85a9cSKevin Wolf     return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
10694ef85a9cSKevin Wolf }
10704ef85a9cSKevin Wolf 
10714ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
10724ef85a9cSKevin Wolf {
10734ef85a9cSKevin Wolf     return bdrv_co_flush(bs->backing->bs);
10744ef85a9cSKevin Wolf }
10754ef85a9cSKevin Wolf 
10764ef85a9cSKevin Wolf static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
10774ef85a9cSKevin Wolf     BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
10784ef85a9cSKevin Wolf     BlockDriverState **file)
10794ef85a9cSKevin Wolf {
10804ef85a9cSKevin Wolf     *pnum = nb_sectors;
10814ef85a9cSKevin Wolf     *file = bs->backing->bs;
1082d5254033SEric Blake     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
10834ef85a9cSKevin Wolf            (sector_num << BDRV_SECTOR_BITS);
10844ef85a9cSKevin Wolf }
10854ef85a9cSKevin Wolf 
10864ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
1087f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags)
10884ef85a9cSKevin Wolf {
1089f5a5ca79SManos Pitsidianakis     return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
10904ef85a9cSKevin Wolf }
10914ef85a9cSKevin Wolf 
10924ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
1093f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes)
10944ef85a9cSKevin Wolf {
1095f5a5ca79SManos Pitsidianakis     return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
10964ef85a9cSKevin Wolf }
10974ef85a9cSKevin Wolf 
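/* Report the backing (source) node's filename as our own so that the filter
 * node stays invisible in user-visible filenames. */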
1098fd4a6493SKevin Wolf static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
1099fd4a6493SKevin Wolf {
1100fd4a6493SKevin Wolf     bdrv_refresh_filename(bs->backing->bs);
1101fd4a6493SKevin Wolf     pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
1102fd4a6493SKevin Wolf             bs->backing->bs->filename);
1103fd4a6493SKevin Wolf }
1104fd4a6493SKevin Wolf 
11054ef85a9cSKevin Wolf static void bdrv_mirror_top_close(BlockDriverState *bs)
11064ef85a9cSKevin Wolf {
11074ef85a9cSKevin Wolf }
11084ef85a9cSKevin Wolf 
11094ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
11104ef85a9cSKevin Wolf                                        const BdrvChildRole *role,
11114ef85a9cSKevin Wolf                                        uint64_t perm, uint64_t shared,
11124ef85a9cSKevin Wolf                                        uint64_t *nperm, uint64_t *nshared)
11134ef85a9cSKevin Wolf {
11144ef85a9cSKevin Wolf     /* Must be able to forward guest writes to the real image */
11154ef85a9cSKevin Wolf     *nperm = 0;
11164ef85a9cSKevin Wolf     if (perm & BLK_PERM_WRITE) {
11174ef85a9cSKevin Wolf         *nperm |= BLK_PERM_WRITE;
11184ef85a9cSKevin Wolf     }
11194ef85a9cSKevin Wolf 
11204ef85a9cSKevin Wolf     *nshared = BLK_PERM_ALL;
11214ef85a9cSKevin Wolf }
11224ef85a9cSKevin Wolf 
11234ef85a9cSKevin Wolf /* Dummy node that provides consistent reads to its users without requiring
11244ef85a9cSKevin Wolf  * them from its backing file and that allows writes on the backing file chain. */
11254ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = {
11264ef85a9cSKevin Wolf     .format_name                = "mirror_top",
11274ef85a9cSKevin Wolf     .bdrv_co_preadv             = bdrv_mirror_top_preadv,
11284ef85a9cSKevin Wolf     .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
11294ef85a9cSKevin Wolf     .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
11304ef85a9cSKevin Wolf     .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
11314ef85a9cSKevin Wolf     .bdrv_co_flush              = bdrv_mirror_top_flush,
11324ef85a9cSKevin Wolf     .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
1133fd4a6493SKevin Wolf     .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
11344ef85a9cSKevin Wolf     .bdrv_close                 = bdrv_mirror_top_close,
11354ef85a9cSKevin Wolf     .bdrv_child_perm            = bdrv_mirror_top_child_perm,
11364ef85a9cSKevin Wolf };
11374ef85a9cSKevin Wolf 
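/* Set up and start a mirror (or active commit) job.  A rough sketch of the
 * node graph while the job runs, assuming a simple backing chain:
 *
 *     guest device / other parents
 *                  |
 *          mirror_top_bs  (bdrv_mirror_top filter, named filter_node_name)
 *                  |
 *                 bs (source)  --- copied via s->dirty_bitmap --->  target
 *
 * The target is accessed through the s->target BlockBackend; for active
 * commit it is one of the source's own backing files. */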
113871aa9867SAlberto Garcia static void mirror_start_job(const char *job_id, BlockDriverState *bs,
113947970dfbSJohn Snow                              int creation_flags, BlockDriverState *target,
114047970dfbSJohn Snow                              const char *replaces, int64_t speed,
114147970dfbSJohn Snow                              uint32_t granularity, int64_t buf_size,
1142274fcceeSMax Reitz                              BlockMirrorBackingMode backing_mode,
114303544a6eSFam Zheng                              BlockdevOnError on_source_error,
1144b952b558SPaolo Bonzini                              BlockdevOnError on_target_error,
11450fc9f8eaSFam Zheng                              bool unmap,
1146097310b5SMarkus Armbruster                              BlockCompletionFunc *cb,
114751ccfa2dSFam Zheng                              void *opaque,
114803544a6eSFam Zheng                              const BlockJobDriver *driver,
1149b49f7eadSWen Congyang                              bool is_none_mode, BlockDriverState *base,
115051ccfa2dSFam Zheng                              bool auto_complete, const char *filter_node_name,
115151ccfa2dSFam Zheng                              Error **errp)
1152893f7ebaSPaolo Bonzini {
1153893f7ebaSPaolo Bonzini     MirrorBlockJob *s;
11544ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs;
11554ef85a9cSKevin Wolf     bool target_graph_mod;
11564ef85a9cSKevin Wolf     bool target_is_backing;
1157b2c2832cSKevin Wolf     Error *local_err = NULL;
1158d7086422SKevin Wolf     int ret;
1159893f7ebaSPaolo Bonzini 
1160eee13dfeSPaolo Bonzini     if (granularity == 0) {
1161341ebc2fSJohn Snow         granularity = bdrv_get_default_bitmap_granularity(target);
1162eee13dfeSPaolo Bonzini     }
1163eee13dfeSPaolo Bonzini 
1164eee13dfeSPaolo Bonzini     assert((granularity & (granularity - 1)) == 0);
1165b436982fSEric Blake     /* Granularity must be large enough for sector-based dirty bitmap */
1166b436982fSEric Blake     assert(granularity >= BDRV_SECTOR_SIZE);
1167eee13dfeSPaolo Bonzini 
116848ac0a4dSWen Congyang     if (buf_size < 0) {
116948ac0a4dSWen Congyang         error_setg(errp, "Invalid parameter 'buf-size'");
117048ac0a4dSWen Congyang         return;
117148ac0a4dSWen Congyang     }
117248ac0a4dSWen Congyang 
117348ac0a4dSWen Congyang     if (buf_size == 0) {
117448ac0a4dSWen Congyang         buf_size = DEFAULT_MIRROR_BUF_SIZE;
117548ac0a4dSWen Congyang     }
11765bc361b8SFam Zheng 
11774ef85a9cSKevin Wolf     /* In the case of active commit, add a dummy driver to provide consistent
11784ef85a9cSKevin Wolf      * reads on the top while dropping that requirement for the intermediate
11794ef85a9cSKevin Wolf      * nodes, and make the backing chain writable. */
11806cdbceb1SKevin Wolf     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
11816cdbceb1SKevin Wolf                                          BDRV_O_RDWR, errp);
11824ef85a9cSKevin Wolf     if (mirror_top_bs == NULL) {
1183893f7ebaSPaolo Bonzini         return;
1184893f7ebaSPaolo Bonzini     }
11854ef85a9cSKevin Wolf     mirror_top_bs->total_sectors = bs->total_sectors;
118619dd29e8SFam Zheng     bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
1187893f7ebaSPaolo Bonzini 
11884ef85a9cSKevin Wolf     /* bdrv_append takes ownership of the mirror_top_bs reference, so we must
11897a25fcd0SMax Reitz      * keep it alive until block_job_create() succeeds even if bs has no parent. */
11904ef85a9cSKevin Wolf     bdrv_ref(mirror_top_bs);
11914ef85a9cSKevin Wolf     bdrv_drained_begin(bs);
1192b2c2832cSKevin Wolf     bdrv_append(mirror_top_bs, bs, &local_err);
11934ef85a9cSKevin Wolf     bdrv_drained_end(bs);
11944ef85a9cSKevin Wolf 
1195b2c2832cSKevin Wolf     if (local_err) {
1196b2c2832cSKevin Wolf         bdrv_unref(mirror_top_bs);
1197b2c2832cSKevin Wolf         error_propagate(errp, local_err);
1198b2c2832cSKevin Wolf         return;
1199b2c2832cSKevin Wolf     }
1200b2c2832cSKevin Wolf 
12014ef85a9cSKevin Wolf     /* Make sure that the source is not resized while the job is running */
12024ef85a9cSKevin Wolf     s = block_job_create(job_id, driver, mirror_top_bs,
12034ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ,
12044ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
12054ef85a9cSKevin Wolf                          BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
12064ef85a9cSKevin Wolf                          creation_flags, cb, opaque, errp);
12074ef85a9cSKevin Wolf     if (!s) {
12084ef85a9cSKevin Wolf         goto fail;
12094ef85a9cSKevin Wolf     }
12107a25fcd0SMax Reitz     /* The block job now has a reference to this node */
12117a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
12127a25fcd0SMax Reitz 
12134ef85a9cSKevin Wolf     s->source = bs;
12144ef85a9cSKevin Wolf     s->mirror_top_bs = mirror_top_bs;
12154ef85a9cSKevin Wolf 
12164ef85a9cSKevin Wolf     /* No resize for the target either; while the mirror is still running, a
12174ef85a9cSKevin Wolf      * consistent read isn't necessarily possible. We could allow writes and
12184ef85a9cSKevin Wolf      * graph modifications, though that would likely defeat the purpose of a
12194ef85a9cSKevin Wolf      * mirror, so leave them blocked for now.
12204ef85a9cSKevin Wolf      *
12214ef85a9cSKevin Wolf      * In the case of active commit, things look a bit different, though,
12224ef85a9cSKevin Wolf      * because the target is an already populated backing file in active use.
12234ef85a9cSKevin Wolf      * We can allow anything except resize there. */
12244ef85a9cSKevin Wolf     target_is_backing = bdrv_chain_contains(bs, target);
12254ef85a9cSKevin Wolf     target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
12264ef85a9cSKevin Wolf     s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
12274ef85a9cSKevin Wolf                         (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
12284ef85a9cSKevin Wolf                         BLK_PERM_WRITE_UNCHANGED |
12294ef85a9cSKevin Wolf                         (target_is_backing ? BLK_PERM_CONSISTENT_READ |
12304ef85a9cSKevin Wolf                                              BLK_PERM_WRITE |
12314ef85a9cSKevin Wolf                                              BLK_PERM_GRAPH_MOD : 0));
1232d7086422SKevin Wolf     ret = blk_insert_bs(s->target, target, errp);
1233d7086422SKevin Wolf     if (ret < 0) {
12344ef85a9cSKevin Wolf         goto fail;
1235d7086422SKevin Wolf     }
1236e253f4b8SKevin Wolf 
123709158f00SBenoît Canet     s->replaces = g_strdup(replaces);
1238b952b558SPaolo Bonzini     s->on_source_error = on_source_error;
1239b952b558SPaolo Bonzini     s->on_target_error = on_target_error;
124003544a6eSFam Zheng     s->is_none_mode = is_none_mode;
1241274fcceeSMax Reitz     s->backing_mode = backing_mode;
12425bc361b8SFam Zheng     s->base = base;
1243eee13dfeSPaolo Bonzini     s->granularity = granularity;
124448ac0a4dSWen Congyang     s->buf_size = ROUND_UP(buf_size, granularity);
12450fc9f8eaSFam Zheng     s->unmap = unmap;
1246b49f7eadSWen Congyang     if (auto_complete) {
1247b49f7eadSWen Congyang         s->should_complete = true;
1248b49f7eadSWen Congyang     }
1249b812f671SPaolo Bonzini 
12500db6e54aSFam Zheng     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1251b8afb520SFam Zheng     if (!s->dirty_bitmap) {
125288f9d1b3SKevin Wolf         goto fail;
1253b8afb520SFam Zheng     }
125410f3cd15SAlberto Garcia 
12554ef85a9cSKevin Wolf     /* Required permissions are already taken with blk_new() */
125676d554e2SKevin Wolf     block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
125776d554e2SKevin Wolf                        &error_abort);
125876d554e2SKevin Wolf 
1259f3ede4b0SAlberto Garcia     /* In commit_active_start() all intermediate nodes disappear, so
1260f3ede4b0SAlberto Garcia      * any jobs in them must be blocked */
12614ef85a9cSKevin Wolf     if (target_is_backing) {
1262f3ede4b0SAlberto Garcia         BlockDriverState *iter;
1263f3ede4b0SAlberto Garcia         for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
12644ef85a9cSKevin Wolf             /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
12654ef85a9cSKevin Wolf              * ourselves at s->base (if writes are blocked for a node, they are
12664ef85a9cSKevin Wolf              * also blocked for its backing file). The other option would be a
12674ef85a9cSKevin Wolf              * second filter driver above s->base (== target). */
12684ef85a9cSKevin Wolf             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
12694ef85a9cSKevin Wolf                                      BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
12704ef85a9cSKevin Wolf                                      errp);
12714ef85a9cSKevin Wolf             if (ret < 0) {
12724ef85a9cSKevin Wolf                 goto fail;
12734ef85a9cSKevin Wolf             }
1274f3ede4b0SAlberto Garcia         }
1275f3ede4b0SAlberto Garcia     }
127610f3cd15SAlberto Garcia 
12775ccac6f1SJohn Snow     trace_mirror_start(bs, s, opaque);
12785ccac6f1SJohn Snow     block_job_start(&s->common);
12794ef85a9cSKevin Wolf     return;
12804ef85a9cSKevin Wolf 
12814ef85a9cSKevin Wolf fail:
12824ef85a9cSKevin Wolf     if (s) {
12837a25fcd0SMax Reitz         /* Make sure this BDS does not go away until we have completed the graph
12847a25fcd0SMax Reitz          * changes below */
12857a25fcd0SMax Reitz         bdrv_ref(mirror_top_bs);
12867a25fcd0SMax Reitz 
12874ef85a9cSKevin Wolf         g_free(s->replaces);
12884ef85a9cSKevin Wolf         blk_unref(s->target);
128905b0d8e3SPaolo Bonzini         block_job_early_fail(&s->common);
12904ef85a9cSKevin Wolf     }
12914ef85a9cSKevin Wolf 
1292c1cef672SFam Zheng     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
1293c1cef672SFam Zheng                             &error_abort);
12945fe31c25SKevin Wolf     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
12957a25fcd0SMax Reitz 
12967a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
1297893f7ebaSPaolo Bonzini }
129803544a6eSFam Zheng 
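/* Entry point for the mirror job proper.  In QEMU this is reached from the
 * drive-mirror and blockdev-mirror QMP commands (blockdev.c); an illustrative
 * invocation, not taken from this file, would be roughly:
 *
 *   { "execute": "drive-mirror",
 *     "arguments": { "device": "drive0", "target": "/tmp/mirror.qcow2",
 *                    "format": "qcow2", "sync": "full" } }
 */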
129971aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs,
130071aa9867SAlberto Garcia                   BlockDriverState *target, const char *replaces,
13015fba6c0eSJohn Snow                   int64_t speed, uint32_t granularity, int64_t buf_size,
1302274fcceeSMax Reitz                   MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1303274fcceeSMax Reitz                   BlockdevOnError on_source_error,
130403544a6eSFam Zheng                   BlockdevOnError on_target_error,
13056cdbceb1SKevin Wolf                   bool unmap, const char *filter_node_name, Error **errp)
130603544a6eSFam Zheng {
130703544a6eSFam Zheng     bool is_none_mode;
130803544a6eSFam Zheng     BlockDriverState *base;
130903544a6eSFam Zheng 
13104b80ab2bSJohn Snow     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
13114b80ab2bSJohn Snow         error_setg(errp, "Sync mode 'incremental' not supported");
1312d58d8453SJohn Snow         return;
1313d58d8453SJohn Snow     }
131403544a6eSFam Zheng     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
1315760e0063SKevin Wolf     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
131647970dfbSJohn Snow     mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
1317274fcceeSMax Reitz                      speed, granularity, buf_size, backing_mode,
131851ccfa2dSFam Zheng                      on_source_error, on_target_error, unmap, NULL, NULL,
13196cdbceb1SKevin Wolf                      &mirror_job_driver, is_none_mode, base, false,
132051ccfa2dSFam Zheng                      filter_node_name, errp);
132103544a6eSFam Zheng }
132203544a6eSFam Zheng 
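/* Active commit is implemented on top of the mirror machinery: reopen 'base'
 * read-write, run a mirror job from bs into base with
 * MIRROR_LEAVE_BACKING_CHAIN, and restore base's original open flags if job
 * creation fails. */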
1323fd62c609SAlberto Garcia void commit_active_start(const char *job_id, BlockDriverState *bs,
132447970dfbSJohn Snow                          BlockDriverState *base, int creation_flags,
132547970dfbSJohn Snow                          int64_t speed, BlockdevOnError on_error,
13260db832f4SKevin Wolf                          const char *filter_node_name,
132778bbd910SFam Zheng                          BlockCompletionFunc *cb, void *opaque,
132878bbd910SFam Zheng                          bool auto_complete, Error **errp)
132903544a6eSFam Zheng {
13304da83585SJeff Cody     int orig_base_flags;
1331cc67f4d1SJeff Cody     Error *local_err = NULL;
13324da83585SJeff Cody 
13334da83585SJeff Cody     orig_base_flags = bdrv_get_flags(base);
13344da83585SJeff Cody 
133520a63d2cSFam Zheng     if (bdrv_reopen(base, bs->open_flags, errp)) {
133620a63d2cSFam Zheng         return;
133720a63d2cSFam Zheng     }
13384da83585SJeff Cody 
133947970dfbSJohn Snow     mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
134071aa9867SAlberto Garcia                      MIRROR_LEAVE_BACKING_CHAIN,
134151ccfa2dSFam Zheng                      on_error, on_error, true, cb, opaque,
13426cdbceb1SKevin Wolf                      &commit_active_job_driver, false, base, auto_complete,
134351ccfa2dSFam Zheng                      filter_node_name, &local_err);
13440fb6395cSMarkus Armbruster     if (local_err) {
1345cc67f4d1SJeff Cody         error_propagate(errp, local_err);
13464da83585SJeff Cody         goto error_restore_flags;
13474da83585SJeff Cody     }
13484da83585SJeff Cody 
13494da83585SJeff Cody     return;
13504da83585SJeff Cody 
13514da83585SJeff Cody error_restore_flags:
13524da83585SJeff Cody     /* Ignore the error from bdrv_reopen (pass NULL for errp), because we want
13534da83585SJeff Cody      * to propagate the original error */
13544da83585SJeff Cody     bdrv_reopen(base, orig_base_flags, NULL);
13554da83585SJeff Cody     return;
135603544a6eSFam Zheng }
1357