/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE   (10 << 20)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
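        /* Failed reads come from the source device, so they are governed by
         * on_source_error; failed writes to the target are handled with
         * on_target_error in the branch below. */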
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
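     * (A sleeping job has common.busy == false, so the check below leaves it
     * to its sleep timer.)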
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
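         * (The yield below is woken from mirror_iteration_done(), which
         * returns chunks to buf_free as in-flight operations complete.)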
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }
        if (IOV_MAX < nb_chunks + added_chunks) {
            trace_mirror_break_iov_max(s, nb_chunks, added_chunks);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.
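     *
     * Depending on what bdrv_get_block_status_above() reports below, this
     * takes one of three forms: a read from the source followed by a write
     * to the target for real data, an efficient zero write for known-zero
     * ranges, or a discard for unallocated ranges.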
     */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
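            /* NULL errp: failure of this best-effort reopen is ignored and
             * the swap below happens regardless. */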
        }
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
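     * (cow_bitmap, allocated below, tracks per granularity-sized chunk
     * whether that chunk has already been copied to the target once.)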
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding int limit. */
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
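        /* For example (hypothetical numbers): with 10 MiB already copied,
         * 2048 dirty sectors and 512 sectors in flight, the length reported
         * below is 10 MiB + 2560 * 512 bytes = 11.25 MiB. */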
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}

static void mirror_complete(BlockJob *job, Error **errp)
622d63ffd87SPaolo Bonzini { 623d63ffd87SPaolo Bonzini MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 62434b5d2c6SMax Reitz Error *local_err = NULL; 625d63ffd87SPaolo Bonzini int ret; 626d63ffd87SPaolo Bonzini 62734b5d2c6SMax Reitz ret = bdrv_open_backing_file(s->target, NULL, &local_err); 628d63ffd87SPaolo Bonzini if (ret < 0) { 62934b5d2c6SMax Reitz error_propagate(errp, local_err); 630d63ffd87SPaolo Bonzini return; 631d63ffd87SPaolo Bonzini } 632d63ffd87SPaolo Bonzini if (!s->synced) { 633c6bd8c70SMarkus Armbruster error_setg(errp, QERR_BLOCK_JOB_NOT_READY, 634bfb197e0SMarkus Armbruster bdrv_get_device_name(job->bs)); 635d63ffd87SPaolo Bonzini return; 636d63ffd87SPaolo Bonzini } 637d63ffd87SPaolo Bonzini 63809158f00SBenoît Canet /* check the target bs is not blocked and block all operations on it */ 63909158f00SBenoît Canet if (s->replaces) { 6405a7e7a0bSStefan Hajnoczi AioContext *replace_aio_context; 6415a7e7a0bSStefan Hajnoczi 64209158f00SBenoît Canet s->to_replace = check_to_replace_node(s->replaces, &local_err); 64309158f00SBenoît Canet if (!s->to_replace) { 64409158f00SBenoît Canet error_propagate(errp, local_err); 64509158f00SBenoît Canet return; 64609158f00SBenoît Canet } 64709158f00SBenoît Canet 6485a7e7a0bSStefan Hajnoczi replace_aio_context = bdrv_get_aio_context(s->to_replace); 6495a7e7a0bSStefan Hajnoczi aio_context_acquire(replace_aio_context); 6505a7e7a0bSStefan Hajnoczi 65109158f00SBenoît Canet error_setg(&s->replace_blocker, 65209158f00SBenoît Canet "block device is in use by block-job-complete"); 65309158f00SBenoît Canet bdrv_op_block_all(s->to_replace, s->replace_blocker); 65409158f00SBenoît Canet bdrv_ref(s->to_replace); 6555a7e7a0bSStefan Hajnoczi 6565a7e7a0bSStefan Hajnoczi aio_context_release(replace_aio_context); 65709158f00SBenoît Canet } 65809158f00SBenoît Canet 659d63ffd87SPaolo Bonzini s->should_complete = true; 660751ebd76SFam Zheng block_job_enter(&s->common); 661d63ffd87SPaolo Bonzini } 662d63ffd87SPaolo Bonzini 6633fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = { 664893f7ebaSPaolo Bonzini .instance_size = sizeof(MirrorBlockJob), 66579e14bf7SFam Zheng .job_type = BLOCK_JOB_TYPE_MIRROR, 666893f7ebaSPaolo Bonzini .set_speed = mirror_set_speed, 667b952b558SPaolo Bonzini .iostatus_reset= mirror_iostatus_reset, 668d63ffd87SPaolo Bonzini .complete = mirror_complete, 669893f7ebaSPaolo Bonzini }; 670893f7ebaSPaolo Bonzini 67103544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = { 67203544a6eSFam Zheng .instance_size = sizeof(MirrorBlockJob), 67303544a6eSFam Zheng .job_type = BLOCK_JOB_TYPE_COMMIT, 67403544a6eSFam Zheng .set_speed = mirror_set_speed, 67503544a6eSFam Zheng .iostatus_reset 67603544a6eSFam Zheng = mirror_iostatus_reset, 67703544a6eSFam Zheng .complete = mirror_complete, 67803544a6eSFam Zheng }; 67903544a6eSFam Zheng 68003544a6eSFam Zheng static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, 68109158f00SBenoît Canet const char *replaces, 6825fba6c0eSJohn Snow int64_t speed, uint32_t granularity, 68303544a6eSFam Zheng int64_t buf_size, 68403544a6eSFam Zheng BlockdevOnError on_source_error, 685b952b558SPaolo Bonzini BlockdevOnError on_target_error, 6860fc9f8eaSFam Zheng bool unmap, 687097310b5SMarkus Armbruster BlockCompletionFunc *cb, 68803544a6eSFam Zheng void *opaque, Error **errp, 68903544a6eSFam Zheng const BlockJobDriver *driver, 69003544a6eSFam Zheng bool is_none_mode, BlockDriverState *base) 691893f7ebaSPaolo Bonzini { 692893f7ebaSPaolo Bonzini MirrorBlockJob *s; 
    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        block_job_release(bs);
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
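    /* MIRROR_SYNC_MODE_FULL and _TOP share this job; TOP passes the current
     * backing file as 'base' so that the initial dirty-bitmap scan in
     * mirror_run() only marks sectors allocated above it. */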
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}
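
/*
 * Usage sketch (illustrative, not part of the build): a hypothetical caller
 * starting a full mirror from 'bs' to 'target' with default granularity and
 * buffer size, no rate limit, and unmap enabled.  'my_mirror_cb' and
 * 'my_opaque' are placeholders; the real callers live in blockdev.c.
 *
 *     Error *err = NULL;
 *
 *     mirror_start(bs, target,
 *                  NULL,                  // replaces: keep the source node
 *                  0,                     // speed: unlimited
 *                  0,                     // granularity: pick a default
 *                  0,                     // buf_size: DEFAULT_MIRROR_BUF_SIZE
 *                  MIRROR_SYNC_MODE_FULL,
 *                  BLOCKDEV_ON_ERROR_REPORT,
 *                  BLOCKDEV_ON_ERROR_REPORT,
 *                  true,                  // unmap zeroed ranges on the target
 *                  my_mirror_cb, my_opaque, &err);
 *     if (err) {
 *         error_report_err(err);
 *     }
 */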