/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    BLOCK_SIZE = 512 * BDRV_SECTORS_PER_DIRTY_CHUNK, /* in bytes */
};

#define SLICE_TIME 100000000ULL /* ns */

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    MirrorSyncMode mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    size_t buf_size;
    unsigned long *cow_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
} MirrorBlockJob;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

static int coroutine_fn mirror_iteration(MirrorBlockJob *s,
                                         BlockErrorAction *p_action)
{
    BlockDriverState *source = s->common.bs;
    BlockDriverState *target = s->target;
    QEMUIOVector qiov;
    int ret, nb_sectors;
    int64_t end, sector_num, chunk_num;
    struct iovec iov;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
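        /* The iterator wrapped around; mirror_run() only calls us while the
         * dirty count is nonzero, so the restarted iterator must find a
         * dirty chunk. */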
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(source));
        assert(s->sector_num >= 0);
    }

    /* If we have no backing file yet in the destination, and the cluster size
     * is very large, we need to do COW ourselves.  The first time a cluster is
     * copied, copy it entirely.
     *
     * Because both BDRV_SECTORS_PER_DIRTY_CHUNK and the cluster size are
     * powers of two, the number of sectors to copy cannot exceed one cluster.
     */
    sector_num = s->sector_num;
    nb_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
    chunk_num = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    if (s->cow_bitmap && !test_bit(chunk_num, s->cow_bitmap)) {
        trace_mirror_cow(s, sector_num);
        bdrv_round_to_clusters(s->target,
                               sector_num, BDRV_SECTORS_PER_DIRTY_CHUNK,
                               &sector_num, &nb_sectors);
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    nb_sectors = MIN(nb_sectors, end - sector_num);
    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    iov.iov_base = s->buf;
    iov.iov_len  = nb_sectors * 512;
    qemu_iovec_init_external(&qiov, &iov, 1);

    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    ret = bdrv_co_readv(source, sector_num, nb_sectors, &qiov);
    if (ret < 0) {
        *p_action = mirror_error_action(s, true, -ret);
        goto fail;
    }
    ret = bdrv_co_writev(target, sector_num, nb_sectors, &qiov);
    if (ret < 0) {
        *p_action = mirror_error_action(s, false, -ret);
        s->synced = false;
        goto fail;
    }
    if (s->cow_bitmap) {
        bitmap_set(s->cow_bitmap, sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK,
                   nb_sectors / BDRV_SECTORS_PER_DIRTY_CHUNK);
    }
    return 0;

fail:
    /* Try again later.  */
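    /* Re-dirtying the chunk makes a later iteration pick it up again. */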
    bdrv_set_dirty(source, sector_num, nb_sectors);
    return ret;
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, length;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len < 0) {
        block_job_completed(&s->common, s->common.len);
        return;
    }

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        bdrv_get_info(s->target, &bdi);
        if (s->buf_size < bdi.cluster_size) {
            s->buf_size = bdi.cluster_size;
            length = (bdrv_getlength(bs) + BLOCK_SIZE - 1) / BLOCK_SIZE;
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_blockalign(bs, s->buf_size);

    if (s->mode != MIRROR_SYNC_MODE_NONE) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base;
        base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
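        /* bdrv_co_is_allocated_above() checks allocation between bs and base
         * (exclusive), so a NULL base (FULL mode) dirties everything allocated
         * anywhere in the chain, while TOP mode only dirties sectors allocated
         * in the active image. */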
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (BDRV_SECTORS_PER_DIRTY_CHUNK - 1)) + 1;
            ret = bdrv_co_is_allocated_above(bs, base,
                                             sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, &s->hbi);
    for (;;) {
        uint64_t delay_ns;
        int64_t cnt;
        bool should_complete;

        cnt = bdrv_get_dirty_count(bs);
        if (cnt != 0) {
            BlockErrorAction action = BDRV_ACTION_REPORT;
            ret = mirror_iteration(s, &action);
            if (ret < 0 && action == BDRV_ACTION_REPORT) {
                goto immediate_exit;
            }
            cnt = bdrv_get_dirty_count(bs);
        }

        should_complete = false;
        if (cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;

            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, BDRV_SECTORS_PER_DIRTY_CHUNK);
            } else {
                delay_ns = 0;
            }

            /* Note that even when no rate limit is applied we need to yield
             * with no pending I/O here so that bdrv_drain_all() returns.
             */
            block_job_sleep_ns(&s->common, rt_clock, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, rt_clock, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
    }

immediate_exit:
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    bdrv_set_dirty_tracking(bs, false);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
            bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
        }
        bdrv_swap(s->target, s->common.bs);
    }
    bdrv_close(s->target);
    bdrv_delete(s->target);
    block_job_completed(&s->common, ret);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

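    /* The target's iostatus was enabled by mirror_start(); reset it together
     * with the job's iostatus. */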
    bdrv_iostatus_reset(s->target);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    int ret;

    ret = bdrv_open_backing_file(s->target);
    if (ret < 0) {
        char backing_filename[PATH_MAX];
        bdrv_get_full_backing_filename(s->target, backing_filename,
                                       sizeof(backing_filename));
        error_set(errp, QERR_OPEN_FILE_FAILED, backing_filename);
        return;
    }
    if (!s->synced) {
        error_set(errp, QERR_BLOCK_JOB_NOT_READY, job->bs->device_name);
        return;
    }

    s->should_complete = true;
    block_job_resume(job);
}

static BlockJobType mirror_job_type = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = "mirror",
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    MirrorBlockJob *s;

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(&mirror_job_type, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->mode = mode;
    s->buf_size = BLOCK_SIZE;

    bdrv_set_dirty_tracking(bs, true);
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}