/*
 * Live block commit
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Jeff Cody <jcody@redhat.com>
 *  Based on stream.c by Stefan Hajnoczi
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/block-common.h"
#include "block/coroutines.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/memalign.h"
#include "system/block-backend.h"

enum {
    /*
     * Size of data buffer for populating the image file. This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    COMMIT_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

typedef struct CommitBlockJob {
    BlockJob common;
    BlockDriverState *commit_top_bs;
    BlockBackend *top;
    BlockBackend *base;
    BlockDriverState *base_bs;
    BlockDriverState *base_overlay;
    BlockdevOnError on_error;
    bool base_read_only;
    bool chain_frozen;
    char *backing_file_str;
    bool backing_mask_protocol;
} CommitBlockJob;

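/*
 * .prepare callback of the job: on successful completion, unfreeze the
 * backing chain, drop the write/resize reference we hold on the base node
 * and collapse the intermediate nodes into the base via
 * bdrv_drop_intermediate().
 */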
static int commit_prepare(Job *job)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);

    bdrv_graph_rdlock_main_loop();
    bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
    s->chain_frozen = false;
    bdrv_graph_rdunlock_main_loop();

    /* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
     * the normal backing chain can be restored. */
    blk_unref(s->base);
    s->base = NULL;

    /* FIXME: bdrv_drop_intermediate treats total failures and partial failures
     * identically. Further work is needed to disambiguate these cases. */
    return bdrv_drop_intermediate(s->commit_top_bs, s->base_bs,
                                  s->backing_file_str,
                                  s->backing_mask_protocol);
}

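/*
 * .abort callback: undo the graph manipulation done by commit_start() when
 * the job fails or is cancelled. The chain is unfrozen (if still frozen),
 * the job's references on the intermediate nodes are dropped, and the
 * commit_top filter node is replaced by its backing file again.
 */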
static void commit_abort(Job *job)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
    BlockDriverState *top_bs = blk_bs(s->top);
    BlockDriverState *commit_top_backing_bs;

    if (s->chain_frozen) {
        bdrv_graph_rdlock_main_loop();
        bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
        bdrv_graph_rdunlock_main_loop();
    }

    /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
    bdrv_ref(top_bs);
    bdrv_ref(s->commit_top_bs);

    if (s->base) {
        blk_unref(s->base);
    }

    /* free the blockers on the intermediate nodes so that bdrv_replace_nodes
     * can succeed */
    block_job_remove_all_bdrv(&s->common);

    /* If bdrv_drop_intermediate() failed (or was not invoked), remove the
     * commit filter driver from the backing chain now. Do this as the final
     * step so that the 'consistent read' permission can be granted.
     *
     * XXX Can (or should) we somehow keep 'consistent read' blocked even
     * after the failed/cancelled commit job is gone? If we already wrote
     * something to base, the intermediate images aren't valid any more. */
    bdrv_graph_rdlock_main_loop();
    commit_top_backing_bs = s->commit_top_bs->backing->bs;
    bdrv_graph_rdunlock_main_loop();

    bdrv_drained_begin(commit_top_backing_bs);
    bdrv_graph_wrlock();
    bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort);
    bdrv_graph_wrunlock();
    bdrv_drained_end(commit_top_backing_bs);

    bdrv_unref(s->commit_top_bs);
    bdrv_unref(top_bs);
}

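/*
 * .clean callback, run after either .prepare or .abort: reopen the base
 * node read-only again if we made it writable, and release the remaining
 * resources held by the job.
 */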
static void commit_clean(Job *job)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);

    /* restore base open flags here if appropriate (e.g., change the base back
     * to r/o). These reopens do not need to be atomic, since we won't abort
     * even on failure here */
    if (s->base_read_only) {
        bdrv_reopen_set_read_only(s->base_bs, true, NULL);
    }

    g_free(s->backing_file_str);
    blk_unref(s->top);
}

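/*
 * Commit a single chunk of up to COMMIT_BUFFER_SIZE bytes starting at
 * @offset. On return, *requested_bytes holds the number of bytes actually
 * processed; it is set to 0 when an error occurred but the error action is
 * not "report", so the caller retries the same range.
 */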
static int commit_iteration(CommitBlockJob *s, int64_t offset,
                            int64_t *requested_bytes, void *buf)
{
    BlockErrorAction action;
    int64_t bytes = *requested_bytes;
    int ret = 0;
    bool error_in_source = true;

    /* Copy if allocated above the base */
    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_common_block_status_above(blk_bs(s->top),
            s->base_overlay, true, true, offset, COMMIT_BUFFER_SIZE,
            &bytes, NULL, NULL, NULL);
    }

    trace_commit_one_iteration(s, offset, bytes, ret);

    if (ret < 0) {
        goto fail;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        if (ret & BDRV_BLOCK_ZERO) {
            /*
             * If the top (sub)clusters are smaller than the base
             * (sub)clusters, this will not unmap unless the underlying device
             * does some tracking of these requests. Ideally, we would find
             * the maximal extent of the zero clusters.
             */
            ret = blk_co_pwrite_zeroes(s->base, offset, bytes,
                                       BDRV_REQ_MAY_UNMAP);
            if (ret < 0) {
                error_in_source = false;
                goto fail;
            }
        } else {
            assert(bytes < SIZE_MAX);

            ret = blk_co_pread(s->top, offset, bytes, buf, 0);
            if (ret < 0) {
                goto fail;
            }

            ret = blk_co_pwrite(s->base, offset, bytes, buf, 0);
            if (ret < 0) {
                error_in_source = false;
                goto fail;
            }
        }

        /*
         * Whether zeroes actually end up on disk depends on the details of
         * the underlying driver. Therefore, this might rate limit more than
         * is necessary.
         */
        block_job_ratelimit_processed_bytes(&s->common, bytes);
    }

    /* Publish progress */

    job_progress_update(&s->common.job, bytes);

    *requested_bytes = bytes;

    return 0;

fail:
    action = block_job_error_action(&s->common, s->on_error,
                                    error_in_source, -ret);
    if (action == BLOCK_ERROR_ACTION_REPORT) {
        return ret;
    }

    *requested_bytes = 0;

    return 0;
}

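/*
 * Main coroutine of the commit job: grow the base image if it is smaller
 * than the top image, then walk the top image and copy each allocated
 * chunk down into the base, observing the rate limit and cancellation
 * between iterations.
 */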
static int coroutine_fn commit_run(Job *job, Error **errp)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
    int64_t offset;
    int ret = 0;
    int64_t n = 0; /* bytes */
    QEMU_AUTO_VFREE void *buf = NULL;
    int64_t len, base_len;

    len = blk_co_getlength(s->top);
    if (len < 0) {
        return len;
    }
    job_progress_set_remaining(&s->common.job, len);

    base_len = blk_co_getlength(s->base);
    if (base_len < 0) {
        return base_len;
    }

    if (base_len < len) {
        ret = blk_co_truncate(s->base, len, false, PREALLOC_MODE_OFF, 0, NULL);
        if (ret) {
            return ret;
        }
    }

    buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);

    for (offset = 0; offset < len; offset += n) {
        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that bdrv_drain_all() returns.
         */
        block_job_ratelimit_sleep(&s->common);
        if (job_is_cancelled(&s->common.job)) {
            break;
        }

        ret = commit_iteration(s, offset, &n, buf);

        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static const BlockJobDriver commit_job_driver = {
    .job_driver = {
        .instance_size = sizeof(CommitBlockJob),
        .job_type      = JOB_TYPE_COMMIT,
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = commit_run,
        .prepare       = commit_prepare,
        .abort         = commit_abort,
        .clean         = commit_clean
    },
};

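/* The commit_top filter holds no data of its own; forward reads to the
 * backing file. */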
static int coroutine_fn GRAPH_RDLOCK
bdrv_commit_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

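/* Report the backing file's filename as ours, since commit_top is a
 * transparent filter. */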
static GRAPH_RDLOCK void bdrv_commit_top_refresh_filename(BlockDriverState *bs)
{
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

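/* The filter needs no permissions on its child and shares all of them, so
 * that the commit job itself can take whatever it needs on the chain. */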
static void bdrv_commit_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    *nperm = 0;
    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_commit_top = {
    .format_name                = "commit_top",
    .bdrv_co_preadv             = bdrv_commit_top_preadv,
    .bdrv_refresh_filename      = bdrv_commit_top_refresh_filename,
    .bdrv_child_perm            = bdrv_commit_top_child_perm,

    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};

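/*
 * Create and start a live commit block job: insert the commit_top filter
 * above @top, take the required permissions on @base and the intermediate
 * nodes, freeze that part of the backing chain and start the job that
 * copies the data allocated in @top and the intermediate nodes into @base.
 * On error, *errp is set and no job is created.
 */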
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  bool backing_mask_protocol,
                  const char *filter_node_name, Error **errp)
{
    CommitBlockJob *s;
    BlockDriverState *iter;
    BlockDriverState *commit_top_bs = NULL;
    BlockDriverState *filtered_base;
    int64_t base_size, top_size;
    uint64_t base_perms, iter_shared_perms;
    int ret;

    GLOBAL_STATE_CODE();

    assert(top != bs);
    bdrv_graph_rdlock_main_loop();
    if (bdrv_skip_filters(top) == bdrv_skip_filters(base)) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        bdrv_graph_rdunlock_main_loop();
        return;
    }
    bdrv_graph_rdunlock_main_loop();

    base_size = bdrv_getlength(base);
    if (base_size < 0) {
        error_setg_errno(errp, -base_size, "Could not inquire base image size");
        return;
    }

    top_size = bdrv_getlength(top);
    if (top_size < 0) {
        error_setg_errno(errp, -top_size, "Could not inquire top image size");
        return;
    }

    base_perms = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    if (base_size < top_size) {
        base_perms |= BLK_PERM_RESIZE;
    }

    s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        return;
    }

    /* convert base to r/w, if necessary */
    s->base_read_only = bdrv_is_read_only(base);
    if (s->base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) != 0) {
            goto fail;
        }
    }

    /* Insert commit_top block node above top, so we can block consistent read
     * on the backing chain below it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, filter_node_name, 0,
                                         errp);
    if (commit_top_bs == NULL) {
        goto fail;
    }
    if (!filter_node_name) {
        commit_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    commit_top_bs->never_freeze = true;

    commit_top_bs->total_sectors = top->total_sectors;

    ret = bdrv_append(commit_top_bs, top, errp);
    bdrv_unref(commit_top_bs); /* referenced by new parents or failed */
    if (ret < 0) {
        commit_top_bs = NULL;
        goto fail;
    }

    s->commit_top_bs = commit_top_bs;

    /*
     * Block all nodes between top and base, because they will
     * disappear from the chain after this operation.
     * Note that this assumes that the user is fine with removing all
     * nodes (including R/W filters) between top and base. Assuring
     * this is the responsibility of the interface (i.e. whoever calls
     * commit_start()).
     */
    bdrv_drain_all_begin();
    bdrv_graph_wrlock();
    s->base_overlay = bdrv_find_overlay(top, base);
    assert(s->base_overlay);

    /*
     * The topmost node with
     * bdrv_skip_filters(filtered_base) == bdrv_skip_filters(base)
     */
    filtered_base = bdrv_cow_bs(s->base_overlay);
    assert(bdrv_skip_filters(filtered_base) == bdrv_skip_filters(base));

    /*
     * XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves
     * at s->base (if writes are blocked for a node, they are also blocked
     * for its backing file). The other options would be a second filter
     * driver above s->base.
     */
    iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

    for (iter = top; iter != base; iter = bdrv_filter_or_cow_bs(iter)) {
        if (iter == filtered_base) {
            /*
             * From here on, all nodes are filters on the base. This
             * allows us to share BLK_PERM_CONSISTENT_READ.
             */
            iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
        }

        ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                 iter_shared_perms, errp);
        if (ret < 0) {
            bdrv_graph_wrunlock();
            bdrv_drain_all_end();
            goto fail;
        }
    }

    if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
        bdrv_graph_wrunlock();
        bdrv_drain_all_end();
        goto fail;
    }
    s->chain_frozen = true;

    ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
    bdrv_graph_wrunlock();
    bdrv_drain_all_end();

    if (ret < 0) {
        goto fail;
    }

    s->base = blk_new(s->common.job.aio_context,
                      base_perms,
                      BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_WRITE_UNCHANGED);
    ret = blk_insert_bs(s->base, base, errp);
    if (ret < 0) {
        goto fail;
    }
    blk_set_disable_request_queuing(s->base, true);
    s->base_bs = base;

    /* Required permissions are already taken with block_job_add_bdrv() */
    s->top = blk_new(s->common.job.aio_context, 0, BLK_PERM_ALL);
    ret = blk_insert_bs(s->top, top, errp);
    if (ret < 0) {
        goto fail;
    }
    blk_set_disable_request_queuing(s->top, true);

    s->backing_file_str = g_strdup(backing_file_str);
    s->backing_mask_protocol = backing_mask_protocol;
    s->on_error = on_error;

    trace_commit_start(bs, base, top, s);
    job_start(&s->common.job);
    return;

fail:
    if (s->chain_frozen) {
        bdrv_graph_rdlock_main_loop();
        bdrv_unfreeze_backing_chain(commit_top_bs, base);
        bdrv_graph_rdunlock_main_loop();
    }
    if (s->base) {
        blk_unref(s->base);
    }
    if (s->top) {
        blk_unref(s->top);
    }
    if (s->base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    job_early_fail(&s->common.job);
    /* commit_top_bs has to be replaced after deleting the block job,
     * otherwise this would fail because of lack of permissions. */
    if (commit_top_bs) {
        bdrv_drained_begin(top);
        bdrv_graph_wrlock();
        bdrv_replace_node(commit_top_bs, top, &error_abort);
        bdrv_graph_wrunlock();
        bdrv_drained_end(top);
    }
}


#define COMMIT_BUF_SIZE (2048 * BDRV_SECTOR_SIZE)

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockBackend *src, *backing;
    BlockDriverState *backing_file_bs = NULL;
    BlockDriverState *commit_top_bs = NULL;
    BlockDriver *drv = bs->drv;
    AioContext *ctx;
    int64_t offset, length, backing_length;
    int ro;
    int64_t n;
    int ret = 0;
    QEMU_AUTO_VFREE uint8_t *buf = NULL;
    Error *local_err = NULL;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (!drv) {
        return -ENOMEDIUM;
    }

    backing_file_bs = bdrv_cow_bs(bs);

    if (!backing_file_bs) {
        return -ENOTSUP;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
        bdrv_op_is_blocked(backing_file_bs, BLOCK_OP_TYPE_COMMIT_TARGET, NULL))
    {
        return -EBUSY;
    }

    ro = bdrv_is_read_only(backing_file_bs);

    if (ro) {
        if (bdrv_reopen_set_read_only(backing_file_bs, false, NULL)) {
            return -EACCES;
        }
    }

    ctx = bdrv_get_aio_context(bs);
    /* WRITE_UNCHANGED is required for bdrv_make_empty() */
    src = blk_new(ctx, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED,
                  BLK_PERM_ALL);
    backing = blk_new(ctx, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);

    ret = blk_insert_bs(src, bs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    /* Insert commit_top block node above backing, so we can write to it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, NULL, BDRV_O_RDWR,
                                         &local_err);
    if (commit_top_bs == NULL) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    bdrv_set_backing_hd(commit_top_bs, backing_file_bs, &error_abort);
    bdrv_set_backing_hd(bs, commit_top_bs, &error_abort);

    ret = blk_insert_bs(backing, backing_file_bs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    length = blk_getlength(src);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = blk_getlength(backing);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible. If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = blk_truncate(backing, length, false, PREALLOC_MODE_OFF, 0,
                           &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto ro_cleanup;
        }
    }

    /* blk_try_blockalign() for src will choose an alignment that works for
     * backing as well, so no need to compare the alignment manually. */
    buf = blk_try_blockalign(src, COMMIT_BUF_SIZE);
    if (buf == NULL) {
        ret = -ENOMEM;
        goto ro_cleanup;
    }

    for (offset = 0; offset < length; offset += n) {
        ret = bdrv_is_allocated(bs, offset, COMMIT_BUF_SIZE, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = blk_pread(src, offset, n, buf, 0);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = blk_pwrite(backing, offset, n, buf, 0);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    ret = blk_make_empty(src, NULL);
    /* Ignore -ENOTSUP */
    if (ret < 0 && ret != -ENOTSUP) {
        goto ro_cleanup;
    }

    blk_flush(src);

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    blk_flush(backing);

    ret = 0;
ro_cleanup:
    blk_unref(backing);
    if (bdrv_cow_bs(bs) != backing_file_bs) {
        bdrv_set_backing_hd(bs, backing_file_bs, &error_abort);
    }
    bdrv_unref(commit_top_bs);
    blk_unref(src);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen_set_read_only(backing_file_bs, true, NULL);
    }

    return ret;
}