/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "block/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qapi/qmp/qerror.h"
#include "monitor/monitor.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_COMPAT6          4
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    bool is_write;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* For snapshots, block filters like Quorum can implement the following
     * recursive callback. Its purpose is to recurse on the filter children
     * while calling bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);
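    /*
     * Illustrative sketch (hypothetical; "MyFilterState" and the function
     * below are not defined in this header): a filter driver with a single
     * child could simply forward the recursion to that child:
     *
     *     static bool my_filter_recurse_is_first_non_filter(BlockDriverState *bs,
     *                                                       BlockDriverState *candidate)
     *     {
     *         MyFilterState *s = bs->opaque;   // driver-private state (assumed)
     *         return bdrv_recurse_is_first_non_filter(s->child, candidate);
     *     }
     */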
    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers that implement neither bdrv_parse_filename nor bdrv_open should
     * have this field set to true, except ones that are defined only by their
     * child's bs.
     * An example of the last type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors);
    int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors);
    void (*bdrv_close)(BlockDriverState *bs);
    void (*bdrv_rebind)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs);

    /* aio */
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    /*
     * Efficiently zero a region of the disk image. Typically an image format
     * would use a compact metadata representation to implement this. This
     * function pointer may be NULL and .bdrv_co_writev() will be called
     * instead.
     */
    int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum);

    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov,
                             int64_t pos);
    int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    int (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    int (*bdrv_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf);
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;

    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkDebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
                                 const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
                                        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use. Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again. Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);
    void (*bdrv_flush_io_queue)(BlockDriverState *bs);

    QLIST_ENTRY(BlockDriver) list;
};
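
/*
 * Illustrative sketch (hypothetical driver; the "mydummy" names are not part
 * of QEMU): a format or protocol driver typically fills in a static
 * BlockDriver with the callbacks it supports and registers it at startup:
 *
 *     static BlockDriver bdrv_mydummy = {
 *         .format_name    = "mydummy",
 *         .instance_size  = sizeof(BDRVMyDummyState),  // driver-private state
 *         .bdrv_open      = mydummy_open,
 *         .bdrv_close     = mydummy_close,
 *         .bdrv_co_readv  = mydummy_co_readv,
 *         .bdrv_getlength = mydummy_getlength,
 *     };
 *
 *     static void bdrv_mydummy_init(void)
 *     {
 *         bdrv_register(&bdrv_mydummy);
 *     }
 *     block_init(bdrv_mydummy_init);
 */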

typedef struct BlockLimits {
    /* maximum number of sectors that can be discarded at once */
    int max_discard;

    /* optimal alignment for discard requests in sectors */
    int64_t discard_alignment;

    /* maximum number of sectors that can be zeroed at once */
    int max_write_zeroes;

    /* optimal alignment for write zeroes requests in sectors */
    int64_t write_zeroes_alignment;

    /* optimal transfer length in sectors */
    int opt_transfer_length;

    /* memory alignment so that no bounce buffer is needed */
    size_t opt_mem_alignment;
} BlockLimits;

typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int read_only; /* if true, the media is read only */
    int open_flags; /* flags used to open the file, re-used for re-open */
    int encrypted; /* if true, the media is encrypted */
    int valid_key; /* if true, a valid encryption key has been set */
    int sg;        /* if true, the device is a /dev/sg* */
    int copy_on_read; /* if true, copy read backing sectors into image;
                         note this is a reference count */

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    BlockBackend *blk;          /* owning backend, if any */

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;

    char filename[1024];
    char backing_file[1024]; /* if non zero, the image is a diff of
                                this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[1024];

    BlockDriverState *backing_hd;
    BlockDriverState *file;

    NotifierList close_notifiers;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* I/O throttling */
    ThrottleState throttle_state;
    CoQueue throttled_reqs[2];
    bool io_limits_enabled;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    /* I/O Limits */
    BlockLimits bl;

    /* Whether the disk can expand beyond total_sectors */
    int growable;

    /* Whether the image produces zeros when read beyond eof */
    bool zero_beyond_eof;

    /* Alignment requirement for offset/length of I/O requests */
    unsigned int request_alignment;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* NOTE: the following infos are only hints for real hardware
       drivers. They are not used by the block driver */
    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of "drives" the guest sees */
    QTAILQ_ENTRY(BlockDriverState) device_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
    int refcnt;

    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    QDict *options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;
};

int get_tmp_filename(char *filename, int size);

void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg);


/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);
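
/*
 * Hypothetical usage sketch (the my_* names are not part of QEMU): the
 * notifier data passed to the callback is the in-flight BdrvTrackedRequest,
 * which is how the backup job implements its copy-before-write logic.
 * Returning 0 lets the write proceed; a negative errno fails the request.
 *
 *     static int my_before_write(NotifierWithReturn *notifier, void *opaque)
 *     {
 *         BdrvTrackedRequest *req = opaque;
 *         fprintf(stderr, "write at %" PRId64 ", %u bytes\n",
 *                 req->offset, req->bytes);
 *         return 0;
 *     }
 *
 *     static NotifierWithReturn my_write_notifier = {
 *         .notify = my_before_write,
 *     };
 *
 *     bdrv_add_before_write_notifier(bs, &my_write_notifier);
 */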

/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext. This is only needed by block drivers that manage their
 * own children. Both ->file and ->backing_hd are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext. This is only needed by block drivers that manage their own
 * children. Both ->file and ->backing_hd are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding
 * the association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to a
 * new AioContext; detach_aio_context() is called before the target BDS is
 * being detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext. The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);
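
/*
 * Hypothetical usage sketch (the my_job_* names are not part of QEMU): a job
 * that keeps per-AioContext resources such as timers could follow the BDS
 * like this, tearing them down on detach and recreating them on attach:
 *
 *     static void my_job_attached(AioContext *new_context, void *opaque)
 *     {
 *         MyJob *job = opaque;                  // hypothetical job state
 *         job->timer = aio_timer_new(new_context, QEMU_CLOCK_REALTIME,
 *                                    SCALE_MS, my_job_timer_cb, job);
 *     }
 *
 *     static void my_job_detached(void *opaque)
 *     {
 *         MyJob *job = opaque;
 *         timer_del(job->timer);
 *         timer_free(job->timer);
 *     }
 *
 *     bdrv_add_aio_context_notifier(bs, my_job_attached, my_job_detached, job);
 */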

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @base_id: The file name that will be written to @bs as the new
 * backing file if the job completes. Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs. Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs. At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @base_id in the written image and to @base in the live BlockDriverState.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);
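
/*
 * Hypothetical usage sketch (the my_* names are not part of QEMU): the job
 * starters in this header (stream, commit, mirror, backup) share the same
 * callback pattern; @cb runs when the job finishes, with ret holding 0 or a
 * negative errno value:
 *
 *     static void my_stream_done(void *opaque, int ret)
 *     {
 *         BlockDriverState *bs = opaque;
 *         if (ret < 0) {
 *             error_report("streaming of %s failed: %s",
 *                          bdrv_get_node_name(bs), strerror(-ret));
 *         }
 *     }
 *
 *     Error *local_err = NULL;
 *     stream_start(bs, base, NULL, 0, BLOCKDEV_ON_ERROR_REPORT,
 *                  my_stream_done, bs, &local_err);
 */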

/**
 * commit_start:
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay.
 * @errp: Error object.
 *
 */
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp);
/**
 * commit_active_start:
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 */
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp);
/*
 * mirror_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs. Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed. At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/*
 * backup_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a backup operation on @bs. Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  Error **errp);

void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);
void blk_dev_resize_cb(BlockBackend *blk);

#endif /* BLOCK_INT_H */