/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "block/aio-wait.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/stats64.h"
#include "qemu/timer.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_ENCRYPT_FORMAT    "encrypt.format"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_HWVERSION         "hwversion"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512

enum BdrvTrackedRequestType {
    BDRV_TRACKED_READ,
    BDRV_TRACKED_WRITE,
    BDRV_TRACKED_DISCARD,
};

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    enum BdrvTrackedRequestType type;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* set to true if the BlockDriver is a block filter. Block filters pass
     * certain callbacks that refer to data (see block.c) to their bs->file if
     * the driver doesn't implement them. Drivers that do not wish to forward
     * must implement them and return -ENOTSUP.
     */
    bool is_filter;
    /* For snapshots, block filters like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
     * this field set to true, except ones that are defined only by their
     * child's bs.
     * An example of the latter type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);

    /* Protocol drivers should implement this instead of bdrv_open */
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    void (*bdrv_close)(BlockDriverState *bs);
    int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts,
                                       Error **errp);
    int coroutine_fn (*bdrv_co_create_opts)(const char *filename,
                                            QemuOpts *opts,
                                            Error **errp);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);

    /* aio */
    BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
        int64_t offset, int bytes,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);

    /**
     * @offset: position in bytes to read at
     * @bytes: number of bytes to read
     * @qiov: the buffers to fill with read data
     * @flags: currently unused, always 0
     *
     * @offset and @bytes will be a multiple of 'request_alignment',
     * but the length of individual @qiov elements does not have to
     * be a multiple.
     *
     * @bytes will always equal the total size of @qiov, and will be
     * no larger than 'max_transfer'.
     *
     * The buffer in @qiov may point directly to guest memory.
     */
    int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
    /**
     * @offset: position in bytes to write at
     * @bytes: number of bytes to write
     * @qiov: the buffers containing data to write
     * @flags: zero or more bits allowed by 'supported_write_flags'
     *
     * @offset and @bytes will be a multiple of 'request_alignment',
     * but the length of individual @qiov elements does not have to
     * be a multiple.
     *
     * @bytes will always equal the total size of @qiov, and will be
     * no larger than 'max_transfer'.
     *
     * The buffer in @qiov may point directly to guest memory.
     */
    int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
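
    /*
     * Illustrative sketch (not part of QEMU): a hypothetical filter driver
     * "blkexample" could implement the byte-based read/write callbacks by
     * simply forwarding requests to its bs->file child, using the
     * bdrv_co_preadv()/bdrv_co_pwritev() helpers declared later in this
     * header.  The blkexample_* names are assumptions for the example only:
     *
     *   static int coroutine_fn blkexample_co_preadv(BlockDriverState *bs,
     *       uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
     *   {
     *       return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
     *   }
     *
     *   static int coroutine_fn blkexample_co_pwritev(BlockDriverState *bs,
     *       uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
     *   {
     *       return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
     *   }
     *
     * The block layer has already aligned @offset/@bytes to
     * request_alignment, so no further splitting is needed here.
     */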

    /*
     * Efficiently zero a region of the disk image.  Typically an image format
     * would use a compact metadata representation to implement this.  This
     * function pointer may be NULL or return -ENOTSUP and .bdrv_co_writev()
     * will be called instead.
     */
    int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
        int64_t offset, int bytes, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
        int64_t offset, int bytes);

    /* Map [offset, offset + nbytes) range onto a child of @bs to copy from,
     * and invoke bdrv_co_copy_range_from(child, ...), or invoke
     * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
     *
     * See the comment of bdrv_co_copy_range for the parameter and return value
     * semantics.
     */
    int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs,
                                                BdrvChild *src,
                                                uint64_t offset,
                                                BdrvChild *dst,
                                                uint64_t dst_offset,
                                                uint64_t bytes,
                                                BdrvRequestFlags flags);

    /* Map [offset, offset + nbytes) range onto a child of bs to copy data to,
     * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
     * operation if @bs is the leaf and @src has the same BlockDriver.  Return
     * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
     *
     * See the comment of bdrv_co_copy_range for the parameter and return value
     * semantics.
     */
    int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs,
                                              BdrvChild *src,
                                              uint64_t src_offset,
                                              BdrvChild *dst,
                                              uint64_t dst_offset,
                                              uint64_t bytes,
                                              BdrvRequestFlags flags);

    /*
     * Building block for bdrv_block_status[_above] and
     * bdrv_is_allocated[_above].  The driver should answer only
     * according to the current layer, and should only need to set
     * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
     * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
     * layer, the result should be 0 (and not BDRV_BLOCK_ZERO).  See
     * block.h for the overall meaning of the bits.  As a hint, the
     * flag want_zero is true if the caller cares more about precise
     * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
     * overall allocation (favor larger *pnum, perhaps by reporting
     * _DATA instead of _ZERO).  The block layer guarantees input
     * clamped to bdrv_getlength() and aligned to request_alignment,
     * as well as non-NULL pnum, map, and file; in turn, the driver
     * must return an error or set pnum to an aligned non-zero value.
     */
    int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
        bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
        int64_t *map, BlockDriverState **file);
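
    /*
     * Illustrative sketch (not part of QEMU): a pass-through style driver
     * whose data lives 1:1 in its bs->file child could answer block-status
     * queries for the whole queried range at once.  The blkexample_* name is
     * an assumption for the example only:
     *
     *   static int coroutine_fn blkexample_co_block_status(BlockDriverState *bs,
     *       bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
     *       int64_t *map, BlockDriverState **file)
     *   {
     *       *pnum = bytes;
     *       *map = offset;
     *       *file = bs->file->bs;
     *       return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
     *   }
     *
     * Filter drivers that just want to defer to bs->file can instead point
     * this callback at bdrv_co_block_status_from_file(), declared near the
     * end of this header.
     */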

    /*
     * Invalidate any cached meta-data.
     */
    void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs,
                                                  Error **errp);
    int (*bdrv_inactivate)(BlockDriverState *bs);

    /*
     * Flushes all data for all layers by calling bdrv_co_flush for underlying
     * layers, if needed. This function is needed for deterministic
     * synchronization of the flush finishing callback.
     */
    int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example file-posix.c calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    /*
     * Drivers setting this field must be able to work with just a plain
     * filename with '<protocol_name>:' as a prefix, and no other options.
     * Options may be extracted from the filename by implementing
     * bdrv_parse_filename.
     */
    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset,
                         PreallocMode prealloc, Error **errp);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
    BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,
                                      Error **errp);

    int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
                                          QEMUIOVector *qiov,
                                          int64_t pos);
    int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,
                                          QEMUIOVector *qiov,
                                          int64_t pos);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
                                  unsigned long int req, void *buf,
                                  BlockCompletionFunc *cb, void *opaque);
    int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs,
                                      unsigned long int req, void *buf);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;

    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs,
                                      BdrvCheckResult *result,
                                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
                                 const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
                                        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use. Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again. Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors).
     * On success, store them in @geo and return 0.
     * On failure return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    /**
     * bdrv_co_drain_begin is called if implemented in the beginning of a
     * drain operation to drain and stop any internal sources of requests in
     * the driver.
     * bdrv_co_drain_end is called if implemented at the end of the drain.
     *
     * They should be used by the driver to e.g. manage scheduled I/O
     * requests, or toggle an internal state. After the end of the drain new
     * requests will continue normally.
     */
    void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
    void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);

    void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
                           Error **errp);
    void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
                           Error **errp);

    /**
     * Informs the block driver that a permission change is intended. The
     * driver checks whether the change is permissible and may take other
     * preparations for the change (e.g. get file system locks). This
     * operation is always followed by a call to either .bdrv_set_perm or
     * .bdrv_abort_perm_update.
     *
     * Checks whether the requested set of cumulative permissions in @perm
     * can be granted for accessing @bs and whether no other users are using
     * permissions other than those given in @shared (both arguments take
     * BLK_PERM_* bitmasks).
     *
     * If both conditions are met, 0 is returned. Otherwise, -errno is returned
     * and errp is set to an error describing the conflict.
     */
    int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
                           uint64_t shared, Error **errp);

    /**
     * Called to inform the driver that the cumulative set of used permissions
     * for @bs has changed to @perm, and the set of sharable permissions to
     * @shared. The driver can use this to propagate changes to its children
     * (i.e. request permissions only if a parent actually needs them).
     *
     * This function is only invoked after bdrv_check_perm(), so block drivers
     * may rely on preparations made in their .bdrv_check_perm implementation.
     */
    void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared);

    /*
     * Called to inform the driver that after a previous bdrv_check_perm()
     * call, the permission update is not performed and any preparations made
     * for it (e.g. taken file locks) need to be undone.
     *
     * This function can be called even for nodes that never saw a
     * bdrv_check_perm() call. It is a no-op then.
     */
    void (*bdrv_abort_perm_update)(BlockDriverState *bs);

    /**
     * Returns in @nperm and @nshared the permissions that the driver for @bs
     * needs on its child @c, based on the cumulative permissions requested by
     * the parents in @parent_perm and @parent_shared.
     *
     * If @c is NULL, return the permissions for attaching a new child for the
     * given @role.
     *
     * If @reopen_queue is non-NULL, don't return the currently needed
     * permissions, but those that will be needed after applying the
     * @reopen_queue.
     */
    void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c,
                            const BdrvChildRole *role,
                            BlockReopenQueue *reopen_queue,
                            uint64_t parent_perm, uint64_t parent_shared,
                            uint64_t *nperm, uint64_t *nshared);

    /**
     * Bitmaps should be marked as 'IN_USE' in the image when it is reopened
     * read-write. This handler should implement that; on success it should
     * also clear the readonly field of the affected BlockDirtyBitmaps.
     */
    int (*bdrv_reopen_bitmaps_rw)(BlockDriverState *bs, Error **errp);
    bool (*bdrv_can_store_new_dirty_bitmap)(BlockDriverState *bs,
                                            const char *name,
                                            uint32_t granularity,
                                            Error **errp);
    void (*bdrv_remove_persistent_dirty_bitmap)(BlockDriverState *bs,
                                                const char *name,
                                                Error **errp);

    /**
     * Register/unregister a buffer for I/O. For example, this is useful when
     * the driver wants to know up front which memory areas will later be used
     * in iovs, so that it can do IOMMU mapping with VFIO etc., in order to
     * get better performance. In the case of VFIO drivers, this callback is
     * used to do DMA mapping for hot buffers.
     */
    void (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size);
    void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host);
    QLIST_ENTRY(BlockDriver) list;
};
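
/*
 * Illustrative sketch (not part of QEMU): a minimal filter BlockDriver
 * definition wiring the callbacks above together.  The "blkexample" name and
 * the blkexample_* functions are assumptions for the example; the
 * bdrv_co_block_status_from_file() and bdrv_filter_default_perms() helpers
 * are declared near the end of this header.  A real driver would register
 * this with bdrv_register() from a block_init() constructor.
 *
 *   static BlockDriver bdrv_blkexample = {
 *       .format_name           = "blkexample",
 *       .instance_size         = 0,
 *       .is_filter             = true,
 *       .bdrv_open             = blkexample_open,
 *       .bdrv_close            = blkexample_close,
 *       .bdrv_getlength        = blkexample_getlength,
 *       .bdrv_co_preadv        = blkexample_co_preadv,
 *       .bdrv_co_pwritev       = blkexample_co_pwritev,
 *       .bdrv_co_flush         = blkexample_co_flush,
 *       .bdrv_co_block_status  = bdrv_co_block_status_from_file,
 *       .bdrv_child_perm       = bdrv_filter_default_perms,
 *   };
 */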

typedef struct BlockLimits {
    /* Alignment requirement, in bytes, for offset/length of I/O
     * requests. Must be a power of 2 less than INT_MAX; defaults to
     * 1 for drivers with modern byte interfaces, and to 512
     * otherwise. */
    uint32_t request_alignment;

    /* Maximum number of bytes that can be discarded at once (since it
     * is signed, it must be < 2G, if set). Must be multiple of
     * pdiscard_alignment, but need not be power of 2. May be 0 if no
     * inherent 32-bit limit */
    int32_t max_pdiscard;

    /* Optimal alignment for discard requests in bytes. A power of 2
     * is best but not mandatory. Must be a multiple of
     * bl.request_alignment, and must be less than max_pdiscard if
     * that is set. May be 0 if bl.request_alignment is good enough */
    uint32_t pdiscard_alignment;

    /* Maximum number of bytes that can be zeroized at once (since it
     * is signed, it must be < 2G, if set). Must be multiple of
     * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
    int32_t max_pwrite_zeroes;

    /* Optimal alignment for write zeroes requests in bytes. A power
     * of 2 is best but not mandatory. Must be a multiple of
     * bl.request_alignment, and must be less than max_pwrite_zeroes
     * if that is set. May be 0 if bl.request_alignment is good
     * enough */
    uint32_t pwrite_zeroes_alignment;

    /* Optimal transfer length in bytes. A power of 2 is best but not
     * mandatory. Must be a multiple of bl.request_alignment, or 0 if
     * no preferred size */
    uint32_t opt_transfer;

    /* Maximal transfer length in bytes. Need not be power of 2, but
     * must be multiple of opt_transfer and bl.request_alignment, or 0
     * for no 32-bit limit. For now, anything larger than INT_MAX is
     * clamped down. */
    uint32_t max_transfer;

    /* memory alignment, in bytes so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment, in bytes, for bounce buffer */
    size_t opt_mem_alignment;

    /* maximum number of iovec elements */
    int max_iov;
} BlockLimits;
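
/*
 * Illustrative sketch (not part of QEMU): a driver's .bdrv_refresh_limits
 * callback fills in bs->bl, respecting the constraints documented above
 * (signed limits below 2G and multiples of the corresponding alignment).
 * The blkexample_* name and the particular values are assumptions chosen
 * for the example only:
 *
 *   static void blkexample_refresh_limits(BlockDriverState *bs, Error **errp)
 *   {
 *       bs->bl.request_alignment       = 4096;
 *       bs->bl.opt_transfer            = 64 * 1024;
 *       bs->bl.max_transfer            = 8 * 1024 * 1024;
 *       bs->bl.pwrite_zeroes_alignment = 64 * 1024;
 *       bs->bl.max_pwrite_zeroes       = 1 << 30;
 *       bs->bl.pdiscard_alignment      = 64 * 1024;
 *       bs->bl.max_pdiscard            = 1 << 30;
 *       bs->bl.opt_mem_alignment       = 4096;
 *   }
 */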

typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;
    bool deleted;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

struct BdrvChildRole {
    /* If true, bdrv_replace_node() doesn't change the node this BdrvChild
     * points to. */
    bool stay_at_node;

    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);

    void (*change_media)(BdrvChild *child, bool load);
    void (*resize)(BdrvChild *child);

    /* Returns a name that is supposedly more useful for human users than the
     * node name for identifying the node in question (in particular, a BB
     * name), or NULL if the parent can't provide a better name. */
    const char *(*get_name)(BdrvChild *child);

    /* Returns a malloced string that describes the parent of the child for a
     * human reader. This could be a node-name, BlockBackend name, qdev ID or
     * QOM path of the device owning the BlockBackend, job type and ID etc. The
     * caller is responsible for freeing the memory. */
    char *(*get_parent_desc)(BdrvChild *child);

    /*
     * If this pair of functions is implemented, the parent doesn't issue new
     * requests after returning from .drained_begin() until .drained_end() is
     * called.
     *
     * Note that this can be nested. If drained_begin() was called twice, new
     * I/O is allowed only after drained_end() was called twice, too.
     */
    void (*drained_begin)(BdrvChild *child);
    void (*drained_end)(BdrvChild *child);

    /* Notifies the parent that the child has been activated/inactivated (e.g.
     * when migration is completing) and it can start/stop requesting
     * permissions and doing I/O on it. */
    void (*activate)(BdrvChild *child, Error **errp);
    int (*inactivate)(BdrvChild *child);

    void (*attach)(BdrvChild *child);
    void (*detach)(BdrvChild *child);

    /* Notifies the parent that the filename of its child has changed (e.g.
     * because the direct child was removed from the backing chain), so that it
     * can update its reference. */
    int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
                           const char *filename, Error **errp);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;
extern const BdrvChildRole child_backing;

struct BdrvChild {
    BlockDriverState *bs;
    char *name;
    const BdrvChildRole *role;
    void *opaque;

    /**
     * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
     */
    uint64_t perm;

    /**
     * Permissions that can still be granted to other users of @bs while this
     * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
     */
    uint64_t shared_perm;

    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};

/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    /* Protected by big QEMU lock or read-only after opening. No special
     * locking needed during I/O...
     */
    int open_flags; /* flags used to open the file, re-used for re-open */
    bool read_only; /* if true, the media is read only */
    bool encrypted; /* if true, the media is encrypted */
    bool sg;        /* if true, the device is a /dev/sg* */
    bool probed;    /* if true, format was probed rather than specified */
    bool force_share;    /* if true, always allow all shared permissions */
    bool implicit;  /* if true, this filter node was automatically inserted */

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
    bool walking_aio_notifiers; /* to make removal during iteration safe */

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non-empty, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;
    BdrvChild *file;

    /* I/O Limits */
    BlockLimits bl;

    /* Flags honored during pwrite (so far: BDRV_REQ_FUA,
     * BDRV_REQ_WRITE_UNCHANGED).
     * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
     * writes will be issued as normal writes without the flag set.
     * This is important to note for drivers that do not explicitly
     * request a WRITE permission for their children and instead take
     * the same permissions as their parent did (this is commonly what
     * block filters do). Such drivers have to be aware that the
     * parent may have taken a WRITE_UNCHANGED permission only and is
     * issuing such requests. Drivers either must make sure that
     * these requests do not result in plain WRITE accesses (usually
     * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
     * every incoming write request as-is, including potentially that
     * flag), or they have to explicitly take the WRITE permission for
     * their children. */
    unsigned int supported_write_flags;
    /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
     * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) */
    unsigned int supported_zero_flags;
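
    /*
     * Illustrative sketch (not part of QEMU): a driver typically advertises
     * these flags from its open function, and a filter can additionally pass
     * through whatever its bs->file child supports.  The exact flag choice
     * here is an assumption for the example only:
     *
     *   bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
     *       (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
     *   bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
     *       ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP) &
     *        bs->file->bs->supported_zero_flags);
     *
     * Any flag advertised here must then be honored by the driver's write or
     * write-zeroes implementation.
     */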

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of all BlockDriverStates (all_bdrv_states) */
    QTAILQ_ENTRY(BlockDriverState) bs_list;
    /* element of the list of monitor-owned BDS */
    QTAILQ_ENTRY(BlockDriverState) monitor_list;
    int refcnt;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always a
     * parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* Protected by AioContext lock */

    /* If we are reading a disk image, give its size in sectors.
     * Generally read-only; it is written to by load_snapshot and
     * save_snapshot, but the block layer is quiescent during those.
     */
    int64_t total_sectors;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* threshold limit for writes, in bytes. "High water mark". */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    /* Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
     * Reading from the list can be done with either the BQL or the
     * dirty_bitmap_mutex. Modifying a bitmap only requires
     * dirty_bitmap_mutex. */
    QemuMutex dirty_bitmap_mutex;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;

    /* Offset after the highest byte written to */
    Stat64 wr_highest_offset;

    /* If true, copy read backing sectors into image. Can be >1 if more
     * than one client has requested copy-on-read. Accessed with atomic
     * ops.
     */
    int copy_on_read;

    /* number of in-flight requests; overall and serialising.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
    unsigned int serialising_in_flight;

    /* Kicked to signal main loop when a request completes. */
    AioWait wait;

    /* counter for nested bdrv_io_plug.
     * Accessed with atomic ops.
     */
    unsigned io_plugged;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* Accessed with atomic ops. */
    int quiesce_counter;
    int recursive_quiesce_counter;

    unsigned int write_gen;               /* Current data generation */

    /* Protected by reqs_lock. */
    CoMutex reqs_lock;
    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
    CoQueue flush_queue;                  /* Serializing flush queue */
    bool active_flush_req;                /* Flush request in flight? */

    /* Only read/written by whoever has set active_flush_req to true. */
    unsigned int flushed_gen;             /* Flushed write generation */
};

struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;
};

typedef enum BlockMirrorBackingMode {
    /* Reuse the existing backing chain from the source for the target.
     * - sync=full: Set backing BDS to NULL.
     * - sync=top:  Use source's backing BDS.
     * - sync=none: Use source as the backing BDS. */
    MIRROR_SOURCE_BACKING_CHAIN,

    /* Open the target's backing chain completely anew */
    MIRROR_OPEN_BACKING_CHAIN,

    /* Do not change the target's backing BDS after job completion */
    MIRROR_LEAVE_BACKING_CHAIN,
} BlockMirrorBackingMode;

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}
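
/*
 * Illustrative sketch (not part of QEMU): since backing_bs() returns NULL at
 * the end of the chain, a caller can walk a backing chain with a simple loop.
 * The helper name is an assumption for the example only:
 *
 *   static bool blkexample_chain_contains(BlockDriverState *top,
 *                                         BlockDriverState *base)
 *   {
 *       BlockDriverState *bs;
 *
 *       for (bs = top; bs; bs = backing_bs(bs)) {
 *           if (bs == base) {
 *               return true;
 *           }
 *       }
 *       return false;
 *   }
 */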

/* Essential block drivers which must always be statically linked into qemu, and
 * which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);

void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
                                      QDict *options);

/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);

/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext. This is only needed by block drivers that manage their
 * own children. Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext. This is only needed by block drivers that manage their own
 * children. Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to be always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding the
 * association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to a
 * new AioContext; detach_aio_context() is called before the target BDS is being
 * detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext. The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);

/**
 * bdrv_wakeup:
 * @bs: The BlockDriverState for which an I/O operation has been completed.
 *
 * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
 * synchronous I/O on a BlockDriverState that is attached to another
 * I/O thread, the main thread lets the I/O thread's event loop run,
 * waiting for the I/O operation to complete. A bdrv_wakeup will wake
 * up the main thread if necessary.
 *
 * Manual calls to bdrv_wakeup are rarely necessary, because
 * bdrv_dec_in_flight already calls it.
 */
void bdrv_wakeup(BlockDriverState *bs);

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @backing_file_str: The file name that will be written to @bs as the
 * new backing file if the job completes. Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs. Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs. At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @backing_file_str in the written image and to @base in the live
 * BlockDriverState.
 */
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  int64_t speed, BlockdevOnError on_error, Error **errp);
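
/*
 * Illustrative sketch (not part of QEMU): a typical caller of stream_start(),
 * with the job id, base node and backing file string assumed for the example:
 *
 *   Error *local_err = NULL;
 *
 *   stream_start("stream0", bs, base_bs, "base.qcow2", 0,
 *                BLOCKDEV_ON_ERROR_REPORT, &local_err);
 *   if (local_err) {
 *       error_report_err(local_err);
 *   }
 */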

/**
 * commit_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @backing_file_str: String to use as the backing file in @top's overlay
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the commit job inserts into the graph above @top. NULL means
 * that a node name should be autogenerated.
 * @errp: Error object.
 *
 */
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  const char *filter_node_name, Error **errp);
/**
 * commit_active_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the commit job inserts into the graph above @bs. NULL means that
 * a node name should be autogenerated.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @auto_complete: Auto complete the job.
 * @errp: Error object.
 *
 */
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp);
/*
 * mirror_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @backing_mode: How to establish the target's backing chain after completion.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the mirror job inserts into the graph above @bs. NULL means that
 * a node name should be autogenerated.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs. Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed. At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp);

/*
 * backup_job_create:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @compress: Whether data written to the target should be compressed.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 * @errp: Error object.
 *
 * Create a backup operation on @bs. Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                            BlockDriverState *target, int64_t speed,
                            MirrorSyncMode sync_mode,
                            BdrvDirtyBitmap *sync_bitmap,
                            bool compress,
                            BlockdevOnError on_source_error,
                            BlockdevOnError on_target_error,
                            int creation_flags,
                            BlockCompletionFunc *cb, void *opaque,
                            JobTxn *txn, Error **errp);

void hmp_drive_add_node(Monitor *mon, const char *optstr);

BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
                                  const char *child_name,
                                  const BdrvChildRole *child_role,
                                  uint64_t perm, uint64_t shared_perm,
                                  void *opaque, Error **errp);
void bdrv_root_unref_child(BdrvChild *child);

int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
                            Error **errp);

/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
 * block filters: Forward CONSISTENT_READ, WRITE, WRITE_UNCHANGED and RESIZE to
 * all children */
void bdrv_filter_default_perms(BlockDriverState *bs, BdrvChild *c,
                               const BdrvChildRole *role,
                               BlockReopenQueue *reopen_queue,
                               uint64_t perm, uint64_t shared,
                               uint64_t *nperm, uint64_t *nshared);

/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
 * (non-raw) image formats: Like above for bs->backing, but for bs->file it
 * requires WRITE | RESIZE for read-write images, always requires
 * CONSISTENT_READ and doesn't share WRITE. */
void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
                               const BdrvChildRole *role,
                               BlockReopenQueue *reopen_queue,
                               uint64_t perm, uint64_t shared,
                               uint64_t *nperm, uint64_t *nshared);

/*
 * Default implementation for drivers to pass bdrv_co_block_status() to
 * their file.
 */
int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
                                                bool want_zero,
                                                int64_t offset,
                                                int64_t bytes,
                                                int64_t *pnum,
                                                int64_t *map,
                                                BlockDriverState **file);
/*
 * Default implementation for drivers to pass bdrv_co_block_status() to
 * their backing file.
 */
int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
                                                    bool want_zero,
                                                    int64_t offset,
                                                    int64_t bytes,
                                                    int64_t *pnum,
                                                    int64_t *map,
                                                    BlockDriverState **file);
const char *bdrv_get_parent_name(const BlockDriverState *bs);
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
bool blk_dev_has_removable_media(BlockBackend *blk);
bool blk_dev_has_tray(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);

void bdrv_inc_in_flight(BlockDriverState *bs);
void bdrv_dec_in_flight(BlockDriverState *bs);

void blockdev_close_all_bdrv_states(void);

int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         uint64_t bytes, BdrvRequestFlags flags);
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       uint64_t bytes, BdrvRequestFlags flags);

#endif /* BLOCK_INT_H */