Selected lines from QEMU's block/io.c (each entry keeps its line number in the file and is suffixed with its enclosing function).
27 #include "system/block-backend.h"
28 #include "block/aio-wait.h"
33 #include "block/dirty-bitmap.h"
34 #include "block/write-threshold.h"
38 #include "qemu/error-report.h"
39 #include "qemu/main-loop.h"
43 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
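In the full file this comment introduces a compile-time cap on how much memory a bounce buffer (for the copy-on-read and write-zeroes fallback paths) may occupy per request; larger requests are processed in pieces, as the loops further down show. A hypothetical stand-in for the define, for illustration only (the real value lives in block/io.c of the tree at hand):

/* Hypothetical stand-in, not the upstream value; check your tree */
#define MAX_BOUNCE_BUFFER_SKETCH (32768 << 9)  /* 32768 sectors * 512 B = 16 MiB */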
62 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { in bdrv_parent_drained_begin()
74 assert(c->quiesced_parent); in bdrv_parent_drained_end_single()
75 c->quiesced_parent = false; in bdrv_parent_drained_end_single()
77 if (c->klass->drained_end) { in bdrv_parent_drained_end_single()
78 c->klass->drained_end(c); in bdrv_parent_drained_end_single()
89 QLIST_FOREACH(c, &bs->parents, next_parent) { in bdrv_parent_drained_end()
101 if (c->klass->drained_poll) { in bdrv_parent_drained_poll_single()
102 return c->klass->drained_poll(c); in bdrv_parent_drained_poll_single()
116 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { in bdrv_parent_drained_poll()
117 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { in bdrv_parent_drained_poll()
130 assert(!c->quiesced_parent); in bdrv_parent_drained_begin_single()
131 c->quiesced_parent = true; in bdrv_parent_drained_begin_single()
133 if (c->klass->drained_begin) { in bdrv_parent_drained_begin_single()
135 c->klass->drained_begin(c); in bdrv_parent_drained_begin_single()
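The quiesced_parent asserts above enforce strict pairing: each parent is quiesced at most once and must be unquiesced before the next begin. A minimal standalone sketch of the same discipline (hypothetical Parent type, not the QEMU BdrvChild API):

#include <assert.h>
#include <stdbool.h>

typedef struct Parent { bool quiesced; } Parent;

static void parent_drained_begin(Parent *p)
{
    assert(!p->quiesced);   /* mirrors assert(!c->quiesced_parent) */
    p->quiesced = true;
    /* ... tell the parent to stop submitting new I/O ... */
}

static void parent_drained_end(Parent *p)
{
    assert(p->quiesced);    /* mirrors assert(c->quiesced_parent) */
    p->quiesced = false;
    /* ... let the parent resume submitting I/O ... */
}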
141 dst->pdiscard_alignment = MAX(dst->pdiscard_alignment, in bdrv_merge_limits()
142 src->pdiscard_alignment); in bdrv_merge_limits()
143 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer); in bdrv_merge_limits()
144 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer); in bdrv_merge_limits()
145 dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer, in bdrv_merge_limits()
146 src->max_hw_transfer); in bdrv_merge_limits()
147 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment, in bdrv_merge_limits()
148 src->opt_mem_alignment); in bdrv_merge_limits()
149 dst->min_mem_alignment = MAX(dst->min_mem_alignment, in bdrv_merge_limits()
150 src->min_mem_alignment); in bdrv_merge_limits()
151 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); in bdrv_merge_limits()
152 dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov); in bdrv_merge_limits()
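bdrv_merge_limits() combines the limits of two layers with a deliberate asymmetry: alignment requirements widen (MAX wins), while transfer caps tighten, except that 0 means "unlimited" and must not win a plain MIN. A standalone demonstration (MIN_NON_ZERO spelled out here; assumed to match QEMU's osdep.h macro):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MIN_NON_ZERO(a, b) \
    ((a) == 0 ? (b) : ((b) == 0 ? (a) : MIN(a, b)))

int main(void)
{
    printf("align: %d\n", MAX(512, 4096));             /* 4096: stricter wins */
    printf("cap:   %d\n", MIN_NON_ZERO(0, 65536));     /* 65536: 0 = no limit */
    printf("cap:   %d\n", MIN_NON_ZERO(32768, 65536)); /* 32768: tighter wins */
    return 0;
}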
164 s->bs->bl = s->old_bl; in bdrv_refresh_limits_abort()
176 BlockDriver *drv = bs->drv; in bdrv_refresh_limits()
186 .old_bl = bs->bl, in bdrv_refresh_limits()
191 memset(&bs->bl, 0, sizeof(bs->bl)); in bdrv_refresh_limits()
198 bs->bl.request_alignment = (drv->bdrv_co_preadv || in bdrv_refresh_limits()
199 drv->bdrv_aio_preadv || in bdrv_refresh_limits()
200 drv->bdrv_co_preadv_part) ? 1 : 512; in bdrv_refresh_limits()
204 QLIST_FOREACH(c, &bs->children, next) { in bdrv_refresh_limits()
205 if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW)) in bdrv_refresh_limits()
207 bdrv_merge_limits(&bs->bl, &c->bs->bl); in bdrv_refresh_limits()
211 if (c->role & BDRV_CHILD_FILTERED) { in bdrv_refresh_limits()
212 bs->bl.has_variable_length |= c->bs->bl.has_variable_length; in bdrv_refresh_limits()
217 bs->bl.min_mem_alignment = 512; in bdrv_refresh_limits()
218 bs->bl.opt_mem_alignment = qemu_real_host_page_size(); in bdrv_refresh_limits()
221 bs->bl.max_iov = IOV_MAX; in bdrv_refresh_limits()
225 if (drv->bdrv_refresh_limits) { in bdrv_refresh_limits()
226 drv->bdrv_refresh_limits(bs, errp); in bdrv_refresh_limits()
232 if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) { in bdrv_refresh_limits()
238 * The copy-on-read flag is actually a reference count so multiple users may
240 * Copy-on-read stays enabled until all users have called to disable it.
245 qatomic_inc(&bs->copy_on_read); in bdrv_enable_copy_on_read()
250 int old = qatomic_fetch_dec(&bs->copy_on_read); in bdrv_disable_copy_on_read()
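Per the comment above, copy-on-read is a reference count: qatomic_inc() on enable, qatomic_fetch_dec() on disable, with the returned old value available to assert that every disable matches an enable. The same pattern in standalone C11 (qatomic_* assumed to behave like <stdatomic.h> here):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int copy_on_read;

static void enable_cor(void)
{
    atomic_fetch_add(&copy_on_read, 1);
}

static void disable_cor(void)
{
    int old = atomic_fetch_sub(&copy_on_read, 1);
    assert(old >= 1);                /* an unbalanced disable is a bug */
}

static bool cor_active(void)
{
    return atomic_load(&copy_on_read) > 0;   /* active while any user remains */
}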
274 if (qatomic_read(&bs->in_flight)) { in bdrv_drain_poll()
297 Coroutine *co = data->co; in bdrv_co_drain_bh_cb()
298 BlockDriverState *bs = data->bs; in bdrv_co_drain_bh_cb()
302 if (data->begin) { in bdrv_co_drain_bh_cb()
303 bdrv_do_drained_begin(bs, data->parent, data->poll); in bdrv_co_drain_bh_cb()
305 assert(!data->poll); in bdrv_co_drain_bh_cb()
306 bdrv_do_drained_end(bs, data->parent); in bdrv_co_drain_bh_cb()
309 assert(data->begin); in bdrv_co_drain_bh_cb()
313 data->done = true; in bdrv_co_drain_bh_cb()
363 /* Stop things in parent-to-child order */ in bdrv_do_drained_begin()
364 if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { in bdrv_do_drained_begin()
367 if (bs->drv && bs->drv->bdrv_drain_begin) { in bdrv_do_drained_begin()
368 bs->drv->bdrv_drain_begin(bs); in bdrv_do_drained_begin()
375 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The in bdrv_do_drained_begin()
377 * though we don't return to the main AioContext loop - this automatically in bdrv_do_drained_begin()
415 assert(bs->quiesce_counter > 0); in bdrv_do_drained_end()
417 /* Re-enable things in child-to-parent order */ in bdrv_do_drained_end()
418 old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); in bdrv_do_drained_end()
421 if (bs->drv && bs->drv->bdrv_drain_end) { in bdrv_do_drained_end()
422 bs->drv->bdrv_drain_end(bs); in bdrv_do_drained_end()
447 assert(qatomic_read(&bs->in_flight) == 0); in bdrv_drain_assert_idle()
448 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { in bdrv_drain_assert_idle()
449 bdrv_drain_assert_idle(child->bs); in bdrv_drain_assert_idle()
506 /* Quiesce all nodes, without polling in-flight requests yet. The graph in bdrv_drain_all_begin_nopoll()
533 /* Now poll the in-flight requests */ in bdrv_drain_all_begin()
545 g_assert(bs->quiesce_counter > 0); in bdrv_drain_all_end_quiesce()
546 g_assert(!bs->refcnt); in bdrv_drain_all_end_quiesce()
548 while (bs->quiesce_counter) { in bdrv_drain_all_end_quiesce()
573 bdrv_drain_all_count--; in bdrv_drain_all_end()
590 if (req->serialising) { in tracked_request_end()
591 qatomic_dec(&req->bs->serialising_in_flight); in tracked_request_end()
594 qemu_mutex_lock(&req->bs->reqs_lock); in tracked_request_end()
596 qemu_mutex_unlock(&req->bs->reqs_lock); in tracked_request_end()
599 * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called in tracked_request_end()
603 qemu_co_queue_restart_all(&req->wait_queue); in tracked_request_end()
628 qemu_co_queue_init(&req->wait_queue); in tracked_request_begin()
630 qemu_mutex_lock(&bs->reqs_lock); in tracked_request_begin()
631 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); in tracked_request_begin()
632 qemu_mutex_unlock(&bs->reqs_lock); in tracked_request_begin()
641 if (offset >= req->overlap_offset + req->overlap_bytes) { in tracked_request_overlaps()
645 if (req->overlap_offset >= offset + bytes) { in tracked_request_overlaps()
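The two early returns above are the standard disjointness test for half-open intervals; everything else overlaps. Standalone:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* [a, a+al) and [b, b+bl) overlap iff neither lies wholly after the other,
 * i.e. the negation of the two checks above. */
static bool ranges_overlap(int64_t a, int64_t al, int64_t b, int64_t bl)
{
    return a < b + bl && b < a + al;
}

int main(void)
{
    assert(!ranges_overlap(0, 512, 512, 512)); /* touching ranges don't overlap */
    assert(ranges_overlap(0, 513, 512, 512));  /* one shared byte is enough */
    return 0;
}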
651 /* Called with self->bs->reqs_lock held */
657 QLIST_FOREACH(req, &self->bs->tracked_requests, list) { in bdrv_find_conflicting_request()
658 if (req == self || (!req->serialising && !self->serialising)) { in bdrv_find_conflicting_request()
661 if (tracked_request_overlaps(req, self->overlap_offset, in bdrv_find_conflicting_request()
662 self->overlap_bytes)) in bdrv_find_conflicting_request()
669 assert(qemu_coroutine_self() != req->co); in bdrv_find_conflicting_request()
676 if (!req->waiting_for) { in bdrv_find_conflicting_request()
685 /* Called with self->bs->reqs_lock held */
692 self->waiting_for = req; in bdrv_wait_serialising_requests_locked()
693 qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); in bdrv_wait_serialising_requests_locked()
694 self->waiting_for = NULL; in bdrv_wait_serialising_requests_locked()
698 /* Called with req->bs->reqs_lock held */
702 int64_t overlap_offset = req->offset & ~(align - 1); in tracked_request_set_serialising()
704 ROUND_UP(req->offset + req->bytes, align) - overlap_offset; in tracked_request_set_serialising()
706 bdrv_check_request(req->offset, req->bytes, &error_abort); in tracked_request_set_serialising()
708 if (!req->serialising) { in tracked_request_set_serialising()
709 qatomic_inc(&req->bs->serialising_in_flight); in tracked_request_set_serialising()
710 req->serialising = true; in tracked_request_set_serialising()
713 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); in tracked_request_set_serialising()
714 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); in tracked_request_set_serialising()
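tracked_request_set_serialising() widens the guarded range to alignment boundaries: round the start down with & ~(align - 1), round the end up, then grow the existing window (MIN of start offsets, MAX of byte counts). The mask form requires a power-of-two align. Worked example:

#include <assert.h>
#include <stdint.h>

#define ROUND_UP(n, d) (((n) + (d) - 1) / (d) * (d))   /* generic form, d > 0 */

int main(void)
{
    int64_t offset = 1500, bytes = 1000, align = 512;  /* align: power of two */

    int64_t overlap_offset = offset & ~(align - 1);                   /* 1024 */
    int64_t overlap_bytes = ROUND_UP(offset + bytes, align)
                            - overlap_offset;           /* 2560 - 1024 = 1536 */

    assert(overlap_offset == 1024 && overlap_bytes == 1536);
    return 0;
}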
727 QLIST_FOREACH(req, &bs->tracked_requests, list) { in bdrv_co_get_self_request()
728 if (req->co == self) { in bdrv_co_get_self_request()
751 *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c); in bdrv_round_to_subclusters()
762 return bs->bl.request_alignment; in bdrv_get_cluster_size()
771 qatomic_inc(&bs->in_flight); in bdrv_inc_in_flight()
783 qatomic_dec(&bs->in_flight); in bdrv_dec_in_flight()
790 BlockDriverState *bs = self->bs; in bdrv_wait_serialising_requests()
792 if (!qatomic_read(&bs->serialising_in_flight)) { in bdrv_wait_serialising_requests()
796 qemu_mutex_lock(&bs->reqs_lock); in bdrv_wait_serialising_requests()
798 qemu_mutex_unlock(&bs->reqs_lock); in bdrv_wait_serialising_requests()
806 qemu_mutex_lock(&req->bs->reqs_lock); in bdrv_make_request_serialising()
811 qemu_mutex_unlock(&req->bs->reqs_lock); in bdrv_make_request_serialising()
824 return -EIO; in bdrv_check_qiov_request()
829 return -EIO; in bdrv_check_qiov_request()
835 return -EIO; in bdrv_check_qiov_request()
841 return -EIO; in bdrv_check_qiov_request()
844 if (offset > BDRV_MAX_LENGTH - bytes) { in bdrv_check_qiov_request()
848 return -EIO; in bdrv_check_qiov_request()
859 if (qiov_offset > qiov->size) { in bdrv_check_qiov_request()
861 qiov_offset, qiov->size); in bdrv_check_qiov_request()
862 return -EIO; in bdrv_check_qiov_request()
865 if (bytes > qiov->size - qiov_offset) { in bdrv_check_qiov_request()
867 "vector size(%zu)", bytes, qiov_offset, qiov->size); in bdrv_check_qiov_request()
868 return -EIO; in bdrv_check_qiov_request()
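bdrv_check_qiov_request() rejects each bad bound with -EIO; note the overflow-safe comparison above (offset > BDRV_MAX_LENGTH - bytes rather than offset + bytes > BDRV_MAX_LENGTH), so the sum is never computed where it could wrap. A condensed standalone sketch (the cap is a hypothetical stand-in for BDRV_MAX_LENGTH):

#include <errno.h>
#include <stdint.h>

#define MAX_LEN_SKETCH INT64_MAX   /* hypothetical; QEMU uses BDRV_MAX_LENGTH */

static int check_request(int64_t offset, int64_t bytes)
{
    if (offset < 0 || bytes < 0) {
        return -EIO;
    }
    if (offset > MAX_LEN_SKETCH - bytes) {   /* never compute offset + bytes */
        return -EIO;
    }
    return 0;
}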
888 return -EIO; in bdrv_check_request32()
907 BlockDriverState *bs = child->bs; in bdrv_make_zero()
916 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES); in bdrv_make_zero()
940 * Returns 0 on success, -errno in error cases.
955 ret = bdrv_co_flush(child->bs); in bdrv_co_pwrite_sync()
972 co->ret = ret; in bdrv_co_io_em_complete()
973 aio_co_wake(co->coroutine); in bdrv_co_io_em_complete()
980 BlockDriver *drv = bs->drv; in bdrv_driver_preadv()
988 assert(!(flags & ~bs->supported_read_flags)); in bdrv_driver_preadv()
991 return -ENOMEDIUM; in bdrv_driver_preadv()
994 if (drv->bdrv_co_preadv_part) { in bdrv_driver_preadv()
995 return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset, in bdrv_driver_preadv()
999 if (qiov_offset > 0 || bytes != qiov->size) { in bdrv_driver_preadv()
1004 if (drv->bdrv_co_preadv) { in bdrv_driver_preadv()
1005 ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); in bdrv_driver_preadv()
1009 if (drv->bdrv_aio_preadv) { in bdrv_driver_preadv()
1015 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, in bdrv_driver_preadv()
1018 ret = -EIO; in bdrv_driver_preadv()
1033 assert(drv->bdrv_co_readv); in bdrv_driver_preadv()
1035 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); in bdrv_driver_preadv()
1050 BlockDriver *drv = bs->drv; in bdrv_driver_pwritev()
1061 return -ENOMEDIUM; in bdrv_driver_pwritev()
1064 if (bs->open_flags & BDRV_O_NO_FLUSH) { in bdrv_driver_pwritev()
1069 (~bs->supported_write_flags & BDRV_REQ_FUA)) { in bdrv_driver_pwritev()
1074 flags &= bs->supported_write_flags; in bdrv_driver_pwritev()
1076 if (drv->bdrv_co_pwritev_part) { in bdrv_driver_pwritev()
1077 ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, in bdrv_driver_pwritev()
1082 if (qiov_offset > 0 || bytes != qiov->size) { in bdrv_driver_pwritev()
1087 if (drv->bdrv_co_pwritev) { in bdrv_driver_pwritev()
1088 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags); in bdrv_driver_pwritev()
1092 if (drv->bdrv_aio_pwritev) { in bdrv_driver_pwritev()
1098 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags, in bdrv_driver_pwritev()
1101 ret = -EIO; in bdrv_driver_pwritev()
1116 assert(drv->bdrv_co_writev); in bdrv_driver_pwritev()
1117 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags); in bdrv_driver_pwritev()
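Both bdrv_driver_preadv() and bdrv_driver_pwritev() select a driver entry point by capability: the byte-based *_part hook first, then the plain coroutine hook, then the AIO-callback hook, and finally the legacy sector-based vectored hook, which is asserted to exist. The shape of that cascade, with hypothetical names (this is not the QEMU driver ABI):

/* Capability cascade: try the richest hook first, fall back in order. */
typedef struct Driver {
    int (*rw_part)(void *opaque);    /* byte-based, sub-iovec capable */
    int (*rw)(void *opaque);         /* byte-based coroutine hook */
    int (*rw_aio)(void *opaque);     /* completion-callback hook */
    int (*rw_legacy)(void *opaque);  /* sector-based fallback */
} Driver;

static int dispatch_rw(const Driver *drv, void *opaque)
{
    if (drv->rw_part) {
        return drv->rw_part(opaque);
    }
    if (drv->rw) {
        return drv->rw(opaque);
    }
    if (drv->rw_aio) {
        return drv->rw_aio(opaque);
    }
    return drv->rw_legacy(opaque);   /* must exist, as the asserts above say */
}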
1136 BlockDriver *drv = bs->drv; in bdrv_driver_pwritev_compressed()
1144 return -ENOMEDIUM; in bdrv_driver_pwritev_compressed()
1148 return -ENOTSUP; in bdrv_driver_pwritev_compressed()
1151 if (drv->bdrv_co_pwritev_compressed_part) { in bdrv_driver_pwritev_compressed()
1152 return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes, in bdrv_driver_pwritev_compressed()
1157 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); in bdrv_driver_pwritev_compressed()
1161 ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov); in bdrv_driver_pwritev_compressed()
1171 BlockDriverState *bs = child->bs; in bdrv_co_do_copy_on_readv()
1175 * modifying the image file. This is critical for zero-copy guest I/O in bdrv_co_do_copy_on_readv()
1180 BlockDriver *drv = bs->drv; in bdrv_co_do_copy_on_readv()
1185 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, in bdrv_co_do_copy_on_readv()
1193 return -ENOMEDIUM; in bdrv_co_do_copy_on_readv()
1200 skip_write = (bs->open_flags & BDRV_O_INACTIVE); in bdrv_co_do_copy_on_readv()
1204 * would be obtained anyway, but internally by the copy-on-read code. As in bdrv_co_do_copy_on_readv()
1206 * the copy-on-read code doesn't have its own BdrvChild, however, for which in bdrv_co_do_copy_on_readv()
1209 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); in bdrv_co_do_copy_on_readv()
1217 skip_bytes = offset - align_offset; in bdrv_co_do_copy_on_readv()
1252 /* Must copy-on-read; use the bounce buffer */ in bdrv_co_do_copy_on_readv()
1255 int64_t max_we_need = MAX(pnum, align_bytes - pnum); in bdrv_co_do_copy_on_readv()
1261 ret = -ENOMEM; in bdrv_co_do_copy_on_readv()
1274 if (drv->bdrv_co_pwrite_zeroes && in bdrv_co_do_copy_on_readv()
1292 * requests. If this is a deliberate copy-on-read in bdrv_co_do_copy_on_readv()
1302 MIN(pnum - skip_bytes, bytes - progress)); in bdrv_co_do_copy_on_readv()
1307 MIN(pnum - skip_bytes, bytes - progress), in bdrv_co_do_copy_on_readv()
1315 align_bytes -= pnum; in bdrv_co_do_copy_on_readv()
1316 progress += pnum - skip_bytes; in bdrv_co_do_copy_on_readv()
1336 BlockDriverState *bs = child->bs; in bdrv_aligned_preadv()
1344 assert((offset & (align - 1)) == 0); in bdrv_aligned_preadv()
1345 assert((bytes & (align - 1)) == 0); in bdrv_aligned_preadv()
1346 assert((bs->open_flags & BDRV_O_NO_IO) == 0); in bdrv_aligned_preadv()
1347 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), in bdrv_aligned_preadv()
1351 * TODO: We would need a per-BDS .supported_read_flags and in bdrv_aligned_preadv()
1363 * with each other for the same cluster. For example, in copy-on-read in bdrv_aligned_preadv()
1398 assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF))); in bdrv_aligned_preadv()
1400 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); in bdrv_aligned_preadv()
1413 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, in bdrv_aligned_preadv()
1415 qiov_offset + bytes - bytes_remaining, in bdrv_aligned_preadv()
1417 max_bytes -= num; in bdrv_aligned_preadv()
1420 ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining, in bdrv_aligned_preadv()
1426 bytes_remaining -= num; in bdrv_aligned_preadv()
1436 * |<---- align ----->| |<----- align ---->|
1437 * |<- head ->|<------------- bytes ------------->|<-- tail -->|
1439 * -*----------$-------*-------- ... --------*-----$------------*---
1448 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
1459 * as the bounce buffer in such cases. @pre_collapse_qiov has the pre-collapse
1483 int64_t align = bs->bl.request_alignment; in bdrv_init_padding()
1492 pad->head = offset & (align - 1); in bdrv_init_padding()
1493 pad->tail = ((offset + bytes) & (align - 1)); in bdrv_init_padding()
1494 if (pad->tail) { in bdrv_init_padding()
1495 pad->tail = align - pad->tail; in bdrv_init_padding()
1498 if (!pad->head && !pad->tail) { in bdrv_init_padding()
1502 assert(bytes); /* Nothing good in aligning zero-length requests */ in bdrv_init_padding()
1504 sum = pad->head + bytes + pad->tail; in bdrv_init_padding()
1505 pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align; in bdrv_init_padding()
1506 pad->buf = qemu_blockalign(bs, pad->buf_len); in bdrv_init_padding()
1507 pad->merge_reads = sum == pad->buf_len; in bdrv_init_padding()
1508 if (pad->tail) { in bdrv_init_padding()
1509 pad->tail_buf = pad->buf + pad->buf_len - align; in bdrv_init_padding()
1512 pad->write = write; in bdrv_init_padding()
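bdrv_init_padding() computes head as the misalignment of the request start and tail as the distance from the request end up to the next boundary; the bounce buffer is one aligned chunk when head and tail can share it, two otherwise. Worked standalone example of that arithmetic:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
    int64_t align = 512, offset = 700, bytes = 3000;

    int64_t head = offset & (align - 1);             /* 700 % 512 = 188 */
    int64_t tail = (offset + bytes) & (align - 1);   /* 3700 % 512 = 116 */
    if (tail) {
        tail = align - tail;                         /* 396 up to the boundary */
    }

    int64_t sum = head + bytes + tail;               /* 3584 = 7 * 512 */
    int64_t buf_len = (sum > align && head && tail) ? 2 * align : align;
    bool merge_reads = (sum == buf_len);             /* false: the padded request
                                                      * spans more than the buffer */

    assert(head == 188 && tail == 396 && buf_len == 1024 && !merge_reads);
    return 0;
}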
1522 BlockDriverState *bs = child->bs; in bdrv_padding_rmw_read()
1523 uint64_t align = bs->bl.request_alignment; in bdrv_padding_rmw_read()
1526 assert(req->serialising && pad->buf); in bdrv_padding_rmw_read()
1528 if (pad->head || pad->merge_reads) { in bdrv_padding_rmw_read()
1529 int64_t bytes = pad->merge_reads ? pad->buf_len : align; in bdrv_padding_rmw_read()
1531 qemu_iovec_init_buf(&local_qiov, pad->buf, bytes); in bdrv_padding_rmw_read()
1533 if (pad->head) { in bdrv_padding_rmw_read()
1536 if (pad->merge_reads && pad->tail) { in bdrv_padding_rmw_read()
1539 ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes, in bdrv_padding_rmw_read()
1544 if (pad->head) { in bdrv_padding_rmw_read()
1547 if (pad->merge_reads && pad->tail) { in bdrv_padding_rmw_read()
1551 if (pad->merge_reads) { in bdrv_padding_rmw_read()
1556 if (pad->tail) { in bdrv_padding_rmw_read()
1557 qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align); in bdrv_padding_rmw_read()
1562 req->overlap_offset + req->overlap_bytes - align, in bdrv_padding_rmw_read()
1572 memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail); in bdrv_padding_rmw_read()
1583 if (pad->collapse_bounce_buf) { in bdrv_padding_finalize()
1584 if (!pad->write) { in bdrv_padding_finalize()
1589 qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0, in bdrv_padding_finalize()
1590 pad->collapse_bounce_buf, pad->collapse_len); in bdrv_padding_finalize()
1592 qemu_vfree(pad->collapse_bounce_buf); in bdrv_padding_finalize()
1593 qemu_iovec_destroy(&pad->pre_collapse_qiov); in bdrv_padding_finalize()
1595 if (pad->buf) { in bdrv_padding_finalize()
1596 qemu_vfree(pad->buf); in bdrv_padding_finalize()
1597 qemu_iovec_destroy(&pad->local_qiov); in bdrv_padding_finalize()
1603 * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
1607 * merged into pad->collapse_bounce_buf and replaced by a reference to that
1608 * bounce buffer in pad->local_qiov.
1611 * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
1626 * this will practically never happen on 64-bit systems. in bdrv_create_padded_qiov()
1628 if (SIZE_MAX - pad->head < bytes || in bdrv_create_padded_qiov()
1629 SIZE_MAX - pad->head - bytes < pad->tail) in bdrv_create_padded_qiov()
1631 return -EINVAL; in bdrv_create_padded_qiov()
1635 padded_niov = !!pad->head + niov + !!pad->tail; in bdrv_create_padded_qiov()
1637 qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX)); in bdrv_create_padded_qiov()
1639 if (pad->head) { in bdrv_create_padded_qiov()
1640 qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head); in bdrv_create_padded_qiov()
1655 surplus_count = padded_niov - IOV_MAX; in bdrv_create_padded_qiov()
1656 assert(surplus_count <= !!pad->head + !!pad->tail); in bdrv_create_padded_qiov()
1660 * Move the elements to collapse into `pad->pre_collapse_qiov`, then in bdrv_create_padded_qiov()
1663 qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count); in bdrv_create_padded_qiov()
1664 qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov, in bdrv_create_padded_qiov()
1668 niov -= collapse_count; in bdrv_create_padded_qiov()
1669 bytes -= pad->pre_collapse_qiov.size; in bdrv_create_padded_qiov()
1672 * Construct the bounce buffer to match the length of the to-collapse in bdrv_create_padded_qiov()
1674 * from those elements. Then add it to `pad->local_qiov`. in bdrv_create_padded_qiov()
1676 pad->collapse_len = pad->pre_collapse_qiov.size; in bdrv_create_padded_qiov()
1677 pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len); in bdrv_create_padded_qiov()
1678 if (pad->write) { in bdrv_create_padded_qiov()
1679 qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0, in bdrv_create_padded_qiov()
1680 pad->collapse_bounce_buf, pad->collapse_len); in bdrv_create_padded_qiov()
1682 qemu_iovec_add(&pad->local_qiov, in bdrv_create_padded_qiov()
1683 pad->collapse_bounce_buf, pad->collapse_len); in bdrv_create_padded_qiov()
1686 qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes); in bdrv_create_padded_qiov()
1688 if (pad->tail) { in bdrv_create_padded_qiov()
1689 qemu_iovec_add(&pad->local_qiov, in bdrv_create_padded_qiov()
1690 pad->buf + pad->buf_len - pad->tail, pad->tail); in bdrv_create_padded_qiov()
1693 assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX)); in bdrv_create_padded_qiov()
1706 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
1707 * - on function start they represent original request
1708 * - on failure or when padding is not needed they are unchanged
1709 * - on success when padding is needed they represent padded request
1738 * only copy-on-read matters. in bdrv_pad_request()
1753 *qiov = &pad->local_qiov; in bdrv_pad_request()
1757 *bytes += pad->head + pad->tail; in bdrv_pad_request()
1758 *offset -= pad->head; in bdrv_pad_request()
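On success, bdrv_pad_request() rewrites the in-out parameters to describe the padded request (cf. the contract in the comment block above): it grows by head + tail bytes and starts head bytes earlier, so both edges land on alignment boundaries. Continuing the numbers from the padding example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t offset = 700, bytes = 3000;   /* original request */
    int64_t head = 188, tail = 396;       /* as computed in the earlier sketch */

    bytes  += head + tail;                /* 3584 */
    offset -= head;                       /* 512 */

    assert(offset % 512 == 0 && bytes % 512 == 0);
    return 0;
}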
1783 BlockDriverState *bs = child->bs; in bdrv_co_preadv_part()
1792 return -ENOMEDIUM; in bdrv_co_preadv_part()
1800 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { in bdrv_co_preadv_part()
1803 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass in bdrv_co_preadv_part()
1807 * zero-length read occasionally. in bdrv_co_preadv_part()
1814 /* Don't do copy-on-read if we read data before write operation */ in bdrv_co_preadv_part()
1815 if (qatomic_read(&bs->copy_on_read)) { in bdrv_co_preadv_part()
1827 bs->bl.request_alignment, in bdrv_co_preadv_part()
1842 BlockDriver *drv = bs->drv; in bdrv_co_do_pwrite_zeroes()
1850 int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, in bdrv_co_do_pwrite_zeroes()
1852 int alignment = MAX(bs->bl.pwrite_zeroes_alignment, in bdrv_co_do_pwrite_zeroes()
1853 bs->bl.request_alignment); in bdrv_co_do_pwrite_zeroes()
1854 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); in bdrv_co_do_pwrite_zeroes()
1860 return -ENOMEDIUM; in bdrv_co_do_pwrite_zeroes()
1863 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { in bdrv_co_do_pwrite_zeroes()
1864 return -ENOTSUP; in bdrv_co_do_pwrite_zeroes()
1869 return -EINVAL; in bdrv_co_do_pwrite_zeroes()
1873 if (!(bs->open_flags & BDRV_O_UNMAP)) { in bdrv_co_do_pwrite_zeroes()
1877 /* Invalidate the cached block-status data range if this write overlaps */ in bdrv_co_do_pwrite_zeroes()
1880 assert(alignment % bs->bl.request_alignment == 0); in bdrv_co_do_pwrite_zeroes()
1884 assert(max_write_zeroes >= bs->bl.request_alignment); in bdrv_co_do_pwrite_zeroes()
1897 num = MIN(MIN(bytes, max_transfer), alignment - head); in bdrv_co_do_pwrite_zeroes()
1902 num -= tail; in bdrv_co_do_pwrite_zeroes()
1910 ret = -ENOTSUP; in bdrv_co_do_pwrite_zeroes()
1912 if (drv->bdrv_co_pwrite_zeroes) { in bdrv_co_do_pwrite_zeroes()
1913 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, in bdrv_co_do_pwrite_zeroes()
1914 flags & bs->supported_zero_flags); in bdrv_co_do_pwrite_zeroes()
1915 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && in bdrv_co_do_pwrite_zeroes()
1916 !(bs->supported_zero_flags & BDRV_REQ_FUA)) { in bdrv_co_do_pwrite_zeroes()
1920 assert(!bs->supported_zero_flags); in bdrv_co_do_pwrite_zeroes()
1923 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) { in bdrv_co_do_pwrite_zeroes()
1928 !(bs->supported_write_flags & BDRV_REQ_FUA)) { in bdrv_co_do_pwrite_zeroes()
1938 ret = -ENOMEM; in bdrv_co_do_pwrite_zeroes()
1956 bytes -= num; in bdrv_co_do_pwrite_zeroes()
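The loop above fragments an unaligned zero-write into an unaligned head (at most alignment - head bytes), aligned middle chunks bounded by max_write_zeroes, and a trailing unaligned tail trimmed off with num -= tail. A simplified standalone model of that fragmentation (it omits the bounce-buffer fallback path):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void fragment(int64_t offset, int64_t bytes, int64_t align, int64_t cap)
{
    while (bytes > 0) {
        int64_t num = bytes;
        int64_t head = offset % align;

        if (head) {
            /* unaligned head: stop at the first alignment boundary */
            num = MIN(MIN(bytes, cap), align - head);
        } else {
            num = MIN(num, cap);
            int64_t tail = (offset + num) % align;
            if (num > align && tail) {
                num -= tail;   /* keep this chunk aligned; tail goes last */
            }
        }
        printf("write_zeroes(offset=%" PRId64 ", bytes=%" PRId64 ")\n",
               offset, num);
        offset += num;
        bytes -= num;
    }
}

int main(void)
{
    fragment(700, 10000, 4096, 8192);   /* head 3396, middle 4096, tail 2508 */
    return 0;
}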
1971 BlockDriverState *bs = child->bs; in bdrv_co_write_req_prepare()
1976 return -EPERM; in bdrv_co_write_req_prepare()
1979 assert(!(bs->open_flags & BDRV_O_INACTIVE)); in bdrv_co_write_req_prepare()
1980 assert((bs->open_flags & BDRV_O_NO_IO) == 0); in bdrv_co_write_req_prepare()
1985 QEMU_LOCK_GUARD(&bs->reqs_lock); in bdrv_co_write_req_prepare()
1990 return -EBUSY; in bdrv_co_write_req_prepare()
1998 assert(req->overlap_offset <= offset); in bdrv_co_write_req_prepare()
1999 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); in bdrv_co_write_req_prepare()
2000 assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE || in bdrv_co_write_req_prepare()
2001 child->perm & BLK_PERM_RESIZE); in bdrv_co_write_req_prepare()
2003 switch (req->type) { in bdrv_co_write_req_prepare()
2007 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); in bdrv_co_write_req_prepare()
2009 assert(child->perm & BLK_PERM_WRITE); in bdrv_co_write_req_prepare()
2014 assert(child->perm & BLK_PERM_RESIZE); in bdrv_co_write_req_prepare()
2026 BlockDriverState *bs = child->bs; in bdrv_co_write_req_finish()
2030 qatomic_inc(&bs->write_gen); in bdrv_co_write_req_finish()
2040 (req->type == BDRV_TRACKED_TRUNCATE || in bdrv_co_write_req_finish()
2041 end_sector > bs->total_sectors) && in bdrv_co_write_req_finish()
2042 req->type != BDRV_TRACKED_DISCARD) { in bdrv_co_write_req_finish()
2043 bs->total_sectors = end_sector; in bdrv_co_write_req_finish()
2047 if (req->bytes) { in bdrv_co_write_req_finish()
2048 switch (req->type) { in bdrv_co_write_req_finish()
2050 stat64_max(&bs->wr_highest_offset, offset + bytes); in bdrv_co_write_req_finish()
2071 BlockDriverState *bs = child->bs; in bdrv_aligned_pwritev()
2072 BlockDriver *drv = bs->drv; in bdrv_aligned_pwritev()
2081 return -ENOMEDIUM; in bdrv_aligned_pwritev()
2085 return -EPERM; in bdrv_aligned_pwritev()
2089 assert((offset & (align - 1)) == 0); in bdrv_aligned_pwritev()
2090 assert((bytes & (align - 1)) == 0); in bdrv_aligned_pwritev()
2091 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), in bdrv_aligned_pwritev()
2096 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && in bdrv_aligned_pwritev()
2097 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && in bdrv_aligned_pwritev()
2100 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { in bdrv_aligned_pwritev()
2127 !(bs->supported_write_flags & BDRV_REQ_FUA)) { in bdrv_aligned_pwritev()
2133 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, in bdrv_aligned_pwritev()
2135 qiov_offset + bytes - bytes_remaining, in bdrv_aligned_pwritev()
2140 bytes_remaining -= num; in bdrv_aligned_pwritev()
2157 BlockDriverState *bs = child->bs; in bdrv_co_do_zero_pwritev()
2159 uint64_t align = bs->bl.request_alignment; in bdrv_co_do_zero_pwritev()
2175 int64_t aligned_offset = offset & ~(align - 1); in bdrv_co_do_zero_pwritev()
2186 offset += write_bytes - pad.head; in bdrv_co_do_zero_pwritev()
2187 bytes -= write_bytes - pad.head; in bdrv_co_do_zero_pwritev()
2191 assert(!bytes || (offset & (align - 1)) == 0); in bdrv_co_do_zero_pwritev()
2194 int64_t aligned_bytes = bytes & ~(align - 1); in bdrv_co_do_zero_pwritev()
2200 bytes -= aligned_bytes; in bdrv_co_do_zero_pwritev()
2204 assert(!bytes || (offset & (align - 1)) == 0); in bdrv_co_do_zero_pwritev()
2235 BlockDriverState *bs = child->bs; in bdrv_co_pwritev_part()
2237 uint64_t align = bs->bl.request_alignment; in bdrv_co_pwritev_part()
2243 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); in bdrv_co_pwritev_part()
2246 return -ENOMEDIUM; in bdrv_co_pwritev_part()
2262 return -ENOTSUP; in bdrv_co_pwritev_part()
2265 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { in bdrv_co_pwritev_part()
2268 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass in bdrv_co_pwritev_part()
2272 * zero-length write occasionally. in bdrv_co_pwritev_part()
2279 * Pad request for following read-modify-write cycle. in bdrv_co_pwritev_part()
2302 * padded. We are going to do read-modify-write, and must in bdrv_co_pwritev_part()
2327 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); in bdrv_co_pwrite_zeroes()
2384 * The returned value is non-zero on success except at end-of-file.
2387 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2422 n = total_size - offset; in bdrv_co_do_block_status()
2427 /* Must be non-NULL or bdrv_co_getlength() would have failed */ in bdrv_co_do_block_status()
2428 assert(bs->drv); in bdrv_co_do_block_status()
2430 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { in bdrv_co_do_block_status()
2436 if (bs->drv->protocol_name) { in bdrv_co_do_block_status()
2447 align = bs->bl.request_alignment; in bdrv_co_do_block_status()
2449 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; in bdrv_co_do_block_status()
2451 if (bs->drv->bdrv_co_block_status) { in bdrv_co_do_block_status()
2453 * Use the block-status cache only for protocol nodes: Format in bdrv_co_do_block_status()
2462 * cache the last-identified data region. in bdrv_co_do_block_status()
2473 if (QLIST_EMPTY(&bs->children) && in bdrv_co_do_block_status()
2480 ret = bs->drv->bdrv_co_block_status(bs, mode, aligned_offset, in bdrv_co_do_block_status()
2485 * Note that checking QLIST_EMPTY(&bs->children) is also done when in bdrv_co_do_block_status()
2488 * non-protocol nodes, and then it is never used. However, filling in bdrv_co_do_block_status()
2497 QLIST_EMPTY(&bs->children)) in bdrv_co_do_block_status()
2531 * The driver's result must be a non-zero multiple of request_alignment. in bdrv_co_do_block_status()
2535 align > offset - aligned_offset); in bdrv_co_do_block_status()
2542 *pnum -= offset - aligned_offset; in bdrv_co_do_block_status()
2547 local_map += offset - aligned_offset; in bdrv_co_do_block_status()
2559 } else if (bs->drv->supports_backing) { in bdrv_co_do_block_status()
2603 * with more complicated block graphs like snapshot-access -> in bdrv_co_do_block_status()
2604 * copy-before-write -> qcow2, where the return value will be propagated in bdrv_co_do_block_status()
2755 * Return 1 if that is the case, 0 otherwise and -errno on error.
2757 * does not guarantee non-zero data; but a return of 1 is reliable.
2778 bytes -= pnum; in bdrv_co_is_zero_fast()
2787 * Return 1 if that is the case, 0 otherwise and -errno on error.
2789 * does not guarantee non-zero data; however, a return of 1 is reliable,
2805 /* First probe - see if the entire image reads as zero */ in bdrv_co_is_all_zeroes()
2813 return bdrv_co_is_zero_fast(bs, pnum, bytes - pnum); in bdrv_co_is_all_zeroes()
2817 * Because of the way 'blockdev-create' works, raw files tend to in bdrv_co_is_all_zeroes()
2818 * be created with a non-sparse region at the front to make in bdrv_co_is_all_zeroes()
2827 ret = bdrv_co_is_zero_fast(bs, pnum, bytes - pnum); in bdrv_co_is_all_zeroes()
2859 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2861 * Return a positive depth if (a prefix of) the given range is allocated
2901 BlockDriver *drv = bs->drv; in bdrv_co_readv_vmstate()
2907 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); in bdrv_co_readv_vmstate()
2913 return -ENOMEDIUM; in bdrv_co_readv_vmstate()
2918 if (drv->bdrv_co_load_vmstate) { in bdrv_co_readv_vmstate()
2919 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos); in bdrv_co_readv_vmstate()
2923 ret = -ENOTSUP; in bdrv_co_readv_vmstate()
2934 BlockDriver *drv = bs->drv; in bdrv_co_writev_vmstate()
2940 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); in bdrv_co_writev_vmstate()
2946 return -ENOMEDIUM; in bdrv_co_writev_vmstate()
2951 if (drv->bdrv_co_save_vmstate) { in bdrv_co_writev_vmstate()
2952 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos); in bdrv_co_writev_vmstate()
2956 ret = -ENOTSUP; in bdrv_co_writev_vmstate()
2985 /* async I/Os */
2998 AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1); in bdrv_aio_cancel()
3002 /* Async version of aio cancel. The caller is not blocked if the acb implements
3008 if (acb->aiocb_info->cancel_async) { in bdrv_aio_cancel_async()
3009 acb->aiocb_info->cancel_async(acb); in bdrv_aio_cancel_async()
3032 qemu_mutex_lock(&bs->reqs_lock); in bdrv_co_flush()
3033 current_gen = qatomic_read(&bs->write_gen); in bdrv_co_flush()
3036 while (bs->active_flush_req) { in bdrv_co_flush()
3037 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); in bdrv_co_flush()
3041 bs->active_flush_req = true; in bdrv_co_flush()
3042 qemu_mutex_unlock(&bs->reqs_lock); in bdrv_co_flush()
3045 if (bs->drv->bdrv_co_flush) { in bdrv_co_flush()
3046 ret = bs->drv->bdrv_co_flush(bs); in bdrv_co_flush()
3052 if (bs->drv->bdrv_co_flush_to_os) { in bdrv_co_flush()
3053 ret = bs->drv->bdrv_co_flush_to_os(bs); in bdrv_co_flush()
3060 if (bs->open_flags & BDRV_O_NO_FLUSH) { in bdrv_co_flush()
3065 if (bs->flushed_gen == current_gen) { in bdrv_co_flush()
3070 if (!bs->drv) { in bdrv_co_flush()
3071 /* bs->drv->bdrv_co_flush() might have ejected the BDS in bdrv_co_flush()
3073 ret = -ENOMEDIUM; in bdrv_co_flush()
3076 if (bs->drv->bdrv_co_flush_to_disk) { in bdrv_co_flush()
3077 ret = bs->drv->bdrv_co_flush_to_disk(bs); in bdrv_co_flush()
3078 } else if (bs->drv->bdrv_aio_flush) { in bdrv_co_flush()
3084 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); in bdrv_co_flush()
3086 ret = -EIO; in bdrv_co_flush()
3096 * depends on server-side configuration), so we can't ensure that in bdrv_co_flush()
3115 QLIST_FOREACH(child, &bs->children, next) { in bdrv_co_flush()
3116 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { in bdrv_co_flush()
3117 int this_child_ret = bdrv_co_flush(child->bs); in bdrv_co_flush()
3127 bs->flushed_gen = current_gen; in bdrv_co_flush()
3130 qemu_mutex_lock(&bs->reqs_lock); in bdrv_co_flush()
3131 bs->active_flush_req = false; in bdrv_co_flush()
3132 /* Return value is ignored - it's ok if wait queue is empty */ in bdrv_co_flush()
3133 qemu_co_queue_next(&bs->flush_queue); in bdrv_co_flush()
3134 qemu_mutex_unlock(&bs->reqs_lock); in bdrv_co_flush()
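bdrv_co_flush() layers two ideas visible above: flushes are serialized through active_flush_req plus flush_queue, and a generation counter lets a flush return immediately when nothing was written since the last one (flushed_gen == current_gen). The generation trick in isolation, as a single-threaded sketch (the real code samples write_gen under reqs_lock):

#include <stdint.h>

/* Skip the expensive device flush when no write completed since the last
 * flush, mirroring write_gen / flushed_gen above. */
typedef struct Disk {
    uint64_t write_gen;     /* bumped when a write completes */
    uint64_t flushed_gen;   /* generation covered by the last flush */
} Disk;

static void write_complete(Disk *d)
{
    d->write_gen++;
}

static void flush(Disk *d)
{
    uint64_t current_gen = d->write_gen;   /* sample before flushing */
    if (d->flushed_gen == current_gen) {
        return;                            /* nothing new to flush */
    }
    /* ... issue the actual flush to the backend here ... */
    d->flushed_gen = current_gen;
}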
3148 BlockDriverState *bs = child->bs; in bdrv_co_pdiscard()
3152 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { in bdrv_co_pdiscard()
3153 return -ENOMEDIUM; in bdrv_co_pdiscard()
3157 return -EPERM; in bdrv_co_pdiscard()
3166 if (!(bs->open_flags & BDRV_O_UNMAP)) { in bdrv_co_pdiscard()
3170 if (!bs->drv->bdrv_co_pdiscard) { in bdrv_co_pdiscard()
3174 /* Invalidate the cached block-status data range if this discard overlaps */ in bdrv_co_pdiscard()
3181 * -EINVAL or -ENOTSUP, so we must fragment the request accordingly. in bdrv_co_pdiscard()
3183 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); in bdrv_co_pdiscard()
3184 assert(align % bs->bl.request_alignment == 0); in bdrv_co_pdiscard()
3196 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), in bdrv_co_pdiscard()
3198 assert(max_pdiscard >= bs->bl.request_alignment); in bdrv_co_pdiscard()
3205 num = MIN(bytes, align - head); in bdrv_co_pdiscard()
3206 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { in bdrv_co_pdiscard()
3207 num %= bs->bl.request_alignment; in bdrv_co_pdiscard()
3214 num -= tail; in bdrv_co_pdiscard()
3215 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && in bdrv_co_pdiscard()
3216 tail > bs->bl.request_alignment) { in bdrv_co_pdiscard()
3217 tail %= bs->bl.request_alignment; in bdrv_co_pdiscard()
3218 num -= tail; in bdrv_co_pdiscard()
3226 if (!bs->drv) { in bdrv_co_pdiscard()
3227 ret = -ENOMEDIUM; in bdrv_co_pdiscard()
3231 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); in bdrv_co_pdiscard()
3232 if (ret && ret != -ENOTSUP) { in bdrv_co_pdiscard()
3233 if (ret == -EINVAL && (offset % align != 0 || num % align != 0)) { in bdrv_co_pdiscard()
3241 bytes -= num; in bdrv_co_pdiscard()
3253 BlockDriver *drv = bs->drv; in bdrv_co_ioctl()
3262 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { in bdrv_co_ioctl()
3263 co.ret = -ENOTSUP; in bdrv_co_ioctl()
3267 if (drv->bdrv_co_ioctl) { in bdrv_co_ioctl()
3268 co.ret = drv->bdrv_co_ioctl(bs, req, buf); in bdrv_co_ioctl()
3270 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); in bdrv_co_ioctl()
3272 co.ret = -ENOTSUP; in bdrv_co_ioctl()
3286 BlockDriver *drv = bs->drv; in bdrv_co_zone_report()
3293 if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_report()
3294 co.ret = -ENOTSUP; in bdrv_co_zone_report()
3297 co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones); in bdrv_co_zone_report()
3306 BlockDriver *drv = bs->drv; in bdrv_co_zone_mgmt()
3313 if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_mgmt()
3314 co.ret = -ENOTSUP; in bdrv_co_zone_mgmt()
3317 co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len); in bdrv_co_zone_mgmt()
3328 BlockDriver *drv = bs->drv; in bdrv_co_zone_append()
3334 ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL); in bdrv_co_zone_append()
3340 if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_append()
3341 co.ret = -ENOTSUP; in bdrv_co_zone_append()
3344 co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags); in bdrv_co_zone_append()
3398 QLIST_FOREACH(child, &bs->children, next) { in bdrv_register_buf_rollback()
3403 bdrv_unregister_buf(child->bs, host, size); in bdrv_register_buf_rollback()
3406 if (bs->drv && bs->drv->bdrv_unregister_buf) { in bdrv_register_buf_rollback()
3407 bs->drv->bdrv_unregister_buf(bs, host, size); in bdrv_register_buf_rollback()
3419 if (bs->drv && bs->drv->bdrv_register_buf) { in bdrv_register_buf()
3420 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { in bdrv_register_buf()
3424 QLIST_FOREACH(child, &bs->children, next) { in bdrv_register_buf()
3425 if (!bdrv_register_buf(child->bs, host, size, errp)) { in bdrv_register_buf()
3440 if (bs->drv && bs->drv->bdrv_unregister_buf) { in bdrv_unregister_buf()
3441 bs->drv->bdrv_unregister_buf(bs, host, size); in bdrv_unregister_buf()
3443 QLIST_FOREACH(child, &bs->children, next) { in bdrv_unregister_buf()
3444 bdrv_unregister_buf(child->bs, host, size); in bdrv_unregister_buf()
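bdrv_register_buf() recurses into the driver hook and then each child, and on any failure bdrv_register_buf_rollback() unregisters exactly the subset that had already succeeded. The same register-with-rollback pattern in a generic standalone form (hypothetical Handler type):

#include <stdbool.h>
#include <stddef.h>

typedef struct Handler {
    bool (*reg)(void *buf, size_t size);
    void (*unreg)(void *buf, size_t size);
} Handler;

/* Register against all handlers, or roll back the prefix that succeeded. */
static bool register_all(const Handler *h, int n, void *buf, size_t size)
{
    for (int i = 0; i < n; i++) {
        if (!h[i].reg(buf, size)) {
            while (i-- > 0) {
                h[i].unreg(buf, size);   /* undo earlier successes only */
            }
            return false;
        }
    }
    return true;
}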
3464 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { in bdrv_co_copy_range_internal()
3465 return -ENOMEDIUM; in bdrv_co_copy_range_internal()
3475 if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) { in bdrv_co_copy_range_internal()
3476 return -ENOMEDIUM; in bdrv_co_copy_range_internal()
3483 if (!src->bs->drv->bdrv_co_copy_range_from in bdrv_co_copy_range_internal()
3484 || !dst->bs->drv->bdrv_co_copy_range_to in bdrv_co_copy_range_internal()
3485 || src->bs->encrypted || dst->bs->encrypted) { in bdrv_co_copy_range_internal()
3486 return -ENOTSUP; in bdrv_co_copy_range_internal()
3490 bdrv_inc_in_flight(src->bs); in bdrv_co_copy_range_internal()
3491 tracked_request_begin(&req, src->bs, src_offset, bytes, in bdrv_co_copy_range_internal()
3498 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, in bdrv_co_copy_range_internal()
3505 bdrv_dec_in_flight(src->bs); in bdrv_co_copy_range_internal()
3507 bdrv_inc_in_flight(dst->bs); in bdrv_co_copy_range_internal()
3508 tracked_request_begin(&req, dst->bs, dst_offset, bytes, in bdrv_co_copy_range_internal()
3513 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, in bdrv_co_copy_range_internal()
3521 bdrv_dec_in_flight(dst->bs); in bdrv_co_copy_range_internal()
3583 QLIST_FOREACH(c, &bs->parents, next_parent) { in bdrv_parent_cb_resize()
3584 if (c->klass->resize) { in bdrv_parent_cb_resize()
3585 c->klass->resize(c); in bdrv_parent_cb_resize()
3601 BlockDriverState *bs = child->bs; in bdrv_co_truncate()
3603 BlockDriver *drv = bs->drv; in bdrv_co_truncate()
3610 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ in bdrv_co_truncate()
3613 return -ENOMEDIUM; in bdrv_co_truncate()
3617 return -EINVAL; in bdrv_co_truncate()
3627 error_setg_errno(errp, -old_size, "Failed to get old image size"); in bdrv_co_truncate()
3632 error_setg(errp, "Image is read-only"); in bdrv_co_truncate()
3633 return -EACCES; in bdrv_co_truncate()
3637 new_bytes = offset - old_size; in bdrv_co_truncate()
3643 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, in bdrv_co_truncate()
3652 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, in bdrv_co_truncate()
3655 error_setg_errno(errp, -ret, in bdrv_co_truncate()
3666 * then the backing file content would become visible. Instead, zero-fill in bdrv_co_truncate()
3676 backing_len = bdrv_co_getlength(backing->bs); in bdrv_co_truncate()
3679 error_setg_errno(errp, -ret, "Could not get backing file size"); in bdrv_co_truncate()
3688 if (drv->bdrv_co_truncate) { in bdrv_co_truncate()
3689 if (flags & ~bs->supported_truncate_flags) { in bdrv_co_truncate()
3691 ret = -ENOTSUP; in bdrv_co_truncate()
3694 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); in bdrv_co_truncate()
3699 ret = -ENOTSUP; in bdrv_co_truncate()
3708 error_setg_errno(errp, -ret, "Could not refresh total sector count"); in bdrv_co_truncate()
3710 offset = bs->total_sectors * BDRV_SECTOR_SIZE; in bdrv_co_truncate()
3717 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); in bdrv_co_truncate()
3731 if (!bs || !bs->drv) { in bdrv_cancel_in_flight()
3735 if (bs->drv->bdrv_cancel_in_flight) { in bdrv_cancel_in_flight()
3736 bs->drv->bdrv_cancel_in_flight(bs); in bdrv_cancel_in_flight()
3744 BlockDriverState *bs = child->bs; in bdrv_co_preadv_snapshot()
3745 BlockDriver *drv = bs->drv; in bdrv_co_preadv_snapshot()
3751 return -ENOMEDIUM; in bdrv_co_preadv_snapshot()
3754 if (!drv->bdrv_co_preadv_snapshot) { in bdrv_co_preadv_snapshot()
3755 return -ENOTSUP; in bdrv_co_preadv_snapshot()
3759 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); in bdrv_co_preadv_snapshot()
3771 BlockDriver *drv = bs->drv; in bdrv_co_snapshot_block_status()
3777 return -ENOMEDIUM; in bdrv_co_snapshot_block_status()
3780 if (!drv->bdrv_co_snapshot_block_status) { in bdrv_co_snapshot_block_status()
3781 return -ENOTSUP; in bdrv_co_snapshot_block_status()
3785 ret = drv->bdrv_co_snapshot_block_status(bs, mode, offset, bytes, in bdrv_co_snapshot_block_status()
3795 BlockDriver *drv = bs->drv; in bdrv_co_pdiscard_snapshot()
3801 return -ENOMEDIUM; in bdrv_co_pdiscard_snapshot()
3804 if (!drv->bdrv_co_pdiscard_snapshot) { in bdrv_co_pdiscard_snapshot()
3805 return -ENOTSUP; in bdrv_co_pdiscard_snapshot()
3809 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); in bdrv_co_pdiscard_snapshot()