Lines matching full:if
46 /* Maximum read size for checking if data reads as zero, in bytes */
63 if (c == ignore) { in bdrv_parent_drained_begin()
77 if (c->klass->drained_end) { in bdrv_parent_drained_end_single()
90 if (c == ignore) { in bdrv_parent_drained_end()
101 if (c->klass->drained_poll) { in bdrv_parent_drained_poll_single()
117 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { in bdrv_parent_drained_poll()
133 if (c->klass->drained_begin) { in bdrv_parent_drained_begin_single()
182 if (tran) { in bdrv_refresh_limits()
193 if (!drv) { in bdrv_refresh_limits()
205 if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW)) in bdrv_refresh_limits()
211 if (c->role & BDRV_CHILD_FILTERED) { in bdrv_refresh_limits()
216 if (!have_limits) { in bdrv_refresh_limits()
225 if (drv->bdrv_refresh_limits) { in bdrv_refresh_limits()
227 if (*errp) { in bdrv_refresh_limits()
232 if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) { in bdrv_refresh_limits()
264 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
270 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { in bdrv_drain_poll()
274 if (qatomic_read(&bs->in_flight)) { in bdrv_drain_poll()
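These matches appear to come from QEMU's block layer I/O code (block/io.c). The two hits at 270-274 outline the drain-poll predicate: a node still needs polling while a non-ignored parent reports activity or its own in-flight counter is nonzero. A minimal standalone sketch of that shape, with simplified stand-in types rather than the QEMU definitions:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        atomic_int in_flight;   /* requests submitted but not yet completed */
        bool parents_busy;      /* stand-in for bdrv_parent_drained_poll() */
    };

    /* true => BDRV_POLL_WHILE() should keep entering a blocking aio_poll() */
    static bool drain_poll(struct node *n)
    {
        if (n->parents_busy) {
            return true;
        }
        if (atomic_load(&n->in_flight)) {
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct node n = { .in_flight = 1, .parents_busy = false };
        printf("%d\n", drain_poll(&n));   /* 1: still busy */
        atomic_store(&n.in_flight, 0);
        printf("%d\n", drain_poll(&n));   /* 0: quiesced */
        return 0;
    }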
300 if (bs) { in bdrv_co_drain_bh_cb()
302 if (data->begin) { in bdrv_co_drain_bh_cb()
326 * other coroutines run if they were queued by aio_co_enter(). */ in bdrv_co_yield_to_drain()
338 if (bs) { in bdrv_co_yield_to_drain()
346 /* If we are resumed from some other event (such as an aio completion or a in bdrv_co_yield_to_drain()
356 if (qemu_in_coroutine()) { in bdrv_do_drained_begin()
364 if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { in bdrv_do_drained_begin()
367 if (bs->drv && bs->drv->bdrv_drain_begin) { in bdrv_do_drained_begin()
381 if (poll) { in bdrv_do_drained_begin()
408 if (qemu_in_coroutine()) { in bdrv_do_drained_end()
419 if (old_quiesce_counter == 1) { in bdrv_do_drained_end()
421 if (bs->drv && bs->drv->bdrv_drain_end) { in bdrv_do_drained_end()
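The hits at 356-421 show the begin/end pair around a quiesce counter: the driver's drain callbacks fire only on the 0->1 and 1->0 transitions, which makes nested drained sections cheap. The protocol in isolation (simplified names; QEMU's counter lives in BlockDriverState and uses qatomic_fetch_inc/dec):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int quiesce_counter;

    static void drained_begin(void)
    {
        if (atomic_fetch_add(&quiesce_counter, 1) == 0) {
            puts("driver drain_begin");   /* cf. bs->drv->bdrv_drain_begin */
        }
    }

    static void drained_end(void)
    {
        if (atomic_fetch_sub(&quiesce_counter, 1) == 1) {
            puts("driver drain_end");     /* cf. bs->drv->bdrv_drain_end */
        }
    }

    int main(void)
    {
        drained_begin();   /* outermost section: callback fires */
        drained_begin();   /* nested section: counter only */
        drained_end();
        drained_end();     /* last end: callback fires */
        return 0;
    }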
496 if (replay_events_enabled()) { in bdrv_drain_all_begin_nopoll()
517 if (qemu_in_coroutine()) { in bdrv_drain_all_begin()
527 if (replay_events_enabled()) { in bdrv_drain_all_begin()
563 if (replay_events_enabled()) { in bdrv_drain_all_end()
590 if (req->serialising) { in tracked_request_end()
641 if (offset >= req->overlap_offset + req->overlap_bytes) { in tracked_request_overlaps()
645 if (req->overlap_offset >= offset + bytes) { in tracked_request_overlaps()
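The two early returns at 641 and 645 are the standard disjointness test for half-open intervals: [a, a+alen) and [b, b+blen) overlap iff neither ends at or before the other's start. As a tiny self-contained check:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool ranges_overlap(int64_t a, int64_t alen, int64_t b, int64_t blen)
    {
        if (a >= b + blen) {   /* mirrors: offset >= overlap_offset + overlap_bytes */
            return false;
        }
        if (b >= a + alen) {   /* mirrors: overlap_offset >= offset + bytes */
            return false;
        }
        return true;
    }

    int main(void)
    {
        assert(ranges_overlap(0, 512, 511, 10));
        assert(!ranges_overlap(0, 512, 512, 10));  /* touching is not overlap */
        return 0;
    }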
658 if (req == self || (!req->serialising && !self->serialising)) { in bdrv_find_conflicting_request()
661 if (tracked_request_overlaps(req, self->overlap_offset, in bdrv_find_conflicting_request()
672 * If the request is already (indirectly) waiting for us, or in bdrv_find_conflicting_request()
676 if (!req->waiting_for) { in bdrv_find_conflicting_request()
708 if (!req->serialising) { in tracked_request_set_serialising()
719 * NULL if there is none.
728 if (req->co == self) { in bdrv_co_get_self_request()
737 * Round a region to subcluster (if supported) or cluster boundaries
745 if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) { in bdrv_round_to_subclusters()
761 if (ret < 0 || bdi.cluster_size == 0) { in bdrv_get_cluster_size()
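bdrv_round_to_subclusters() (745) widens [offset, offset+bytes) to the enclosing subcluster- or cluster-aligned region, falling back to no rounding when the driver reports no granularity. The underlying arithmetic is plain round-down/round-up; a sketch (real code must bound the inputs first, as the checks at 822-844 do, so the additions cannot overflow):

    #include <assert.h>
    #include <stdint.h>

    static void round_to_alignment(int64_t offset, int64_t bytes, int64_t align,
                                   int64_t *aligned_offset, int64_t *aligned_bytes)
    {
        int64_t start = offset / align * align;                      /* round down */
        int64_t end = (offset + bytes + align - 1) / align * align;  /* round up */

        *aligned_offset = start;
        *aligned_bytes = end - start;
    }

    int main(void)
    {
        int64_t off, len;
        round_to_alignment(1000, 100, 512, &off, &len);
        assert(off == 512 && len == 1024);   /* [512, 1536) encloses [1000, 1100) */
        return 0;
    }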
792 if (!qatomic_read(&bs->serialising_in_flight)) { in bdrv_wait_serialising_requests()
822 if (offset < 0) { in bdrv_check_qiov_request()
827 if (bytes < 0) { in bdrv_check_qiov_request()
832 if (bytes > BDRV_MAX_LENGTH) { in bdrv_check_qiov_request()
838 if (offset > BDRV_MAX_LENGTH) { in bdrv_check_qiov_request()
844 if (offset > BDRV_MAX_LENGTH - bytes) { in bdrv_check_qiov_request()
851 if (!qiov) { in bdrv_check_qiov_request()
859 if (qiov_offset > qiov->size) { in bdrv_check_qiov_request()
865 if (bytes > qiov->size - qiov_offset) { in bdrv_check_qiov_request()
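The validation chain at 822-865 is careful never to compute offset + bytes before proving it safe: the "offset > MAX - bytes" form cannot overflow, unlike "offset + bytes > MAX". The same pattern, standalone (the limit value here is illustrative, not QEMU's BDRV_MAX_LENGTH):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LENGTH ((int64_t)1 << 62)   /* illustrative request limit */

    static int check_request(int64_t offset, int64_t bytes)
    {
        if (offset < 0 || bytes < 0) {
            return -EIO;
        }
        if (bytes > MAX_LENGTH || offset > MAX_LENGTH) {
            return -EIO;
        }
        if (offset > MAX_LENGTH - bytes) {   /* overflow-safe offset+bytes test */
            return -EIO;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_request(INT64_MAX - 10, 100));  /* rejected, no UB */
        return 0;
    }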
883 if (ret < 0) { in bdrv_check_request32()
887 if (bytes > BDRV_REQUEST_MAX_BYTES) { in bdrv_check_request32()
897 * zeroes to the device if they currently do not return zeroes. Optional
911 if (target_size < 0) { in bdrv_make_zero()
917 if (bytes <= 0) { in bdrv_make_zero()
921 if (ret < 0) { in bdrv_make_zero()
924 if (ret & BDRV_BLOCK_ZERO) { in bdrv_make_zero()
929 if (ret < 0) { in bdrv_make_zero()
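The hits at 911-929 trace the loop of bdrv_make_zero(): query how much of the image already reads as zero, skip those ranges, and explicitly zero the rest. A reduced model of that loop; block_status() and write_zeroes() are hypothetical stubs standing in for bdrv_co_block_status() and bdrv_co_pwrite_zeroes():

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_ZERO 1   /* stand-in for the BDRV_BLOCK_ZERO status bit */

    /* Hypothetical backend: a 1 MiB image whose first half is already zero. */
    static int block_status(int64_t offset, int64_t bytes, int64_t *pnum)
    {
        if (offset < 512 * 1024) {
            *pnum = 512 * 1024 - offset;
            return BLOCK_ZERO;
        }
        *pnum = bytes;
        return 0;
    }

    static int write_zeroes(int64_t offset, int64_t bytes)
    {
        printf("zeroing [%lld, +%lld)\n", (long long)offset, (long long)bytes);
        return 0;
    }

    static int make_zero(int64_t target_size)
    {
        for (int64_t offset = 0; offset < target_size; ) {
            int64_t pnum;
            int ret = block_status(offset, target_size - offset, &pnum);
            if (ret < 0) {
                return ret;
            }
            if (!(ret & BLOCK_ZERO)) {   /* not known-zero: zero it explicitly */
                ret = write_zeroes(offset, pnum);
                if (ret < 0) {
                    return ret;
                }
            }
            offset += pnum;
        }
        return 0;
    }

    int main(void)
    {
        return make_zero(1024 * 1024);
    }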
951 if (ret < 0) { in bdrv_co_pwrite_sync()
956 if (ret < 0) { in bdrv_co_pwrite_sync()
990 if (!drv) { in bdrv_driver_preadv()
994 if (drv->bdrv_co_preadv_part) { in bdrv_driver_preadv()
999 if (qiov_offset > 0 || bytes != qiov->size) { in bdrv_driver_preadv()
1004 if (drv->bdrv_co_preadv) { in bdrv_driver_preadv()
1009 if (drv->bdrv_aio_preadv) { in bdrv_driver_preadv()
1017 if (acb == NULL) { in bdrv_driver_preadv()
1038 if (qiov == &local_qiov) { in bdrv_driver_preadv()
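The cascade at 994-1017 is a capability dispatch: prefer the richest driver callback (bdrv_co_preadv_part), fall back to the plain vectored read, and finally emulate over the AIO-style interface, bouncing through a local qiov when the driver cannot take an offset into the vector. A reduced sketch with function pointers; names are simplified stand-ins:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct driver {
        int (*preadv_part)(int64_t offset, size_t qiov_offset); /* richest */
        int (*preadv)(int64_t offset);       /* plain vectored read */
        int (*aio_preadv)(int64_t offset);   /* callback-based, awaited */
    };

    static int driver_preadv(const struct driver *drv, int64_t offset,
                             size_t qiov_offset)
    {
        if (drv->preadv_part) {
            return drv->preadv_part(offset, qiov_offset);
        }
        /* Without _part support, a real implementation first materializes a
         * sub-vector when qiov_offset != 0 (cf. the hit at 999). */
        if (drv->preadv) {
            return drv->preadv(offset);
        }
        if (drv->aio_preadv) {
            return drv->aio_preadv(offset);  /* QEMU yields until completion */
        }
        return -1;   /* no usable driver method */
    }

    static int plain_read(int64_t offset)
    {
        printf("preadv @%lld\n", (long long)offset);
        return 0;
    }

    int main(void)
    {
        struct driver drv = { .preadv = plain_read };
        return driver_preadv(&drv, 4096, 0);
    }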
1060 if (!drv) { in bdrv_driver_pwritev()
1064 if (bs->open_flags & BDRV_O_NO_FLUSH) { in bdrv_driver_pwritev()
1068 if ((flags & BDRV_REQ_FUA) && in bdrv_driver_pwritev()
1076 if (drv->bdrv_co_pwritev_part) { in bdrv_driver_pwritev()
1082 if (qiov_offset > 0 || bytes != qiov->size) { in bdrv_driver_pwritev()
1087 if (drv->bdrv_co_pwritev) { in bdrv_driver_pwritev()
1092 if (drv->bdrv_aio_pwritev) { in bdrv_driver_pwritev()
1100 if (acb == NULL) { in bdrv_driver_pwritev()
1120 if (ret == 0 && emulate_fua) { in bdrv_driver_pwritev()
1124 if (qiov == &local_qiov) { in bdrv_driver_pwritev()
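The hits at 1064-1120 show how a FUA (Force Unit Access) write is honored on drivers without native support: clear the flag, do the plain write, then flush, unless the image was opened with BDRV_O_NO_FLUSH, in which case FUA is moot. The decision logic in isolation (simplified flags, hypothetical stubs):

    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_FUA 1u   /* stand-in for BDRV_REQ_FUA */

    static bool no_flush;              /* cf. bs->open_flags & BDRV_O_NO_FLUSH */
    static bool driver_supports_fua;   /* cf. the driver's supported flags */

    static int do_write(unsigned flags)
    {
        printf("write%s\n", flags & REQ_FUA ? " (native FUA)" : "");
        return 0;
    }

    static int do_flush(void)
    {
        puts("flush");
        return 0;
    }

    static int pwritev(unsigned flags)
    {
        bool emulate_fua = false;

        if (no_flush) {
            flags &= ~REQ_FUA;    /* flushes disabled: drop FUA entirely */
        }
        if ((flags & REQ_FUA) && !driver_supports_fua) {
            flags &= ~REQ_FUA;    /* driver can't do it natively... */
            emulate_fua = true;   /* ...so flush after a successful write */
        }

        int ret = do_write(flags);
        if (ret == 0 && emulate_fua) {   /* mirrors the hit at 1120 */
            ret = do_flush();
        }
        return ret;
    }

    int main(void)
    {
        return pwritev(REQ_FUA);   /* prints "write" then "flush" */
    }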
1143 if (!drv) { in bdrv_driver_pwritev_compressed()
1147 if (!block_driver_can_compress(drv)) { in bdrv_driver_pwritev_compressed()
1151 if (drv->bdrv_co_pwritev_compressed_part) { in bdrv_driver_pwritev_compressed()
1156 if (qiov_offset == 0) { in bdrv_driver_pwritev_compressed()
1192 if (!drv) { in bdrv_co_do_copy_on_readv()
1203 * are doing is a read request. If we did things right, write permissions in bdrv_co_do_copy_on_readv()
1225 if (skip_write) { in bdrv_co_do_copy_on_readv()
1231 if (ret < 0) { in bdrv_co_do_copy_on_readv()
1233 * Safe to treat errors in querying allocation as if in bdrv_co_do_copy_on_readv()
1240 /* Stop at EOF if the image ends in the middle of the cluster */ in bdrv_co_do_copy_on_readv()
1241 if (ret == 0 && pnum == 0) { in bdrv_co_do_copy_on_readv()
1249 if (ret <= 0) { in bdrv_co_do_copy_on_readv()
1254 if (!bounce_buffer) { in bdrv_co_do_copy_on_readv()
1260 if (!bounce_buffer) { in bdrv_co_do_copy_on_readv()
1269 if (ret < 0) { in bdrv_co_do_copy_on_readv()
1274 if (drv->bdrv_co_pwrite_zeroes && in bdrv_co_do_copy_on_readv()
1277 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy in bdrv_co_do_copy_on_readv()
1290 if (ret < 0) { in bdrv_co_do_copy_on_readv()
1292 * requests. If this is a deliberate copy-on-read in bdrv_co_do_copy_on_readv()
1299 if (!(flags & BDRV_REQ_PREFETCH)) { in bdrv_co_do_copy_on_readv()
1304 } else if (!(flags & BDRV_REQ_PREFETCH)) { in bdrv_co_do_copy_on_readv()
1309 if (ret < 0) { in bdrv_co_do_copy_on_readv()
1352 * potential fallback support, if we ever implement any read flags in bdrv_aligned_preadv()
1360 if (flags & BDRV_REQ_COPY_ON_READ) { in bdrv_aligned_preadv()
1361 /* If we touch the same cluster it counts as an overlap. This in bdrv_aligned_preadv()
1371 if (flags & BDRV_REQ_COPY_ON_READ) { in bdrv_aligned_preadv()
1378 if (ret < 0) { in bdrv_aligned_preadv()
1382 if (!ret || pnum != bytes) { in bdrv_aligned_preadv()
1386 } else if (flags & BDRV_REQ_PREFETCH) { in bdrv_aligned_preadv()
1393 if (total_bytes < 0) { in bdrv_aligned_preadv()
1401 if (bytes <= max_bytes && bytes <= max_transfer) { in bdrv_aligned_preadv()
1409 if (max_bytes) { in bdrv_aligned_preadv()
1423 if (ret < 0) { in bdrv_aligned_preadv()
1449 * around tail, if tail exists.
1452 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
1457 * If padding makes the vector too long (exceeding IOV_MAX), then we need to
1494 if (pad->tail) { in bdrv_init_padding()
1498 if (!pad->head && !pad->tail) { in bdrv_init_padding()
1508 if (pad->tail) { in bdrv_init_padding()
1528 if (pad->head || pad->merge_reads) { in bdrv_padding_rmw_read()
1533 if (pad->head) { in bdrv_padding_rmw_read()
1536 if (pad->merge_reads && pad->tail) { in bdrv_padding_rmw_read()
1541 if (ret < 0) { in bdrv_padding_rmw_read()
1544 if (pad->head) { in bdrv_padding_rmw_read()
1547 if (pad->merge_reads && pad->tail) { in bdrv_padding_rmw_read()
1551 if (pad->merge_reads) { in bdrv_padding_rmw_read()
1556 if (pad->tail) { in bdrv_padding_rmw_read()
1564 if (ret < 0) { in bdrv_padding_rmw_read()
1571 if (zero_middle) { in bdrv_padding_rmw_read()
1583 if (pad->collapse_bounce_buf) { in bdrv_padding_finalize()
1584 if (!pad->write) { in bdrv_padding_finalize()
1586 * If padding required elements in the vector to be collapsed into a in bdrv_padding_finalize()
1595 if (pad->buf) { in bdrv_padding_finalize()
1624 * Cannot pad if resulting length would exceed SIZE_MAX. Returning an error in bdrv_create_padded_qiov()
1628 if (SIZE_MAX - pad->head < bytes || in bdrv_create_padded_qiov()
1634 /* Length of the resulting IOV if we just concatenated everything */ in bdrv_create_padded_qiov()
1639 if (pad->head) { in bdrv_create_padded_qiov()
1644 * If padded_niov > IOV_MAX, we cannot just concatenate everything. in bdrv_create_padded_qiov()
1648 if (padded_niov > IOV_MAX) { in bdrv_create_padded_qiov()
1678 if (pad->write) { in bdrv_create_padded_qiov()
1688 if (pad->tail) { in bdrv_create_padded_qiov()
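Behind the padding hits at 1449-1688 sits simple geometry: @head is how far the request start overshoots the previous alignment boundary, @tail is the distance from the request end up to the next one, and a request whose padded form fits in a single aligned chunk gets both ends served by one merged RMW read. Just the arithmetic, as a sketch:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct padding {
        int64_t head, tail;
        bool merge_reads;   /* padded request fits in one aligned chunk */
    };

    static bool init_padding(int64_t offset, int64_t bytes, int64_t align,
                             struct padding *pad)
    {
        int64_t end = offset + bytes;

        pad->head = offset % align;
        pad->tail = end % align ? align - end % align : 0;
        if (!pad->head && !pad->tail) {
            return false;   /* already aligned: no padding needed */
        }
        /* Small request: head, body and tail fit in one chunk, so a single
         * read can cover both ends (cf. the merge_reads hits above). */
        pad->merge_reads = pad->head + bytes + pad->tail <= align;
        return true;
    }

    int main(void)
    {
        struct padding pad;

        assert(init_padding(1000, 100, 512, &pad));
        assert(pad.head == 488 && pad.tail == 436 && !pad.merge_reads);
        assert(init_padding(100, 100, 512, &pad) && pad.merge_reads);
        return 0;
    }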
2700 * Exchange request parameters with padded request if needed. Don't include the RMW
2701 * read of padding; bdrv_padding_rmw_read() should be called separately if
1725 if (ret < 0) { in bdrv_pad_request()
1729 if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) { in bdrv_pad_request()
1730 if (padded) { in bdrv_pad_request()
1740 if (*qiov) { in bdrv_pad_request()
1749 if (ret < 0) { in bdrv_pad_request()
1759 if (padded) { in bdrv_pad_request()
1762 if (flags) { in bdrv_pad_request()
1791 if (!bdrv_co_is_inserted(bs)) { in bdrv_co_preadv_part()
1796 if (ret < 0) { in bdrv_co_preadv_part()
1800 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { in bdrv_co_preadv_part()
1802 * Aligning a zero request is nonsense. Even if the driver has special meaning in bdrv_co_preadv_part()
1806 * Still, no reason to return an error if someone does an unaligned in bdrv_co_preadv_part()
1814 /* Don't do copy-on-read if we read data before write operation */ in bdrv_co_preadv_part()
1815 if (qatomic_read(&bs->copy_on_read)) { in bdrv_co_preadv_part()
1821 if (ret < 0) { in bdrv_co_preadv_part()
1859 if (!drv) { in bdrv_co_do_pwrite_zeroes()
1863 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { in bdrv_co_do_pwrite_zeroes()
1868 if (flags & BDRV_REQ_REGISTERED_BUF) { in bdrv_co_do_pwrite_zeroes()
1872 /* If opened with discard=off we should never unmap. */ in bdrv_co_do_pwrite_zeroes()
1873 if (!(bs->open_flags & BDRV_O_UNMAP)) { in bdrv_co_do_pwrite_zeroes()
1877 /* Invalidate the cached block-status data range if this write overlaps */ in bdrv_co_do_pwrite_zeroes()
1893 if (head) { in bdrv_co_do_pwrite_zeroes()
1895 * convenience, limit this request to max_transfer even if in bdrv_co_do_pwrite_zeroes()
1900 } else if (tail && num > alignment) { in bdrv_co_do_pwrite_zeroes()
1906 if (num > max_write_zeroes) { in bdrv_co_do_pwrite_zeroes()
1912 if (drv->bdrv_co_pwrite_zeroes) { in bdrv_co_do_pwrite_zeroes()
1915 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && in bdrv_co_do_pwrite_zeroes()
1923 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) { in bdrv_co_do_pwrite_zeroes()
1924 /* Fall back to bounce buffer if write zeroes is unsupported */ in bdrv_co_do_pwrite_zeroes()
1927 if ((flags & BDRV_REQ_FUA) && in bdrv_co_do_pwrite_zeroes()
1935 if (buf == NULL) { in bdrv_co_do_pwrite_zeroes()
1937 if (buf == NULL) { in bdrv_co_do_pwrite_zeroes()
1946 /* Keep bounce buffer around if it is big enough for all in bdrv_co_do_pwrite_zeroes()
1949 if (num < max_transfer) { in bdrv_co_do_pwrite_zeroes()
1960 if (ret == 0 && need_flush) { in bdrv_co_do_pwrite_zeroes()
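The hits at 1859-1960 trace bdrv_co_do_pwrite_zeroes(): try the driver's native zero-write first; on -ENOTSUP, and unless the caller passed BDRV_REQ_NO_FALLBACK, fall back to writing an explicitly zeroed bounce buffer (flushing afterwards if FUA must be emulated, as in the sketch above). The fallback path, reduced to a sketch with hypothetical stubs:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define REQ_NO_FALLBACK 1u   /* stand-in for BDRV_REQ_NO_FALLBACK */

    /* Hypothetical driver without native zero-write support. */
    static int drv_pwrite_zeroes(int64_t offset, int64_t bytes)
    {
        (void)offset; (void)bytes;
        return -ENOTSUP;
    }

    static int drv_pwritev(int64_t offset, const void *buf, int64_t bytes)
    {
        (void)buf;
        printf("writing %lld zero bytes @%lld\n",
               (long long)bytes, (long long)offset);
        return 0;
    }

    static int pwrite_zeroes(int64_t offset, int64_t bytes, unsigned flags)
    {
        int ret = drv_pwrite_zeroes(offset, bytes);
        if (ret != -ENOTSUP || (flags & REQ_NO_FALLBACK)) {
            return ret;   /* native success, hard error, or fallback forbidden */
        }

        /* Fall back to a bounce buffer full of zeroes (cf. hit at 1924). */
        void *buf = calloc(1, (size_t)bytes);
        if (buf == NULL) {
            return -ENOMEM;
        }
        ret = drv_pwritev(offset, buf, bytes);
        free(buf);
        return ret;
    }

    int main(void)
    {
        return pwrite_zeroes(0, 65536, 0);
    }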
1975 if (bdrv_is_read_only(bs)) { in bdrv_co_write_req_prepare()
1984 if (flags & BDRV_REQ_SERIALISING) { in bdrv_co_write_req_prepare()
1989 if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) { in bdrv_co_write_req_prepare()
2006 if (flags & BDRV_REQ_WRITE_UNCHANGED) { in bdrv_co_write_req_prepare()
2039 if (ret == 0 && in bdrv_co_write_req_finish()
2047 if (req->bytes) { in bdrv_co_write_req_finish()
2080 if (!drv) { in bdrv_aligned_pwritev()
2084 if (bdrv_has_readonly_bitmaps(bs)) { in bdrv_aligned_pwritev()
2096 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && in bdrv_aligned_pwritev()
2100 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { in bdrv_aligned_pwritev()
2108 if (ret < 0) { in bdrv_aligned_pwritev()
2110 } else if (flags & BDRV_REQ_ZERO_WRITE) { in bdrv_aligned_pwritev()
2113 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { in bdrv_aligned_pwritev()
2116 } else if (bytes <= max_transfer) { in bdrv_aligned_pwritev()
2126 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && in bdrv_aligned_pwritev()
2128 /* If FUA is going to be emulated by flush, we only in bdrv_aligned_pwritev()
2137 if (ret < 0) { in bdrv_aligned_pwritev()
2145 if (ret >= 0) { in bdrv_aligned_pwritev()
2168 if (padding) { in bdrv_co_do_zero_pwritev()
2174 if (pad.head || pad.merge_reads) { in bdrv_co_do_zero_pwritev()
2182 if (ret < 0 || pad.merge_reads) { in bdrv_co_do_zero_pwritev()
2192 if (bytes >= align) { in bdrv_co_do_zero_pwritev()
2197 if (ret < 0) { in bdrv_co_do_zero_pwritev()
2205 if (bytes) { in bdrv_co_do_zero_pwritev()
2245 if (!bdrv_co_is_inserted(bs)) { in bdrv_co_pwritev_part()
2249 if (flags & BDRV_REQ_ZERO_WRITE) { in bdrv_co_pwritev_part()
2254 if (ret < 0) { in bdrv_co_pwritev_part()
2258 /* If the request is misaligned then we can't make it efficient */ in bdrv_co_pwritev_part()
2259 if ((flags & BDRV_REQ_NO_FALLBACK) && in bdrv_co_pwritev_part()
2265 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { in bdrv_co_pwritev_part()
2267 * Aligning a zero request is nonsense. Even if the driver has special meaning in bdrv_co_pwritev_part()
2271 * Still, no reason to return an error if someone does an unaligned in bdrv_co_pwritev_part()
2277 if (!(flags & BDRV_REQ_ZERO_WRITE)) { in bdrv_co_pwritev_part()
2281 * alignment only if there is no ZERO flag. in bdrv_co_pwritev_part()
2285 if (ret < 0) { in bdrv_co_pwritev_part()
2293 if (flags & BDRV_REQ_ZERO_WRITE) { in bdrv_co_pwritev_part()
2299 if (padded) { in bdrv_co_pwritev_part()
2335 * Flush ALL BDSes regardless of whether they are reachable via a BlkBackend or not.
2351 if (replay_events_enabled()) { in bdrv_flush_all()
2357 if (ret < 0 && !result) { in bdrv_flush_all()
2373 * If 'offset' is beyond the end of the disk image the return value is
2376 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2377 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2386 * Returns negative errno on failure. Otherwise, if the
2387 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
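The doc-comment hits at 2373-2387 state the block-status contract: a negative return is an error; otherwise *pnum reports how many bytes starting at 'offset' share the returned status, qualified by bits such as BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO and BDRV_BLOCK_OFFSET_VALID. A caller therefore walks a region in *pnum-sized steps; a sketch of that consumption loop (flag values illustrative, status source hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_DATA 1
    #define BLOCK_ZERO 2

    /* Hypothetical source: even 64 KiB chunks hold data, odd ones zeroes. */
    static int block_status(int64_t offset, int64_t bytes, int64_t *pnum)
    {
        int64_t chunk = offset / 65536;
        int64_t n = (chunk + 1) * 65536 - offset;

        *pnum = n < bytes ? n : bytes;
        return chunk % 2 ? BLOCK_ZERO : BLOCK_DATA;
    }

    int main(void)
    {
        int64_t offset = 0, end = 4 * 65536;

        while (offset < end) {
            int64_t pnum;
            int ret = block_status(offset, end - offset, &pnum);
            if (ret < 0) {
                return 1;
            }
            printf("[%lld, +%lld): %s\n", (long long)offset, (long long)pnum,
                   ret & BLOCK_ZERO ? "zero" : "data");
            offset += pnum;   /* *pnum bytes share this status; move past them */
        }
        return 0;
    }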
2408 if (total_size < 0) { in bdrv_co_do_block_status()
2413 if (offset >= total_size) { in bdrv_co_do_block_status()
2417 if (!bytes) { in bdrv_co_do_block_status()
2423 if (n < bytes) { in bdrv_co_do_block_status()
2430 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { in bdrv_co_do_block_status()
2433 if (offset + bytes == total_size) { in bdrv_co_do_block_status()
2436 if (bs->drv->protocol_name) { in bdrv_co_do_block_status()
2451 if (bs->drv->bdrv_co_block_status) { in bdrv_co_do_block_status()
2473 if (QLIST_EMPTY(&bs->children) && in bdrv_co_do_block_status()
2490 * such an update if possible. in bdrv_co_do_block_status()
2495 if (mode == BDRV_WANT_PRECISE && in bdrv_co_do_block_status()
2525 if (ret < 0) { in bdrv_co_do_block_status()
2536 if (ret & BDRV_BLOCK_RECURSE) { in bdrv_co_do_block_status()
2543 if (*pnum > bytes) { in bdrv_co_do_block_status()
2546 if (ret & BDRV_BLOCK_OFFSET_VALID) { in bdrv_co_do_block_status()
2550 if (ret & BDRV_BLOCK_RAW) { in bdrv_co_do_block_status()
2557 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { in bdrv_co_do_block_status()
2559 } else if (bs->drv->supports_backing) { in bdrv_co_do_block_status()
2562 if (!cow_bs) { in bdrv_co_do_block_status()
2564 } else if (mode == BDRV_WANT_PRECISE) { in bdrv_co_do_block_status()
2567 if (size2 >= 0 && offset >= size2) { in bdrv_co_do_block_status()
2573 if (mode == BDRV_WANT_PRECISE && ret & BDRV_BLOCK_RECURSE && in bdrv_co_do_block_status()
2582 if (ret2 >= 0) { in bdrv_co_do_block_status()
2586 if (ret2 & BDRV_BLOCK_EOF && in bdrv_co_do_block_status()
2614 if (ret >= 0 && offset + *pnum == total_size) { in bdrv_co_do_block_status()
2618 if (file) { in bdrv_co_do_block_status()
2621 if (map) { in bdrv_co_do_block_status()
2648 if (!depth) { in bdrv_co_common_block_status_above()
2653 if (!include_base && bs == base) { in bdrv_co_common_block_status_above()
2661 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { in bdrv_co_common_block_status_above()
2665 if (ret & BDRV_BLOCK_EOF) { in bdrv_co_common_block_status_above()
2678 if (ret < 0) { in bdrv_co_common_block_status_above()
2681 if (*pnum == 0) { in bdrv_co_common_block_status_above()
2684 * short, any zeroes that we synthesize beyond EOF behave as if they in bdrv_co_common_block_status_above()
2688 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see in bdrv_co_common_block_status_above()
2693 if (file) { in bdrv_co_common_block_status_above()
2699 if (ret & BDRV_BLOCK_ALLOCATED) { in bdrv_co_common_block_status_above()
2704 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see in bdrv_co_common_block_status_above()
2711 if (p == base) { in bdrv_co_common_block_status_above()
2724 if (offset + *pnum == eof) { in bdrv_co_common_block_status_above()
2753 * Check @bs (and its backing chain) to see if the range defined
2755 * Return 1 if that is the case, 0 otherwise and -errno on error.
2771 if (ret < 0) { in bdrv_co_is_zero_fast()
2774 if (!(ret & BDRV_BLOCK_ZERO)) { in bdrv_co_is_zero_fast()
2785 * Check @bs (and its backing chain) to see if the entire image is known
2787 * Return 1 if that is the case, 0 otherwise and -errno on error.
2801 if (bytes < 0) { in bdrv_co_is_all_zeroes()
2805 /* First probe - see if the entire image reads as zero */ in bdrv_co_is_all_zeroes()
2809 if (ret < 0) { in bdrv_co_is_all_zeroes()
2812 if (ret & BDRV_BLOCK_ZERO) { in bdrv_co_is_all_zeroes()
2819 * alignment probing easier. If the block starts with only a in bdrv_co_is_all_zeroes()
2820 * small allocated region, it is still worth the effort to see if in bdrv_co_is_all_zeroes()
2822 * reading the first region to see if it reads zero after all. in bdrv_co_is_all_zeroes()
2824 if (pnum > MAX_ZERO_CHECK_BUFFER) { in bdrv_co_is_all_zeroes()
2828 if (ret <= 0) { in bdrv_co_is_all_zeroes()
2835 if (ret >= 0) { in bdrv_co_is_all_zeroes()
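bdrv_co_is_all_zeroes() (hits 2801-2835) first asks block status whether the whole image is known to read as zero; if only a small region at the front is allocated, it reads up to MAX_ZERO_CHECK_BUFFER bytes (the constant described at line 46) and inspects them. That second probe boils down to a buffer-is-zero scan; QEMU has an optimized buffer_is_zero() in util/, sketched here as a plain byte loop:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static bool buffer_is_zero(const unsigned char *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            if (buf[i] != 0) {
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        unsigned char buf[4096];

        memset(buf, 0, sizeof(buf));
        printf("%d\n", buffer_is_zero(buf, sizeof(buf)));   /* 1 */
        buf[100] = 0xff;
        printf("%d\n", buffer_is_zero(buf, sizeof(buf)));   /* 0 */
        return 0;
    }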
2852 if (ret < 0) { in bdrv_co_is_allocated()
2861 * Return a positive depth if (a prefix of) the given range is allocated
2862 * in any image between BASE and TOP (BASE is only included if include_base
2864 * BASE can be NULL to check if the given offset is allocated in any
2888 if (ret < 0) { in bdrv_co_is_allocated_above()
2892 if (ret & BDRV_BLOCK_ALLOCATED) { in bdrv_co_is_allocated_above()
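Per the doc-comment hits at 2861-2864, bdrv_co_is_allocated_above() returns a positive depth when some image between TOP and BASE allocates (a prefix of) the range: query each layer, stop at the first that reports the data allocated, and otherwise fall through to the next backing file. The chain walk in miniature, with hypothetical types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct layer {
        const char *name;
        struct layer *backing;               /* next image down the chain */
        bool (*allocated)(int64_t offset);   /* does this layer hold the byte? */
    };

    /* Returns the depth (1 = top) of the layer allocating @offset, or 0. */
    static int allocated_above(struct layer *top, int64_t offset)
    {
        int depth = 0;

        for (struct layer *l = top; l; l = l->backing) {
            depth++;
            if (l->allocated(offset)) {
                printf("offset %lld allocated in %s (depth %d)\n",
                       (long long)offset, l->name, depth);
                return depth;
            }
        }
        return 0;   /* unallocated everywhere: reads as zero */
    }

    static bool in_top(int64_t offset)  { return offset < 4096; }
    static bool in_base(int64_t offset) { return offset < 1048576; }

    int main(void)
    {
        struct layer base = { "base", NULL, in_base };
        struct layer top = { "top", &base, in_top };

        allocated_above(&top, 0);      /* found in top, depth 1 */
        allocated_above(&top, 8192);   /* falls through to base, depth 2 */
        return 0;
    }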
2908 if (ret < 0) { in bdrv_co_readv_vmstate()
2912 if (!drv) { in bdrv_co_readv_vmstate()
2918 if (drv->bdrv_co_load_vmstate) { in bdrv_co_readv_vmstate()
2920 } else if (child_bs) { in bdrv_co_readv_vmstate()
2941 if (ret < 0) { in bdrv_co_writev_vmstate()
2945 if (!drv) { in bdrv_co_writev_vmstate()
2951 if (drv->bdrv_co_save_vmstate) { in bdrv_co_writev_vmstate()
2953 } else if (child_bs) { in bdrv_co_writev_vmstate()
3002 /* Async version of aio cancel. The caller is not blocked if the acb implements
3008 if (acb->aiocb_info->cancel_async) { in bdrv_aio_cancel_async()
3027 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) || in bdrv_co_flush()
3045 if (bs->drv->bdrv_co_flush) { in bdrv_co_flush()
3052 if (bs->drv->bdrv_co_flush_to_os) { in bdrv_co_flush()
3054 if (ret < 0) { in bdrv_co_flush()
3060 if (bs->open_flags & BDRV_O_NO_FLUSH) { in bdrv_co_flush()
3064 /* Check if we really need to flush anything */ in bdrv_co_flush()
3065 if (bs->flushed_gen == current_gen) { in bdrv_co_flush()
3070 if (!bs->drv) { in bdrv_co_flush()
3076 if (bs->drv->bdrv_co_flush_to_disk) { in bdrv_co_flush()
3078 } else if (bs->drv->bdrv_aio_flush) { in bdrv_co_flush()
3085 if (acb == NULL) { in bdrv_co_flush()
3098 * that would break guests even if the server operates in writethrough in bdrv_co_flush()
3106 if (ret < 0) { in bdrv_co_flush()
3116 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { in bdrv_co_flush()
3118 if (!ret) { in bdrv_co_flush()
3126 if (ret == 0) { in bdrv_co_flush()
3132 /* Return value is ignored - it's ok if wait queue is empty */ in bdrv_co_flush()
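bdrv_co_flush() (hits 3027-3132) is layered: flush to the OS, then, unless BDRV_O_NO_FLUSH is set, flush to disk, with a write-generation counter (bs->flushed_gen against the current generation, hit at 3065) short-circuiting the whole path when nothing was written since the last flush. The generation trick in isolation, with simplified names:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t write_gen;     /* bumped when a write request completes */
    static uint64_t flushed_gen;   /* generation covered by the last flush */

    static void write_completed(void)
    {
        write_gen++;
    }

    static int flush(void)
    {
        uint64_t current_gen = write_gen;   /* sample before flushing */

        if (flushed_gen == current_gen) {
            puts("flush skipped: nothing written since last flush");
            return 0;
        }
        puts("issuing real flush");         /* cf. bdrv_co_flush_to_disk() */
        flushed_gen = current_gen;
        return 0;
    }

    int main(void)
    {
        write_completed();
        flush();   /* real flush */
        flush();   /* skipped */
        return 0;
    }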
3152 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { in bdrv_co_pdiscard()
3156 if (bdrv_has_readonly_bitmaps(bs)) { in bdrv_co_pdiscard()
3161 if (ret < 0) { in bdrv_co_pdiscard()
3165 /* Do nothing if disabled. */ in bdrv_co_pdiscard()
3166 if (!(bs->open_flags & BDRV_O_UNMAP)) { in bdrv_co_pdiscard()
3170 if (!bs->drv->bdrv_co_pdiscard) { in bdrv_co_pdiscard()
3174 /* Invalidate the cached block-status data range if this discard overlaps */ in bdrv_co_pdiscard()
3192 if (ret < 0) { in bdrv_co_pdiscard()
3203 if (head) { in bdrv_co_pdiscard()
3206 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { in bdrv_co_pdiscard()
3211 } else if (tail) { in bdrv_co_pdiscard()
3212 if (num > align) { in bdrv_co_pdiscard()
3215 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && in bdrv_co_pdiscard()
3222 if (num > max_pdiscard) { in bdrv_co_pdiscard()
3226 if (!bs->drv) { in bdrv_co_pdiscard()
3232 if (ret && ret != -ENOTSUP) { in bdrv_co_pdiscard()
3233 if (ret == -EINVAL && (offset % align != 0 || num % align != 0)) { in bdrv_co_pdiscard()
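The hits at 3192-3233 show bdrv_co_pdiscard() fragmenting a request to the driver's discard alignment: a misaligned head is shortened so the next iteration starts aligned, aligned middle chunks are capped at max_pdiscard, and the remainder forms the tail. The chunking loop, reduced (stub discard, illustrative limits; the real tail handling also respects request_alignment):

    #include <stdint.h>
    #include <stdio.h>

    static int drv_pdiscard(int64_t offset, int64_t bytes)
    {
        printf("discard [%lld, +%lld)\n", (long long)offset, (long long)bytes);
        return 0;
    }

    static int pdiscard(int64_t offset, int64_t bytes,
                        int64_t align, int64_t max_pdiscard)
    {
        int64_t end = offset + bytes;

        while (offset < end) {
            int64_t num = end - offset;
            int64_t head = offset % align;

            if (head) {
                /* Trim the head so the next iteration starts aligned. */
                num = align - head;
                if (num > end - offset) {
                    num = end - offset;   /* request ends inside this chunk */
                }
            } else if (num > max_pdiscard) {
                num = max_pdiscard;       /* cap aligned middle chunks */
            }

            int ret = drv_pdiscard(offset, num);
            if (ret < 0) {
                return ret;
            }
            offset += num;
        }
        return 0;
    }

    int main(void)
    {
        /* head [1000, 1024), 64 KiB middle chunks, short tail */
        return pdiscard(1000, 200000, 1024, 65536);
    }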
3262 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { in bdrv_co_ioctl()
3267 if (drv->bdrv_co_ioctl) { in bdrv_co_ioctl()
3271 if (!acb) { in bdrv_co_ioctl()
3293 if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_report()
3313 if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_mgmt()
3335 if (ret < 0) { in bdrv_co_zone_append()
3340 if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_append()
3369 if (size == 0) { in qemu_try_blockalign()
3381 if (mem) { in qemu_try_blockalign0()
3399 if (child == final_child) { in bdrv_register_buf_rollback()
3406 if (bs->drv && bs->drv->bdrv_unregister_buf) { in bdrv_register_buf_rollback()
3419 if (bs->drv && bs->drv->bdrv_register_buf) { in bdrv_register_buf()
3420 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { in bdrv_register_buf()
3425 if (!bdrv_register_buf(child->bs, host, size, errp)) { in bdrv_register_buf()
3440 if (bs->drv && bs->drv->bdrv_unregister_buf) { in bdrv_unregister_buf()
3464 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { in bdrv_co_copy_range_internal()
3468 if (ret) { in bdrv_co_copy_range_internal()
3471 if (write_flags & BDRV_REQ_ZERO_WRITE) { in bdrv_co_copy_range_internal()
3475 if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) { in bdrv_co_copy_range_internal()
3479 if (ret) { in bdrv_co_copy_range_internal()
3483 if (!src->bs->drv->bdrv_co_copy_range_from in bdrv_co_copy_range_internal()
3489 if (recurse_src) { in bdrv_co_copy_range_internal()
3512 if (!ret) { in bdrv_co_copy_range_internal()
3584 if (c->klass->resize) { in bdrv_parent_cb_resize()
3593 * If 'exact' is true, the file must be resized to exactly the given
3610 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ in bdrv_co_truncate()
3611 if (!drv) { in bdrv_co_truncate()
3615 if (offset < 0) { in bdrv_co_truncate()
3621 if (ret < 0) { in bdrv_co_truncate()
3626 if (old_size < 0) { in bdrv_co_truncate()
3631 if (bdrv_is_read_only(bs)) { in bdrv_co_truncate()
3636 if (offset > old_size) { in bdrv_co_truncate()
3646 /* If we are growing the image and potentially using preallocation for the in bdrv_co_truncate()
3649 if (new_bytes) { in bdrv_co_truncate()
3654 if (ret < 0) { in bdrv_co_truncate()
3664 * If the image has a backing file that is large enough that it would in bdrv_co_truncate()
3669 * Note that if the image has a backing file, but was opened without the in bdrv_co_truncate()
3673 if (new_bytes && backing) { in bdrv_co_truncate()
3677 if (backing_len < 0) { in bdrv_co_truncate()
3683 if (backing_len > old_size) { in bdrv_co_truncate()
3688 if (drv->bdrv_co_truncate) { in bdrv_co_truncate()
3689 if (flags & ~bs->supported_truncate_flags) { in bdrv_co_truncate()
3695 } else if (filtered) { in bdrv_co_truncate()
3702 if (ret < 0) { in bdrv_co_truncate()
3707 if (ret < 0) { in bdrv_co_truncate()
3731 if (!bs || !bs->drv) { in bdrv_cancel_in_flight()
3735 if (bs->drv->bdrv_cancel_in_flight) { in bdrv_cancel_in_flight()
3750 if (!drv) { in bdrv_co_preadv_snapshot()
3754 if (!drv->bdrv_co_preadv_snapshot) { in bdrv_co_preadv_snapshot()
3776 if (!drv) { in bdrv_co_snapshot_block_status()
3780 if (!drv->bdrv_co_snapshot_block_status) { in bdrv_co_snapshot_block_status()
3800 if (!drv) { in bdrv_co_pdiscard_snapshot()
3804 if (!drv->bdrv_co_pdiscard_snapshot) { in bdrv_co_pdiscard_snapshot()