xref: /qemu/block/io.c (revision 0af02bd10783e08220a784eb8d7e2f821a64f0fa)
/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
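/* With BDRV_SECTOR_BITS == 9 this evaluates to 32768 * 512 bytes = 16 MiB */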

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}

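/*
 * Merge a child's limits into the parent's: alignments and optimal sizes
 * may only grow (MAX), hard maxima may only shrink, where 0 means
 * "unlimited" (MIN_NON_ZERO).
 */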
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

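/*
 * Rough usage sketch (tran_new()/tran_finalize() are the generic helpers
 * from include/qemu/transactions.h); if a later action fails, the saved
 * limits are restored by bdrv_refresh_limits_abort():
 *
 *     Transaction *tran = tran_new();
 *     bdrv_refresh_limits(bs, tran, errp);
 *     ... more transactionable actions ...
 *     tran_finalize(tran, ret);
 */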
/* @tran is allowed to be NULL; in that case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}

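/*
 * Carries the parameters of a drained_begin/end request from a coroutine
 * to the bottom half that executes it on its behalf (see
 * bdrv_co_yield_to_drain()).
 */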
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    GLOBAL_STATE_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

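/*
 * Bottom half scheduled by bdrv_co_yield_to_drain(): performs the actual
 * drained_begin/end for the coroutine that yielded, then wakes it up.
 */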
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across the yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    GLOBAL_STATE_CODE();

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);
    GLOBAL_STATE_CODE();

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;
    GLOBAL_STATE_CODE();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk; use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}

void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

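/*
 * Both ranges are half-open, [offset, offset + bytes); the two early
 * returns below rule out the disjoint layouts shown in the diagrams.
 */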
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests.  This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

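/*
 * Example: with align = 512, a request covering [700, 800) has its
 * overlap range widened to [512, 1024): overlap_offset = 700 & ~511 = 512,
 * overlap_bytes = ROUND_UP(800, 512) - 512 = 512.
 */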
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
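/*
 * Example: with a 64 KiB cluster size, offset = 70000 and bytes = 1000
 * yield *cluster_offset = 65536 and *cluster_bytes = 65536, i.e. the
 * enclosing aligned region [65536, 131072).
 */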
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       int64_t *cluster_offset, int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflows io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflows io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

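/*
 * Like bdrv_check_qiov_request(), but additionally rejects requests
 * larger than BDRV_REQUEST_MAX_BYTES for the paths that still operate
 * on 32-bit request sizes.
 */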
static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to regions that do not already read back as zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

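/*
 * Glue between the callback-based AIO driver interface and coroutines:
 * bdrv_co_io_em_complete() stores the return value and wakes the
 * coroutine that issued the request.
 */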
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

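    /*
     * Fall back to the legacy sector-based driver interface; the
     * assertions below guarantee that the request is sector-aligned and
     * small enough for it.
     */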
1021edfab6a0SEric Blake     sector_num = offset >> BDRV_SECTOR_BITS;
1022edfab6a0SEric Blake     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1023edfab6a0SEric Blake 
10241bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
10251bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
102641ae31e3SAlberto Garcia     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1027edfab6a0SEric Blake     assert(drv->bdrv_co_readv);
1028edfab6a0SEric Blake 
1029ac850bf0SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1030ac850bf0SVladimir Sementsov-Ogievskiy 
1031ac850bf0SVladimir Sementsov-Ogievskiy out:
1032ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov == &local_qiov) {
1033ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&local_qiov);
1034ac850bf0SVladimir Sementsov-Ogievskiy     }
1035ac850bf0SVladimir Sementsov-Ogievskiy 
1036ac850bf0SVladimir Sementsov-Ogievskiy     return ret;
1037166fe960SKevin Wolf }
1038166fe960SKevin Wolf 
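/*
 * Editor's summary (not part of the original file): bdrv_driver_preadv()
 * dispatches to the first read interface the driver implements, in order of
 * preference:
 *
 *     .bdrv_co_preadv_part - byte-based, understands a qiov offset directly
 *     .bdrv_co_preadv      - byte-based; a local qiov slice is built first
 *     .bdrv_aio_preadv     - callback-based AIO, bridged through
 *                            CoroutineIOCompletion
 *     .bdrv_co_readv       - legacy sector-based read; offset and bytes
 *                            must be sector-aligned
 */
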
10397b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
10407b1fb72eSKevin Wolf bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
10417b1fb72eSKevin Wolf                     QEMUIOVector *qiov, size_t qiov_offset,
1042e75abedaSVladimir Sementsov-Ogievskiy                     BdrvRequestFlags flags)
104378a07294SKevin Wolf {
104478a07294SKevin Wolf     BlockDriver *drv = bs->drv;
1045e8b65355SStefan Hajnoczi     bool emulate_fua = false;
10463fb06697SKevin Wolf     int64_t sector_num;
10473fb06697SKevin Wolf     unsigned int nb_sectors;
1048ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
104978a07294SKevin Wolf     int ret;
1050b9b10c35SKevin Wolf     assert_bdrv_graph_readable();
105178a07294SKevin Wolf 
105217abcbeeSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1053fa166538SEric Blake 
1054d470ad42SMax Reitz     if (!drv) {
1055d470ad42SMax Reitz         return -ENOMEDIUM;
1056d470ad42SMax Reitz     }
1057d470ad42SMax Reitz 
1058e8b65355SStefan Hajnoczi     if ((flags & BDRV_REQ_FUA) &&
1059e8b65355SStefan Hajnoczi         (~bs->supported_write_flags & BDRV_REQ_FUA)) {
1060e8b65355SStefan Hajnoczi         flags &= ~BDRV_REQ_FUA;
1061e8b65355SStefan Hajnoczi         emulate_fua = true;
1062e8b65355SStefan Hajnoczi     }
1063e8b65355SStefan Hajnoczi 
1064e8b65355SStefan Hajnoczi     flags &= bs->supported_write_flags;
1065e8b65355SStefan Hajnoczi 
1066ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_part) {
1067ac850bf0SVladimir Sementsov-Ogievskiy         ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1068e8b65355SStefan Hajnoczi                                         flags);
1069ac850bf0SVladimir Sementsov-Ogievskiy         goto emulate_flags;
1070ac850bf0SVladimir Sementsov-Ogievskiy     }
1071ac850bf0SVladimir Sementsov-Ogievskiy 
1072ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset > 0 || bytes != qiov->size) {
1073ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1074ac850bf0SVladimir Sementsov-Ogievskiy         qiov = &local_qiov;
1075ac850bf0SVladimir Sementsov-Ogievskiy     }
1076ac850bf0SVladimir Sementsov-Ogievskiy 
10773fb06697SKevin Wolf     if (drv->bdrv_co_pwritev) {
1078e8b65355SStefan Hajnoczi         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
10793fb06697SKevin Wolf         goto emulate_flags;
10803fb06697SKevin Wolf     }
10813fb06697SKevin Wolf 
1082edfab6a0SEric Blake     if (drv->bdrv_aio_pwritev) {
108308844473SKevin Wolf         BlockAIOCB *acb;
108408844473SKevin Wolf         CoroutineIOCompletion co = {
108508844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
108608844473SKevin Wolf         };
108708844473SKevin Wolf 
1088e8b65355SStefan Hajnoczi         acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
108908844473SKevin Wolf                                     bdrv_co_io_em_complete, &co);
109008844473SKevin Wolf         if (acb == NULL) {
10913fb06697SKevin Wolf             ret = -EIO;
109208844473SKevin Wolf         } else {
109308844473SKevin Wolf             qemu_coroutine_yield();
10943fb06697SKevin Wolf             ret = co.ret;
109508844473SKevin Wolf         }
1096edfab6a0SEric Blake         goto emulate_flags;
1097edfab6a0SEric Blake     }
1098edfab6a0SEric Blake 
1099edfab6a0SEric Blake     sector_num = offset >> BDRV_SECTOR_BITS;
1100edfab6a0SEric Blake     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1101edfab6a0SEric Blake 
11021bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
11031bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
110441ae31e3SAlberto Garcia     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1105edfab6a0SEric Blake 
1106e18a58b4SEric Blake     assert(drv->bdrv_co_writev);
1107e8b65355SStefan Hajnoczi     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);
110878a07294SKevin Wolf 
11093fb06697SKevin Wolf emulate_flags:
1110e8b65355SStefan Hajnoczi     if (ret == 0 && emulate_fua) {
111178a07294SKevin Wolf         ret = bdrv_co_flush(bs);
111278a07294SKevin Wolf     }
111378a07294SKevin Wolf 
1114ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov == &local_qiov) {
1115ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&local_qiov);
1116ac850bf0SVladimir Sementsov-Ogievskiy     }
1117ac850bf0SVladimir Sementsov-Ogievskiy 
111878a07294SKevin Wolf     return ret;
111978a07294SKevin Wolf }
112078a07294SKevin Wolf 
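/*
 * Editor's note (illustrative, not part of the original file): with the FUA
 * emulation above, a request such as
 *
 *     ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, 0, BDRV_REQ_FUA);
 *
 * against a driver that does not advertise BDRV_REQ_FUA in
 * bs->supported_write_flags behaves roughly like
 *
 *     ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, 0);
 *     if (ret == 0) {
 *         ret = bdrv_co_flush(bs);
 *     }
 *
 * i.e. the flag is stripped and a full flush is issued after a successful
 * write.
 */
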
11217b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
112217abcbeeSVladimir Sementsov-Ogievskiy bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
112317abcbeeSVladimir Sementsov-Ogievskiy                                int64_t bytes, QEMUIOVector *qiov,
1124ac850bf0SVladimir Sementsov-Ogievskiy                                size_t qiov_offset)
112529a298afSPavel Butsykin {
112629a298afSPavel Butsykin     BlockDriver *drv = bs->drv;
1127ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
1128ac850bf0SVladimir Sementsov-Ogievskiy     int ret;
1129b9b10c35SKevin Wolf     assert_bdrv_graph_readable();
113029a298afSPavel Butsykin 
113117abcbeeSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
113217abcbeeSVladimir Sementsov-Ogievskiy 
1133d470ad42SMax Reitz     if (!drv) {
1134d470ad42SMax Reitz         return -ENOMEDIUM;
1135d470ad42SMax Reitz     }
1136d470ad42SMax Reitz 
1137ac850bf0SVladimir Sementsov-Ogievskiy     if (!block_driver_can_compress(drv)) {
113829a298afSPavel Butsykin         return -ENOTSUP;
113929a298afSPavel Butsykin     }
114029a298afSPavel Butsykin 
1141ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_compressed_part) {
1142ac850bf0SVladimir Sementsov-Ogievskiy         return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1143ac850bf0SVladimir Sementsov-Ogievskiy                                                     qiov, qiov_offset);
1144ac850bf0SVladimir Sementsov-Ogievskiy     }
1145ac850bf0SVladimir Sementsov-Ogievskiy 
1146ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset == 0) {
114729a298afSPavel Butsykin         return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
114829a298afSPavel Butsykin     }
114929a298afSPavel Butsykin 
1150ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1151ac850bf0SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1152ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_destroy(&local_qiov);
1153ac850bf0SVladimir Sementsov-Ogievskiy 
1154ac850bf0SVladimir Sementsov-Ogievskiy     return ret;
1155ac850bf0SVladimir Sementsov-Ogievskiy }
1156ac850bf0SVladimir Sementsov-Ogievskiy 
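/*
 * Editor's note (illustrative, not part of the original file): the
 * qemu_iovec_init_slice() pattern used here and in the preadv/pwritev
 * helpers above builds a view into the caller's vector without copying the
 * data.  For example, with a @qiov describing 12 KiB in three 4 KiB
 * elements, qiov_offset = 4096 and bytes = 4096 yield a one-element local
 * qiov that points at the middle element; qemu_iovec_destroy() then only
 * frees the slice metadata, not the payload.
 */
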
11577b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
11587b1fb72eSKevin Wolf bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
11597b1fb72eSKevin Wolf                          QEMUIOVector *qiov, size_t qiov_offset, int flags)
116061007b31SStefan Hajnoczi {
116185c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
116285c97ca7SKevin Wolf 
116361007b31SStefan Hajnoczi     /* Perform I/O through a temporary buffer so that users who scribble over
116461007b31SStefan Hajnoczi      * their read buffer while the operation is in progress do not end up
116561007b31SStefan Hajnoczi      * modifying the image file.  This is critical for zero-copy guest I/O
116661007b31SStefan Hajnoczi      * where anything might happen inside guest memory.
116761007b31SStefan Hajnoczi      */
11682275cc90SVladimir Sementsov-Ogievskiy     void *bounce_buffer = NULL;
116961007b31SStefan Hajnoczi 
117061007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
1171244483e6SKevin Wolf     int64_t cluster_offset;
11727cfd5275SEric Blake     int64_t cluster_bytes;
11739df5afbdSVladimir Sementsov-Ogievskiy     int64_t skip_bytes;
117461007b31SStefan Hajnoczi     int ret;
1175cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1176cb2e2878SEric Blake                                     BDRV_REQUEST_MAX_BYTES);
11779df5afbdSVladimir Sementsov-Ogievskiy     int64_t progress = 0;
11788644476eSMax Reitz     bool skip_write;
117961007b31SStefan Hajnoczi 
11809df5afbdSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
11819df5afbdSVladimir Sementsov-Ogievskiy 
1182d470ad42SMax Reitz     if (!drv) {
1183d470ad42SMax Reitz         return -ENOMEDIUM;
1184d470ad42SMax Reitz     }
1185d470ad42SMax Reitz 
11868644476eSMax Reitz     /*
11878644476eSMax Reitz      * Do not write anything when the BDS is inactive.  That is not
11888644476eSMax Reitz      * allowed, and it would not help.
11898644476eSMax Reitz      */
11908644476eSMax Reitz     skip_write = (bs->open_flags & BDRV_O_INACTIVE);
11918644476eSMax Reitz 
11921bf03e66SKevin Wolf     /* FIXME We cannot require callers to have write permissions when all they
11931bf03e66SKevin Wolf      * are doing is a read request. If we did things right, write permissions
11941bf03e66SKevin Wolf      * would be obtained anyway, but internally by the copy-on-read code.
1195765d9df9SEric Blake      * However, as long as it is implemented here rather than in a separate
11961bf03e66SKevin Wolf      * filter driver, the copy-on-read code doesn't have its own BdrvChild
11971bf03e66SKevin Wolf      * for which it could request permissions. Therefore we have to bypass
11981bf03e66SKevin Wolf      * the permission system for the moment. */
11991bf03e66SKevin Wolf     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1200afa4b293SKevin Wolf 
120161007b31SStefan Hajnoczi     /* Cover the entire cluster so no additional backing file I/O is required
1202cb2e2878SEric Blake      * when allocating a cluster in the image file.  Note that this value may
1203cb2e2878SEric Blake      * exceed BDRV_REQUEST_MAX_BYTES (even when the original read did not),
1204cb2e2878SEric Blake      * which is one reason we loop rather than doing it all at once.
120561007b31SStefan Hajnoczi      */
1206244483e6SKevin Wolf     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1207cb2e2878SEric Blake     skip_bytes = offset - cluster_offset;
120861007b31SStefan Hajnoczi 
1209244483e6SKevin Wolf     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1210244483e6SKevin Wolf                                    cluster_offset, cluster_bytes);
121161007b31SStefan Hajnoczi 
1212cb2e2878SEric Blake     while (cluster_bytes) {
1213cb2e2878SEric Blake         int64_t pnum;
121461007b31SStefan Hajnoczi 
12158644476eSMax Reitz         if (skip_write) {
12168644476eSMax Reitz             ret = 1; /* "already allocated", so nothing will be copied */
12178644476eSMax Reitz             pnum = MIN(cluster_bytes, max_transfer);
12188644476eSMax Reitz         } else {
1219cb2e2878SEric Blake             ret = bdrv_is_allocated(bs, cluster_offset,
1220cb2e2878SEric Blake                                     MIN(cluster_bytes, max_transfer), &pnum);
1221cb2e2878SEric Blake             if (ret < 0) {
12228644476eSMax Reitz                 /*
12238644476eSMax Reitz                  * Safe to treat errors in querying allocation as if
1224cb2e2878SEric Blake                  * unallocated; we'll probably fail again soon on the
1225cb2e2878SEric Blake                  * read, but at least that will set a decent errno.
1226cb2e2878SEric Blake                  */
1227cb2e2878SEric Blake                 pnum = MIN(cluster_bytes, max_transfer);
1228cb2e2878SEric Blake             }
1229cb2e2878SEric Blake 
1230b0ddcbbbSKevin Wolf             /* Stop at EOF if the image ends in the middle of the cluster */
1231b0ddcbbbSKevin Wolf             if (ret == 0 && pnum == 0) {
1232b0ddcbbbSKevin Wolf                 assert(progress >= bytes);
1233b0ddcbbbSKevin Wolf                 break;
1234b0ddcbbbSKevin Wolf             }
1235b0ddcbbbSKevin Wolf 
1236cb2e2878SEric Blake             assert(skip_bytes < pnum);
12378644476eSMax Reitz         }
1238cb2e2878SEric Blake 
1239cb2e2878SEric Blake         if (ret <= 0) {
12401143ec5eSVladimir Sementsov-Ogievskiy             QEMUIOVector local_qiov;
12411143ec5eSVladimir Sementsov-Ogievskiy 
1242cb2e2878SEric Blake             /* Must copy-on-read; use the bounce buffer */
12430d93ed08SVladimir Sementsov-Ogievskiy             pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
12442275cc90SVladimir Sementsov-Ogievskiy             if (!bounce_buffer) {
12452275cc90SVladimir Sementsov-Ogievskiy                 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
12462275cc90SVladimir Sementsov-Ogievskiy                 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
12472275cc90SVladimir Sementsov-Ogievskiy                 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
12482275cc90SVladimir Sementsov-Ogievskiy 
12492275cc90SVladimir Sementsov-Ogievskiy                 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
12502275cc90SVladimir Sementsov-Ogievskiy                 if (!bounce_buffer) {
12512275cc90SVladimir Sementsov-Ogievskiy                     ret = -ENOMEM;
12522275cc90SVladimir Sementsov-Ogievskiy                     goto err;
12532275cc90SVladimir Sementsov-Ogievskiy                 }
12542275cc90SVladimir Sementsov-Ogievskiy             }
12550d93ed08SVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1256cb2e2878SEric Blake 
1257cb2e2878SEric Blake             ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1258ac850bf0SVladimir Sementsov-Ogievskiy                                      &local_qiov, 0, 0);
125961007b31SStefan Hajnoczi             if (ret < 0) {
126061007b31SStefan Hajnoczi                 goto err;
126161007b31SStefan Hajnoczi             }
126261007b31SStefan Hajnoczi 
1263c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
1264c1499a5eSEric Blake             if (drv->bdrv_co_pwrite_zeroes &&
1265cb2e2878SEric Blake                 buffer_is_zero(bounce_buffer, pnum)) {
1266a604fa2bSEric Blake                 /* FIXME: Should we (perhaps conditionally) be setting
1267a604fa2bSEric Blake                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1268a604fa2bSEric Blake                  * that still correctly reads as zero? */
12697adcf59fSMax Reitz                 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
12707adcf59fSMax Reitz                                                BDRV_REQ_WRITE_UNCHANGED);
127161007b31SStefan Hajnoczi             } else {
1272cb2e2878SEric Blake                 /* This does not change the data on the disk, so it is not
1273cb2e2878SEric Blake                  * necessary to flush even in cache=writethrough mode.
127461007b31SStefan Hajnoczi                  */
1275cb2e2878SEric Blake                 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1276ac850bf0SVladimir Sementsov-Ogievskiy                                           &local_qiov, 0,
12777adcf59fSMax Reitz                                           BDRV_REQ_WRITE_UNCHANGED);
127861007b31SStefan Hajnoczi             }
127961007b31SStefan Hajnoczi 
128061007b31SStefan Hajnoczi             if (ret < 0) {
1281cb2e2878SEric Blake                 /* It might be okay to ignore write errors for guest
1282cb2e2878SEric Blake                  * requests.  If this is a deliberate copy-on-read
1283cb2e2878SEric Blake                  * then we don't want to ignore the error.  Simply
1284cb2e2878SEric Blake                  * report it in all cases.
128561007b31SStefan Hajnoczi                  */
128661007b31SStefan Hajnoczi                 goto err;
128761007b31SStefan Hajnoczi             }
128861007b31SStefan Hajnoczi 
12893299e5ecSVladimir Sementsov-Ogievskiy             if (!(flags & BDRV_REQ_PREFETCH)) {
12901143ec5eSVladimir Sementsov-Ogievskiy                 qemu_iovec_from_buf(qiov, qiov_offset + progress,
12911143ec5eSVladimir Sementsov-Ogievskiy                                     bounce_buffer + skip_bytes,
12924ab78b19SVladimir Sementsov-Ogievskiy                                     MIN(pnum - skip_bytes, bytes - progress));
12933299e5ecSVladimir Sementsov-Ogievskiy             }
12943299e5ecSVladimir Sementsov-Ogievskiy         } else if (!(flags & BDRV_REQ_PREFETCH)) {
1295cb2e2878SEric Blake             /* Read directly into the destination */
12961143ec5eSVladimir Sementsov-Ogievskiy             ret = bdrv_driver_preadv(bs, offset + progress,
12971143ec5eSVladimir Sementsov-Ogievskiy                                      MIN(pnum - skip_bytes, bytes - progress),
12981143ec5eSVladimir Sementsov-Ogievskiy                                      qiov, qiov_offset + progress, 0);
1299cb2e2878SEric Blake             if (ret < 0) {
1300cb2e2878SEric Blake                 goto err;
1301cb2e2878SEric Blake             }
1302cb2e2878SEric Blake         }
1303cb2e2878SEric Blake 
1304cb2e2878SEric Blake         cluster_offset += pnum;
1305cb2e2878SEric Blake         cluster_bytes -= pnum;
1306cb2e2878SEric Blake         progress += pnum - skip_bytes;
1307cb2e2878SEric Blake         skip_bytes = 0;
1308cb2e2878SEric Blake     }
1309cb2e2878SEric Blake     ret = 0;
131061007b31SStefan Hajnoczi 
131161007b31SStefan Hajnoczi err:
131261007b31SStefan Hajnoczi     qemu_vfree(bounce_buffer);
131361007b31SStefan Hajnoczi     return ret;
131461007b31SStefan Hajnoczi }
131561007b31SStefan Hajnoczi 
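/*
 * Worked example (editor's illustration, assuming a 64 KiB cluster size):
 * a copy-on-read request with offset = 70000 and bytes = 1000 is widened by
 * bdrv_round_to_clusters() to
 *
 *     cluster_offset = 65536   (70000 rounded down to the cluster)
 *     cluster_bytes  = 65536   (end 71000 rounded up to 131072)
 *     skip_bytes     = 4464    (70000 - 65536)
 *
 * so whole clusters travel through the bounce buffer, while only the
 * original [offset, offset + bytes) window is copied back into @qiov.
 */
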
131661007b31SStefan Hajnoczi /*
131761007b31SStefan Hajnoczi  * Forwards an already correctly aligned request to the BlockDriver. This
13181a62d0acSEric Blake  * handles copy on read, zeroing after EOF, and fragmentation of large
13191a62d0acSEric Blake  * reads; any other features must be implemented by the caller.
132061007b31SStefan Hajnoczi  */
13217b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
13227b1fb72eSKevin Wolf bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
13237b1fb72eSKevin Wolf                     int64_t offset, int64_t bytes, int64_t align,
13247b1fb72eSKevin Wolf                     QEMUIOVector *qiov, size_t qiov_offset, int flags)
132561007b31SStefan Hajnoczi {
132685c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
1327c9d20029SKevin Wolf     int64_t total_bytes, max_bytes;
13281a62d0acSEric Blake     int ret = 0;
13298b0c5d76SVladimir Sementsov-Ogievskiy     int64_t bytes_remaining = bytes;
13301a62d0acSEric Blake     int max_transfer;
133161007b31SStefan Hajnoczi 
13328b0c5d76SVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
133349c07526SKevin Wolf     assert(is_power_of_2(align));
133449c07526SKevin Wolf     assert((offset & (align - 1)) == 0);
133549c07526SKevin Wolf     assert((bytes & (align - 1)) == 0);
1336abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
13371a62d0acSEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
13381a62d0acSEric Blake                                    align);
1339a604fa2bSEric Blake 
1340e8b65355SStefan Hajnoczi     /*
1341e8b65355SStefan Hajnoczi      * TODO: We would need a per-BDS .supported_read_flags and
1342a604fa2bSEric Blake      * potential fallback support, if we ever implement any read flags
1343a604fa2bSEric Blake      * to pass through to drivers.  For now, there aren't any
1344e8b65355SStefan Hajnoczi      * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
1345e8b65355SStefan Hajnoczi      */
1346e8b65355SStefan Hajnoczi     assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
1347e8b65355SStefan Hajnoczi                        BDRV_REQ_REGISTERED_BUF)));
134861007b31SStefan Hajnoczi 
134961007b31SStefan Hajnoczi     /* Handle Copy on Read and associated serialisation */
135061007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
135161007b31SStefan Hajnoczi         /* If we touch the same cluster it counts as an overlap.  This
135261007b31SStefan Hajnoczi          * guarantees that allocating writes will be serialized and not race
135361007b31SStefan Hajnoczi          * with each other for the same cluster.  For example, in copy-on-read
135461007b31SStefan Hajnoczi          * it ensures that the CoR read and write operations are atomic and
135561007b31SStefan Hajnoczi          * guest writes cannot interleave between them. */
13568ac5aab2SVladimir Sementsov-Ogievskiy         bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
135718fbd0deSPaolo Bonzini     } else {
1358304d9d7fSMax Reitz         bdrv_wait_serialising_requests(req);
135918fbd0deSPaolo Bonzini     }
136061007b31SStefan Hajnoczi 
136161007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
1362d6a644bbSEric Blake         int64_t pnum;
136361007b31SStefan Hajnoczi 
1364897dd0ecSAndrey Shinkevich         /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
1365897dd0ecSAndrey Shinkevich         flags &= ~BDRV_REQ_COPY_ON_READ;
1366897dd0ecSAndrey Shinkevich 
136788e63df2SEric Blake         ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
136861007b31SStefan Hajnoczi         if (ret < 0) {
136961007b31SStefan Hajnoczi             goto out;
137061007b31SStefan Hajnoczi         }
137161007b31SStefan Hajnoczi 
137288e63df2SEric Blake         if (!ret || pnum != bytes) {
137365cd4424SVladimir Sementsov-Ogievskiy             ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
137465cd4424SVladimir Sementsov-Ogievskiy                                            qiov, qiov_offset, flags);
13753299e5ecSVladimir Sementsov-Ogievskiy             goto out;
13763299e5ecSVladimir Sementsov-Ogievskiy         } else if (flags & BDRV_REQ_PREFETCH) {
137761007b31SStefan Hajnoczi             goto out;
137861007b31SStefan Hajnoczi         }
137961007b31SStefan Hajnoczi     }
138061007b31SStefan Hajnoczi 
13811a62d0acSEric Blake     /* Forward the request to the BlockDriver, possibly fragmenting it */
1382*0af02bd1SPaolo Bonzini     total_bytes = bdrv_co_getlength(bs);
138349c07526SKevin Wolf     if (total_bytes < 0) {
138449c07526SKevin Wolf         ret = total_bytes;
138561007b31SStefan Hajnoczi         goto out;
138661007b31SStefan Hajnoczi     }
138761007b31SStefan Hajnoczi 
1388e8b65355SStefan Hajnoczi     assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));
1389897dd0ecSAndrey Shinkevich 
139049c07526SKevin Wolf     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
13911a62d0acSEric Blake     if (bytes <= max_bytes && bytes <= max_transfer) {
1392897dd0ecSAndrey Shinkevich         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
13931a62d0acSEric Blake         goto out;
139461007b31SStefan Hajnoczi     }
139561007b31SStefan Hajnoczi 
13961a62d0acSEric Blake     while (bytes_remaining) {
13978b0c5d76SVladimir Sementsov-Ogievskiy         int64_t num;
13981a62d0acSEric Blake 
13991a62d0acSEric Blake         if (max_bytes) {
14001a62d0acSEric Blake             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
14011a62d0acSEric Blake             assert(num);
14021a62d0acSEric Blake 
14031a62d0acSEric Blake             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1404134b7decSMax Reitz                                      num, qiov,
1405897dd0ecSAndrey Shinkevich                                      qiov_offset + bytes - bytes_remaining,
1406897dd0ecSAndrey Shinkevich                                      flags);
14071a62d0acSEric Blake             max_bytes -= num;
14081a62d0acSEric Blake         } else {
14091a62d0acSEric Blake             num = bytes_remaining;
1410134b7decSMax Reitz             ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1411134b7decSMax Reitz                                     0, bytes_remaining);
14121a62d0acSEric Blake         }
14131a62d0acSEric Blake         if (ret < 0) {
14141a62d0acSEric Blake             goto out;
14151a62d0acSEric Blake         }
14161a62d0acSEric Blake         bytes_remaining -= num;
141761007b31SStefan Hajnoczi     }
141861007b31SStefan Hajnoczi 
141961007b31SStefan Hajnoczi out:
14201a62d0acSEric Blake     return ret < 0 ? ret : 0;
142161007b31SStefan Hajnoczi }
142261007b31SStefan Hajnoczi 
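/*
 * Editor's illustration (assumed numbers, matching the loop above): reads
 * beyond the end of the image return zeroes instead of driver I/O.  With
 * total_bytes = 1 MiB, align = 4096, offset = 1 MiB - 4 KiB and
 * bytes = 8 KiB:
 *
 *     max_bytes = 4096  - this much is read through bdrv_driver_preadv()
 *     the remaining 4096 bytes are filled by qemu_iovec_memset(..., 0, ...)
 *
 * Requests larger than max_transfer are fragmented the same way, one
 * max_transfer-sized driver call per iteration.
 */
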
142361007b31SStefan Hajnoczi /*
14247a3f542fSVladimir Sementsov-Ogievskiy  * Request padding
14257a3f542fSVladimir Sementsov-Ogievskiy  *
14267a3f542fSVladimir Sementsov-Ogievskiy  *  |<---- align ----->|                     |<----- align ---->|
14277a3f542fSVladimir Sementsov-Ogievskiy  *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
14287a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14297a3f542fSVladimir Sementsov-Ogievskiy  * -*----------$-------*-------- ... --------*-----$------------*---
14307a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14317a3f542fSVladimir Sementsov-Ogievskiy  *  |          offset  |                     |     end          |
14327a3f542fSVladimir Sementsov-Ogievskiy  *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
14337a3f542fSVladimir Sementsov-Ogievskiy  *  [buf   ... )                             [tail_buf          )
14347a3f542fSVladimir Sementsov-Ogievskiy  *
14357a3f542fSVladimir Sementsov-Ogievskiy  * @buf is an aligned allocation needed to store @head and @tail paddings. @head
14367a3f542fSVladimir Sementsov-Ogievskiy  * is placed at the beginning of @buf and @tail at the end.
14377a3f542fSVladimir Sementsov-Ogievskiy  *
14387a3f542fSVladimir Sementsov-Ogievskiy  * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
14397a3f542fSVladimir Sementsov-Ogievskiy  * chunk around the tail, if a tail exists.
14407a3f542fSVladimir Sementsov-Ogievskiy  *
14417a3f542fSVladimir Sementsov-Ogievskiy  * @merge_reads is true for small requests, i.e. when
14427a3f542fSVladimir Sementsov-Ogievskiy  * @buf_len == @head + bytes + @tail. In this case it is possible that both
14437a3f542fSVladimir Sementsov-Ogievskiy  * head and tail exist but @buf_len == align and @tail_buf == @buf.
144418743311SHanna Czenczek  *
144518743311SHanna Czenczek  * @write is true for write requests, false for read requests.
144618743311SHanna Czenczek  *
144718743311SHanna Czenczek  * If padding makes the vector too long (exceeding IOV_MAX), then we need to
144818743311SHanna Czenczek  * merge existing vector elements into a single one.  @collapse_bounce_buf acts
144918743311SHanna Czenczek  * as the bounce buffer in such cases.  @pre_collapse_qiov has the pre-collapse
145018743311SHanna Czenczek  * I/O vector elements so for read requests, the data can be copied back after
145118743311SHanna Czenczek  * the read is done.
145261007b31SStefan Hajnoczi  */
14537a3f542fSVladimir Sementsov-Ogievskiy typedef struct BdrvRequestPadding {
14547a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *buf;
14557a3f542fSVladimir Sementsov-Ogievskiy     size_t buf_len;
14567a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *tail_buf;
14577a3f542fSVladimir Sementsov-Ogievskiy     size_t head;
14587a3f542fSVladimir Sementsov-Ogievskiy     size_t tail;
14597a3f542fSVladimir Sementsov-Ogievskiy     bool merge_reads;
146018743311SHanna Czenczek     bool write;
14617a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
146218743311SHanna Czenczek 
146318743311SHanna Czenczek     uint8_t *collapse_bounce_buf;
146418743311SHanna Czenczek     size_t collapse_len;
146518743311SHanna Czenczek     QEMUIOVector pre_collapse_qiov;
14667a3f542fSVladimir Sementsov-Ogievskiy } BdrvRequestPadding;
14677a3f542fSVladimir Sementsov-Ogievskiy 
14687a3f542fSVladimir Sementsov-Ogievskiy static bool bdrv_init_padding(BlockDriverState *bs,
14697a3f542fSVladimir Sementsov-Ogievskiy                               int64_t offset, int64_t bytes,
147018743311SHanna Czenczek                               bool write,
14717a3f542fSVladimir Sementsov-Ogievskiy                               BdrvRequestPadding *pad)
14727a3f542fSVladimir Sementsov-Ogievskiy {
1473a56ed80cSVladimir Sementsov-Ogievskiy     int64_t align = bs->bl.request_alignment;
1474a56ed80cSVladimir Sementsov-Ogievskiy     int64_t sum;
1475a56ed80cSVladimir Sementsov-Ogievskiy 
1476a56ed80cSVladimir Sementsov-Ogievskiy     bdrv_check_request(offset, bytes, &error_abort);
1477a56ed80cSVladimir Sementsov-Ogievskiy     assert(align <= INT_MAX); /* documented in block/block_int.h */
1478a56ed80cSVladimir Sementsov-Ogievskiy     assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */
14797a3f542fSVladimir Sementsov-Ogievskiy 
14807a3f542fSVladimir Sementsov-Ogievskiy     memset(pad, 0, sizeof(*pad));
14817a3f542fSVladimir Sementsov-Ogievskiy 
14827a3f542fSVladimir Sementsov-Ogievskiy     pad->head = offset & (align - 1);
14837a3f542fSVladimir Sementsov-Ogievskiy     pad->tail = ((offset + bytes) & (align - 1));
14847a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
14857a3f542fSVladimir Sementsov-Ogievskiy         pad->tail = align - pad->tail;
14867a3f542fSVladimir Sementsov-Ogievskiy     }
14877a3f542fSVladimir Sementsov-Ogievskiy 
1488ac9d00bfSVladimir Sementsov-Ogievskiy     if (!pad->head && !pad->tail) {
14897a3f542fSVladimir Sementsov-Ogievskiy         return false;
14907a3f542fSVladimir Sementsov-Ogievskiy     }
14917a3f542fSVladimir Sementsov-Ogievskiy 
1492ac9d00bfSVladimir Sementsov-Ogievskiy     assert(bytes); /* Nothing good in aligning zero-length requests */
1493ac9d00bfSVladimir Sementsov-Ogievskiy 
14947a3f542fSVladimir Sementsov-Ogievskiy     sum = pad->head + bytes + pad->tail;
14957a3f542fSVladimir Sementsov-Ogievskiy     pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
14967a3f542fSVladimir Sementsov-Ogievskiy     pad->buf = qemu_blockalign(bs, pad->buf_len);
14977a3f542fSVladimir Sementsov-Ogievskiy     pad->merge_reads = sum == pad->buf_len;
14987a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
14997a3f542fSVladimir Sementsov-Ogievskiy         pad->tail_buf = pad->buf + pad->buf_len - align;
15007a3f542fSVladimir Sementsov-Ogievskiy     }
15017a3f542fSVladimir Sementsov-Ogievskiy 
150218743311SHanna Czenczek     pad->write = write;
150318743311SHanna Czenczek 
15047a3f542fSVladimir Sementsov-Ogievskiy     return true;
15057a3f542fSVladimir Sementsov-Ogievskiy }
15067a3f542fSVladimir Sementsov-Ogievskiy 
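/*
 * Worked examples for bdrv_init_padding() (editor's illustration, assuming
 * align = 512):
 *
 *   offset = 1000, bytes = 2000:
 *     head = 1000 % 512 = 488,  tail = 512 - (3000 % 512) = 72
 *     sum = 488 + 2000 + 72 = 2560 > align with both head and tail set,
 *     so buf_len = 2 * align = 1024, merge_reads = false and
 *     tail_buf = buf + 512.
 *
 *   offset = 100, bytes = 200 (small request):
 *     head = 100,  tail = 512 - 300 = 212,  sum = 512 == buf_len == align,
 *     so merge_reads = true and tail_buf == buf.
 */
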
15077b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
15087b1fb72eSKevin Wolf bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
15097b1fb72eSKevin Wolf                       BdrvRequestPadding *pad, bool zero_middle)
15107a3f542fSVladimir Sementsov-Ogievskiy {
15117a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
15127a3f542fSVladimir Sementsov-Ogievskiy     BlockDriverState *bs = child->bs;
15137a3f542fSVladimir Sementsov-Ogievskiy     uint64_t align = bs->bl.request_alignment;
15147a3f542fSVladimir Sementsov-Ogievskiy     int ret;
15157a3f542fSVladimir Sementsov-Ogievskiy 
15167a3f542fSVladimir Sementsov-Ogievskiy     assert(req->serialising && pad->buf);
15177a3f542fSVladimir Sementsov-Ogievskiy 
15187a3f542fSVladimir Sementsov-Ogievskiy     if (pad->head || pad->merge_reads) {
15198b0c5d76SVladimir Sementsov-Ogievskiy         int64_t bytes = pad->merge_reads ? pad->buf_len : align;
15207a3f542fSVladimir Sementsov-Ogievskiy 
15217a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
15227a3f542fSVladimir Sementsov-Ogievskiy 
15237a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
1524c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
15257a3f542fSVladimir Sementsov-Ogievskiy         }
15267a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
1527c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15287a3f542fSVladimir Sementsov-Ogievskiy         }
15297a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
153065cd4424SVladimir Sementsov-Ogievskiy                                   align, &local_qiov, 0, 0);
15317a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15327a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15337a3f542fSVladimir Sementsov-Ogievskiy         }
15347a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
1535c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
15367a3f542fSVladimir Sementsov-Ogievskiy         }
15377a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
1538c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15397a3f542fSVladimir Sementsov-Ogievskiy         }
15407a3f542fSVladimir Sementsov-Ogievskiy 
15417a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads) {
15427a3f542fSVladimir Sementsov-Ogievskiy             goto zero_mem;
15437a3f542fSVladimir Sementsov-Ogievskiy         }
15447a3f542fSVladimir Sementsov-Ogievskiy     }
15457a3f542fSVladimir Sementsov-Ogievskiy 
15467a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
15477a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
15487a3f542fSVladimir Sementsov-Ogievskiy 
1549c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15507a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(
15517a3f542fSVladimir Sementsov-Ogievskiy                 child, req,
15527a3f542fSVladimir Sementsov-Ogievskiy                 req->overlap_offset + req->overlap_bytes - align,
155365cd4424SVladimir Sementsov-Ogievskiy                 align, align, &local_qiov, 0, 0);
15547a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15557a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15567a3f542fSVladimir Sementsov-Ogievskiy         }
1557c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15587a3f542fSVladimir Sementsov-Ogievskiy     }
15597a3f542fSVladimir Sementsov-Ogievskiy 
15607a3f542fSVladimir Sementsov-Ogievskiy zero_mem:
15617a3f542fSVladimir Sementsov-Ogievskiy     if (zero_middle) {
15627a3f542fSVladimir Sementsov-Ogievskiy         memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
15637a3f542fSVladimir Sementsov-Ogievskiy     }
15647a3f542fSVladimir Sementsov-Ogievskiy 
15657a3f542fSVladimir Sementsov-Ogievskiy     return 0;
15667a3f542fSVladimir Sementsov-Ogievskiy }
15677a3f542fSVladimir Sementsov-Ogievskiy 
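/*
 * Editor's note (not part of the original file): this is the read half of
 * read-modify-write for unaligned writes.  The aligned chunks containing
 * the head and tail are read into pad->buf/pad->tail_buf so that the bytes
 * surrounding the request survive when the aligned buffer is written back.
 * With merge_reads a single aligned read fills the whole pad->buf; callers
 * that write zeroes pass zero_middle = true to clear everything between
 * head and tail afterwards.
 */
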
156818743311SHanna Czenczek /**
156918743311SHanna Czenczek  * Free *pad's associated buffers, and perform any necessary finalization steps.
157018743311SHanna Czenczek  */
157118743311SHanna Czenczek static void bdrv_padding_finalize(BdrvRequestPadding *pad)
15727a3f542fSVladimir Sementsov-Ogievskiy {
157318743311SHanna Czenczek     if (pad->collapse_bounce_buf) {
157418743311SHanna Czenczek         if (!pad->write) {
157518743311SHanna Czenczek             /*
157618743311SHanna Czenczek              * If padding required elements in the vector to be collapsed into a
157718743311SHanna Czenczek              * bounce buffer, copy the bounce buffer content back
157818743311SHanna Czenczek              */
157918743311SHanna Czenczek             qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
158018743311SHanna Czenczek                                 pad->collapse_bounce_buf, pad->collapse_len);
158118743311SHanna Czenczek         }
158218743311SHanna Czenczek         qemu_vfree(pad->collapse_bounce_buf);
158318743311SHanna Czenczek         qemu_iovec_destroy(&pad->pre_collapse_qiov);
158418743311SHanna Czenczek     }
15857a3f542fSVladimir Sementsov-Ogievskiy     if (pad->buf) {
15867a3f542fSVladimir Sementsov-Ogievskiy         qemu_vfree(pad->buf);
15877a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&pad->local_qiov);
15887a3f542fSVladimir Sementsov-Ogievskiy     }
158998ca4549SVladimir Sementsov-Ogievskiy     memset(pad, 0, sizeof(*pad));
15907a3f542fSVladimir Sementsov-Ogievskiy }
15917a3f542fSVladimir Sementsov-Ogievskiy 
15927a3f542fSVladimir Sementsov-Ogievskiy /*
159318743311SHanna Czenczek  * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
159418743311SHanna Czenczek  * ensuring that the resulting vector will not exceed IOV_MAX elements.
159518743311SHanna Czenczek  *
159618743311SHanna Czenczek  * To ensure this, when necessary, the first two or three elements of @iov are
159718743311SHanna Czenczek  * merged into pad->collapse_bounce_buf and replaced by a reference to that
159818743311SHanna Czenczek  * bounce buffer in pad->local_qiov.
159918743311SHanna Czenczek  *
160018743311SHanna Czenczek  * After performing a read request, the data from the bounce buffer must be
160118743311SHanna Czenczek  * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
160218743311SHanna Czenczek  */
160318743311SHanna Czenczek static int bdrv_create_padded_qiov(BlockDriverState *bs,
160418743311SHanna Czenczek                                    BdrvRequestPadding *pad,
160518743311SHanna Czenczek                                    struct iovec *iov, int niov,
160618743311SHanna Czenczek                                    size_t iov_offset, size_t bytes)
160718743311SHanna Czenczek {
160818743311SHanna Czenczek     int padded_niov, surplus_count, collapse_count;
160918743311SHanna Czenczek 
161018743311SHanna Czenczek     /* Assert this invariant */
161118743311SHanna Czenczek     assert(niov <= IOV_MAX);
161218743311SHanna Czenczek 
161318743311SHanna Czenczek     /*
161418743311SHanna Czenczek      * Cannot pad if resulting length would exceed SIZE_MAX.  Returning an error
161518743311SHanna Czenczek      * to the guest is not ideal, but there is little else we can do.  At least
161618743311SHanna Czenczek      * this will practically never happen on 64-bit systems.
161718743311SHanna Czenczek      */
161818743311SHanna Czenczek     if (SIZE_MAX - pad->head < bytes ||
161918743311SHanna Czenczek         SIZE_MAX - pad->head - bytes < pad->tail)
162018743311SHanna Czenczek     {
162118743311SHanna Czenczek         return -EINVAL;
162218743311SHanna Czenczek     }
162318743311SHanna Czenczek 
162418743311SHanna Czenczek     /* Length of the resulting IOV if we just concatenated everything */
162518743311SHanna Czenczek     padded_niov = !!pad->head + niov + !!pad->tail;
162618743311SHanna Czenczek 
162718743311SHanna Czenczek     qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));
162818743311SHanna Czenczek 
162918743311SHanna Czenczek     if (pad->head) {
163018743311SHanna Czenczek         qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
163118743311SHanna Czenczek     }
163218743311SHanna Czenczek 
163318743311SHanna Czenczek     /*
163418743311SHanna Czenczek      * If padded_niov > IOV_MAX, we cannot just concatenate everything.
163518743311SHanna Czenczek      * Instead, merge the first two or three elements of @iov to reduce the
163618743311SHanna Czenczek      * number of vector elements as necessary.
163718743311SHanna Czenczek      */
163818743311SHanna Czenczek     if (padded_niov > IOV_MAX) {
163918743311SHanna Czenczek         /*
164018743311SHanna Czenczek          * Only head and tail can have led to the number of entries exceeding
164118743311SHanna Czenczek          * IOV_MAX, so we can exceed it by the head and tail at most.  We need
164218743311SHanna Czenczek          * to reduce the number of elements by `surplus_count`, so we merge that
164318743311SHanna Czenczek          * many elements plus one into one element.
164418743311SHanna Czenczek          */
164518743311SHanna Czenczek         surplus_count = padded_niov - IOV_MAX;
164618743311SHanna Czenczek         assert(surplus_count <= !!pad->head + !!pad->tail);
164718743311SHanna Czenczek         collapse_count = surplus_count + 1;
164818743311SHanna Czenczek 
164918743311SHanna Czenczek         /*
165018743311SHanna Czenczek          * Move the elements to collapse into `pad->pre_collapse_qiov`, then
165118743311SHanna Czenczek          * advance `iov` (and associated variables) by those elements.
165218743311SHanna Czenczek          */
165318743311SHanna Czenczek         qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
165418743311SHanna Czenczek         qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
165518743311SHanna Czenczek                               collapse_count, iov_offset, SIZE_MAX);
165618743311SHanna Czenczek         iov += collapse_count;
165718743311SHanna Czenczek         iov_offset = 0;
165818743311SHanna Czenczek         niov -= collapse_count;
165918743311SHanna Czenczek         bytes -= pad->pre_collapse_qiov.size;
166018743311SHanna Czenczek 
166118743311SHanna Czenczek         /*
166218743311SHanna Czenczek          * Construct the bounce buffer to match the length of the to-collapse
166318743311SHanna Czenczek          * vector elements, and for write requests, initialize it with the data
166418743311SHanna Czenczek          * from those elements.  Then add it to `pad->local_qiov`.
166518743311SHanna Czenczek          */
166618743311SHanna Czenczek         pad->collapse_len = pad->pre_collapse_qiov.size;
166718743311SHanna Czenczek         pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
166818743311SHanna Czenczek         if (pad->write) {
166918743311SHanna Czenczek             qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
167018743311SHanna Czenczek                               pad->collapse_bounce_buf, pad->collapse_len);
167118743311SHanna Czenczek         }
167218743311SHanna Czenczek         qemu_iovec_add(&pad->local_qiov,
167318743311SHanna Czenczek                        pad->collapse_bounce_buf, pad->collapse_len);
167418743311SHanna Czenczek     }
167518743311SHanna Czenczek 
167618743311SHanna Czenczek     qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);
167718743311SHanna Czenczek 
167818743311SHanna Czenczek     if (pad->tail) {
167918743311SHanna Czenczek         qemu_iovec_add(&pad->local_qiov,
168018743311SHanna Czenczek                        pad->buf + pad->buf_len - pad->tail, pad->tail);
168118743311SHanna Czenczek     }
168218743311SHanna Czenczek 
168318743311SHanna Czenczek     assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
168418743311SHanna Czenczek     return 0;
168518743311SHanna Czenczek }
168618743311SHanna Czenczek 
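/*
 * Worked example (editor's illustration): assume IOV_MAX = 1024 and a
 * sliced vector with niov = 1023 that needs both head and tail padding:
 *
 *     padded_niov    = 1 + 1023 + 1 = 1025   (one over IOV_MAX)
 *     surplus_count  = 1
 *     collapse_count = 2   - the first two elements of @iov are copied into
 *                            collapse_bounce_buf and replaced by one entry
 *
 * The final vector has exactly IOV_MAX entries: head + 1 bounce entry +
 * 1021 remaining elements + tail.
 */
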
168718743311SHanna Czenczek /*
16887a3f542fSVladimir Sementsov-Ogievskiy  * bdrv_pad_request
16897a3f542fSVladimir Sementsov-Ogievskiy  *
16907a3f542fSVladimir Sementsov-Ogievskiy  * Exchange request parameters with the padded request if needed. The RMW read
16917a3f542fSVladimir Sementsov-Ogievskiy  * of the padding is not done here; bdrv_padding_rmw_read() should be called
16927a3f542fSVladimir Sementsov-Ogievskiy  * separately if needed.
16937a3f542fSVladimir Sementsov-Ogievskiy  *
169418743311SHanna Czenczek  * @write is true for write requests, false for read requests.
169518743311SHanna Czenczek  *
169698ca4549SVladimir Sementsov-Ogievskiy  * Request parameters (@qiov, @qiov_offset, @offset, @bytes) are in-out:
169798ca4549SVladimir Sementsov-Ogievskiy  *  - on function start they represent the original request
169898ca4549SVladimir Sementsov-Ogievskiy  *  - on failure or when padding is not needed they are unchanged
169998ca4549SVladimir Sementsov-Ogievskiy  *  - on success when padding is needed they represent the padded request
17007a3f542fSVladimir Sementsov-Ogievskiy  */
170198ca4549SVladimir Sementsov-Ogievskiy static int bdrv_pad_request(BlockDriverState *bs,
17021acc3466SVladimir Sementsov-Ogievskiy                             QEMUIOVector **qiov, size_t *qiov_offset,
170337e9403eSVladimir Sementsov-Ogievskiy                             int64_t *offset, int64_t *bytes,
170418743311SHanna Czenczek                             bool write,
1705e8b65355SStefan Hajnoczi                             BdrvRequestPadding *pad, bool *padded,
1706e8b65355SStefan Hajnoczi                             BdrvRequestFlags *flags)
17077a3f542fSVladimir Sementsov-Ogievskiy {
17084c002cefSVladimir Sementsov-Ogievskiy     int ret;
170918743311SHanna Czenczek     struct iovec *sliced_iov;
171018743311SHanna Czenczek     int sliced_niov;
171118743311SHanna Czenczek     size_t sliced_head, sliced_tail;
17124c002cefSVladimir Sementsov-Ogievskiy 
171337e9403eSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);
171437e9403eSVladimir Sementsov-Ogievskiy 
171518743311SHanna Czenczek     if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
171698ca4549SVladimir Sementsov-Ogievskiy         if (padded) {
171798ca4549SVladimir Sementsov-Ogievskiy             *padded = false;
171898ca4549SVladimir Sementsov-Ogievskiy         }
171998ca4549SVladimir Sementsov-Ogievskiy         return 0;
17207a3f542fSVladimir Sementsov-Ogievskiy     }
17217a3f542fSVladimir Sementsov-Ogievskiy 
172218743311SHanna Czenczek     sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
172318743311SHanna Czenczek                                   &sliced_head, &sliced_tail,
172418743311SHanna Czenczek                                   &sliced_niov);
172518743311SHanna Czenczek 
172618743311SHanna Czenczek     /* Guaranteed by bdrv_check_qiov_request() */
172718743311SHanna Czenczek     assert(*bytes <= SIZE_MAX);
172818743311SHanna Czenczek     ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
172918743311SHanna Czenczek                                   sliced_head, *bytes);
173098ca4549SVladimir Sementsov-Ogievskiy     if (ret < 0) {
173118743311SHanna Czenczek         bdrv_padding_finalize(pad);
173298ca4549SVladimir Sementsov-Ogievskiy         return ret;
173398ca4549SVladimir Sementsov-Ogievskiy     }
17347a3f542fSVladimir Sementsov-Ogievskiy     *bytes += pad->head + pad->tail;
17357a3f542fSVladimir Sementsov-Ogievskiy     *offset -= pad->head;
17367a3f542fSVladimir Sementsov-Ogievskiy     *qiov = &pad->local_qiov;
17371acc3466SVladimir Sementsov-Ogievskiy     *qiov_offset = 0;
173898ca4549SVladimir Sementsov-Ogievskiy     if (padded) {
173998ca4549SVladimir Sementsov-Ogievskiy         *padded = true;
174098ca4549SVladimir Sementsov-Ogievskiy     }
1741e8b65355SStefan Hajnoczi     if (flags) {
1742e8b65355SStefan Hajnoczi         /* Can't use optimization hint with bounce buffer */
1743e8b65355SStefan Hajnoczi         *flags &= ~BDRV_REQ_REGISTERED_BUF;
1744e8b65355SStefan Hajnoczi     }
17457a3f542fSVladimir Sementsov-Ogievskiy 
174698ca4549SVladimir Sementsov-Ogievskiy     return 0;
17477a3f542fSVladimir Sementsov-Ogievskiy }
17487a3f542fSVladimir Sementsov-Ogievskiy 
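/*
 * Editor's sketch (not part of the original file): callers pass their
 * request parameters by reference and then operate on whatever comes back,
 * mirroring the real call site in bdrv_co_preadv_part() below:
 *
 *     ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes,
 *                            false, &pad, NULL, &flags);
 *     if (ret < 0) {
 *         goto fail;
 *     }
 *     ...issue the (possibly padded) request...
 *     bdrv_padding_finalize(&pad);
 */
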
1749a03ef88fSKevin Wolf int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1750e9e52efdSVladimir Sementsov-Ogievskiy     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
175161007b31SStefan Hajnoczi     BdrvRequestFlags flags)
175261007b31SStefan Hajnoczi {
1753967d7905SEmanuele Giuseppe Esposito     IO_CODE();
17541acc3466SVladimir Sementsov-Ogievskiy     return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
17551acc3466SVladimir Sementsov-Ogievskiy }
17561acc3466SVladimir Sementsov-Ogievskiy 
17571acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
175837e9403eSVladimir Sementsov-Ogievskiy     int64_t offset, int64_t bytes,
17591acc3466SVladimir Sementsov-Ogievskiy     QEMUIOVector *qiov, size_t qiov_offset,
17601acc3466SVladimir Sementsov-Ogievskiy     BdrvRequestFlags flags)
17611acc3466SVladimir Sementsov-Ogievskiy {
1762a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
176361007b31SStefan Hajnoczi     BdrvTrackedRequest req;
17647a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
176561007b31SStefan Hajnoczi     int ret;
1766967d7905SEmanuele Giuseppe Esposito     IO_CODE();
176761007b31SStefan Hajnoczi 
176837e9403eSVladimir Sementsov-Ogievskiy     trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
176961007b31SStefan Hajnoczi 
17701e97be91SEmanuele Giuseppe Esposito     if (!bdrv_co_is_inserted(bs)) {
1771f4dad307SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
1772f4dad307SVladimir Sementsov-Ogievskiy     }
1773f4dad307SVladimir Sementsov-Ogievskiy 
177463f4ad11SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
177561007b31SStefan Hajnoczi     if (ret < 0) {
177661007b31SStefan Hajnoczi         return ret;
177761007b31SStefan Hajnoczi     }
177861007b31SStefan Hajnoczi 
1779ac9d00bfSVladimir Sementsov-Ogievskiy     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1780ac9d00bfSVladimir Sementsov-Ogievskiy         /*
1781ac9d00bfSVladimir Sementsov-Ogievskiy          * Aligning a zero-length request is nonsense. Even if the driver has
1782ac9d00bfSVladimir Sementsov-Ogievskiy          * a special meaning for zero length (like
1783ac9d00bfSVladimir Sementsov-Ogievskiy          * qcow2_co_pwritev_compressed_part), we can't pass it through due to
1784ac9d00bfSVladimir Sementsov-Ogievskiy          * request_alignment.
1785ac9d00bfSVladimir Sementsov-Ogievskiy          * Still, there is no reason to return an error if someone does an
1786ac9d00bfSVladimir Sementsov-Ogievskiy          * unaligned zero-length read occasionally.
1787ac9d00bfSVladimir Sementsov-Ogievskiy          */
1788ac9d00bfSVladimir Sementsov-Ogievskiy         return 0;
1789ac9d00bfSVladimir Sementsov-Ogievskiy     }
1790ac9d00bfSVladimir Sementsov-Ogievskiy 
179199723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
179299723548SPaolo Bonzini 
17939568b511SWen Congyang     /* Don't do copy-on-read if we read data before write operation */
1794d73415a3SStefan Hajnoczi     if (qatomic_read(&bs->copy_on_read)) {
179561007b31SStefan Hajnoczi         flags |= BDRV_REQ_COPY_ON_READ;
179661007b31SStefan Hajnoczi     }
179761007b31SStefan Hajnoczi 
179818743311SHanna Czenczek     ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
179918743311SHanna Czenczek                            &pad, NULL, &flags);
180098ca4549SVladimir Sementsov-Ogievskiy     if (ret < 0) {
180187ab8802SKevin Wolf         goto fail;
180298ca4549SVladimir Sementsov-Ogievskiy     }
180361007b31SStefan Hajnoczi 
1804ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
18057a3f542fSVladimir Sementsov-Ogievskiy     ret = bdrv_aligned_preadv(child, &req, offset, bytes,
18067a3f542fSVladimir Sementsov-Ogievskiy                               bs->bl.request_alignment,
18071acc3466SVladimir Sementsov-Ogievskiy                               qiov, qiov_offset, flags);
180861007b31SStefan Hajnoczi     tracked_request_end(&req);
180918743311SHanna Czenczek     bdrv_padding_finalize(&pad);
181061007b31SStefan Hajnoczi 
181187ab8802SKevin Wolf fail:
181287ab8802SKevin Wolf     bdrv_dec_in_flight(bs);
181387ab8802SKevin Wolf 
181461007b31SStefan Hajnoczi     return ret;
181561007b31SStefan Hajnoczi }
181661007b31SStefan Hajnoczi 
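/*
 * Editor's summary (not part of the original file): the public read path
 * above is, in order: check that media is inserted and the request fits in
 * 32 bits, allow unaligned zero-length reads, take an in_flight reference,
 * optionally request copy-on-read, pad to bs->bl.request_alignment with
 * bdrv_pad_request(), track the request, forward it to
 * bdrv_aligned_preadv(), then undo tracking and padding and drop the
 * in_flight reference.
 */
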
1817eeb47775SKevin Wolf static int coroutine_fn GRAPH_RDLOCK
1818eeb47775SKevin Wolf bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1819eeb47775SKevin Wolf                          BdrvRequestFlags flags)
182061007b31SStefan Hajnoczi {
182161007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
182261007b31SStefan Hajnoczi     QEMUIOVector qiov;
18230d93ed08SVladimir Sementsov-Ogievskiy     void *buf = NULL;
182461007b31SStefan Hajnoczi     int ret = 0;
1825465fe887SEric Blake     bool need_flush = false;
1826443668caSDenis V. Lunev     int head = 0;
1827443668caSDenis V. Lunev     int tail = 0;
182861007b31SStefan Hajnoczi 
18292aaa3f9bSVladimir Sementsov-Ogievskiy     int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
18302aaa3f9bSVladimir Sementsov-Ogievskiy                                             INT64_MAX);
1831a5b8dd2cSEric Blake     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1832a5b8dd2cSEric Blake                         bs->bl.request_alignment);
1833cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1834cf081fcaSEric Blake 
1835abaf8b75SKevin Wolf     assert_bdrv_graph_readable();
18365ae07b14SVladimir Sementsov-Ogievskiy     bdrv_check_request(offset, bytes, &error_abort);
18375ae07b14SVladimir Sementsov-Ogievskiy 
1838d470ad42SMax Reitz     if (!drv) {
1839d470ad42SMax Reitz         return -ENOMEDIUM;
1840d470ad42SMax Reitz     }
1841d470ad42SMax Reitz 
1842fe0480d6SKevin Wolf     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1843fe0480d6SKevin Wolf         return -ENOTSUP;
1844fe0480d6SKevin Wolf     }
1845fe0480d6SKevin Wolf 
1846e8b65355SStefan Hajnoczi     /* By definition there is no user buffer so this flag doesn't make sense */
1847e8b65355SStefan Hajnoczi     if (flags & BDRV_REQ_REGISTERED_BUF) {
1848e8b65355SStefan Hajnoczi         return -EINVAL;
1849e8b65355SStefan Hajnoczi     }
1850e8b65355SStefan Hajnoczi 
18510bc329fbSHanna Reitz     /* Invalidate the cached block-status data range if this write overlaps */
18520bc329fbSHanna Reitz     bdrv_bsc_invalidate_range(bs, offset, bytes);
18530bc329fbSHanna Reitz 
1854b8d0a980SEric Blake     assert(alignment % bs->bl.request_alignment == 0);
1855b8d0a980SEric Blake     head = offset % alignment;
1856f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % alignment;
1857b8d0a980SEric Blake     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1858b8d0a980SEric Blake     assert(max_write_zeroes >= bs->bl.request_alignment);
185961007b31SStefan Hajnoczi 
1860f5a5ca79SManos Pitsidianakis     while (bytes > 0 && !ret) {
18615ae07b14SVladimir Sementsov-Ogievskiy         int64_t num = bytes;
186261007b31SStefan Hajnoczi 
186361007b31SStefan Hajnoczi         /* Align the request.  Block drivers can expect that the "bulk" of
1864443668caSDenis V. Lunev          * the request is aligned and that unaligned requests do not cross
1865443668caSDenis V. Lunev          * cluster boundaries.
186661007b31SStefan Hajnoczi          */
1867443668caSDenis V. Lunev         if (head) {
1868b2f95feeSEric Blake             /* Make a small request up to the first aligned sector. For
1869b2f95feeSEric Blake              * convenience, limit this request to max_transfer even if
1870b2f95feeSEric Blake              * we don't need to fall back to writes.  */
1871f5a5ca79SManos Pitsidianakis             num = MIN(MIN(bytes, max_transfer), alignment - head);
1872b2f95feeSEric Blake             head = (head + num) % alignment;
1873b2f95feeSEric Blake             assert(num < max_write_zeroes);
1874d05aa8bbSEric Blake         } else if (tail && num > alignment) {
1875443668caSDenis V. Lunev             /* Shorten the request to the last aligned sector.  */
1876443668caSDenis V. Lunev             num -= tail;
187761007b31SStefan Hajnoczi         }
187861007b31SStefan Hajnoczi 
187961007b31SStefan Hajnoczi         /* limit request size */
188061007b31SStefan Hajnoczi         if (num > max_write_zeroes) {
188161007b31SStefan Hajnoczi             num = max_write_zeroes;
188261007b31SStefan Hajnoczi         }
188361007b31SStefan Hajnoczi 
188461007b31SStefan Hajnoczi         ret = -ENOTSUP;
188561007b31SStefan Hajnoczi         /* First try the efficient write zeroes operation */
1886d05aa8bbSEric Blake         if (drv->bdrv_co_pwrite_zeroes) {
1887d05aa8bbSEric Blake             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1888d05aa8bbSEric Blake                                              flags & bs->supported_zero_flags);
1889d05aa8bbSEric Blake             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1890d05aa8bbSEric Blake                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1891d05aa8bbSEric Blake                 need_flush = true;
1892d05aa8bbSEric Blake             }
1893465fe887SEric Blake         } else {
1894465fe887SEric Blake             assert(!bs->supported_zero_flags);
189561007b31SStefan Hajnoczi         }
189661007b31SStefan Hajnoczi 
1897294682ccSAndrey Shinkevich         if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
189861007b31SStefan Hajnoczi             /* Fall back to bounce buffer if write zeroes is unsupported */
1899465fe887SEric Blake             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1900465fe887SEric Blake 
1901465fe887SEric Blake             if ((flags & BDRV_REQ_FUA) &&
1902465fe887SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1903465fe887SEric Blake                 /* No need for bdrv_driver_pwrite() to do a fallback
1904465fe887SEric Blake                  * flush on each chunk; use just one at the end */
1905465fe887SEric Blake                 write_flags &= ~BDRV_REQ_FUA;
1906465fe887SEric Blake                 need_flush = true;
1907465fe887SEric Blake             }
19085def6b80SEric Blake             num = MIN(num, max_transfer);
19090d93ed08SVladimir Sementsov-Ogievskiy             if (buf == NULL) {
19100d93ed08SVladimir Sementsov-Ogievskiy                 buf = qemu_try_blockalign0(bs, num);
19110d93ed08SVladimir Sementsov-Ogievskiy                 if (buf == NULL) {
191261007b31SStefan Hajnoczi                     ret = -ENOMEM;
191361007b31SStefan Hajnoczi                     goto fail;
191461007b31SStefan Hajnoczi                 }
191561007b31SStefan Hajnoczi             }
19160d93ed08SVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&qiov, buf, num);
191761007b31SStefan Hajnoczi 
1918ac850bf0SVladimir Sementsov-Ogievskiy             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
191961007b31SStefan Hajnoczi 
192061007b31SStefan Hajnoczi             /* Keep the bounce buffer around if it is big enough for
192161007b31SStefan Hajnoczi              * all future requests.
192261007b31SStefan Hajnoczi              */
19235def6b80SEric Blake             if (num < max_transfer) {
19240d93ed08SVladimir Sementsov-Ogievskiy                 qemu_vfree(buf);
19250d93ed08SVladimir Sementsov-Ogievskiy                 buf = NULL;
192661007b31SStefan Hajnoczi             }
192761007b31SStefan Hajnoczi         }
192861007b31SStefan Hajnoczi 
1929d05aa8bbSEric Blake         offset += num;
1930f5a5ca79SManos Pitsidianakis         bytes -= num;
193161007b31SStefan Hajnoczi     }
193261007b31SStefan Hajnoczi 
193361007b31SStefan Hajnoczi fail:
1934465fe887SEric Blake     if (ret == 0 && need_flush) {
1935465fe887SEric Blake         ret = bdrv_co_flush(bs);
1936465fe887SEric Blake     }
19370d93ed08SVladimir Sementsov-Ogievskiy     qemu_vfree(buf);
193861007b31SStefan Hajnoczi     return ret;
193961007b31SStefan Hajnoczi }
194061007b31SStefan Hajnoczi 
1941a00e70c0SEmanuele Giuseppe Esposito static inline int coroutine_fn GRAPH_RDLOCK
1942fcfd9adeSVladimir Sementsov-Ogievskiy bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
194385fe2479SFam Zheng                           BdrvTrackedRequest *req, int flags)
194485fe2479SFam Zheng {
194585fe2479SFam Zheng     BlockDriverState *bs = child->bs;
1946fcfd9adeSVladimir Sementsov-Ogievskiy 
1947fcfd9adeSVladimir Sementsov-Ogievskiy     bdrv_check_request(offset, bytes, &error_abort);
194885fe2479SFam Zheng 
1949307261b2SVladimir Sementsov-Ogievskiy     if (bdrv_is_read_only(bs)) {
195085fe2479SFam Zheng         return -EPERM;
195185fe2479SFam Zheng     }
195285fe2479SFam Zheng 
195385fe2479SFam Zheng     assert(!(bs->open_flags & BDRV_O_INACTIVE));
195485fe2479SFam Zheng     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
195585fe2479SFam Zheng     assert(!(flags & ~BDRV_REQ_MASK));
1956d1a764d1SVladimir Sementsov-Ogievskiy     assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
195785fe2479SFam Zheng 
195885fe2479SFam Zheng     if (flags & BDRV_REQ_SERIALISING) {
1959d1a764d1SVladimir Sementsov-Ogievskiy         QEMU_LOCK_GUARD(&bs->reqs_lock);
1960d1a764d1SVladimir Sementsov-Ogievskiy 
1961d1a764d1SVladimir Sementsov-Ogievskiy         tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1962d1a764d1SVladimir Sementsov-Ogievskiy 
1963d1a764d1SVladimir Sementsov-Ogievskiy         if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1964d1a764d1SVladimir Sementsov-Ogievskiy             return -EBUSY;
1965d1a764d1SVladimir Sementsov-Ogievskiy         }
1966d1a764d1SVladimir Sementsov-Ogievskiy 
1967d1a764d1SVladimir Sementsov-Ogievskiy         bdrv_wait_serialising_requests_locked(req);
196818fbd0deSPaolo Bonzini     } else {
196918fbd0deSPaolo Bonzini         bdrv_wait_serialising_requests(req);
197085fe2479SFam Zheng     }
197185fe2479SFam Zheng 
197285fe2479SFam Zheng     assert(req->overlap_offset <= offset);
197385fe2479SFam Zheng     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1974fcfd9adeSVladimir Sementsov-Ogievskiy     assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
1975fcfd9adeSVladimir Sementsov-Ogievskiy            child->perm & BLK_PERM_RESIZE);
197685fe2479SFam Zheng 
1977cd47d792SFam Zheng     switch (req->type) {
1978cd47d792SFam Zheng     case BDRV_TRACKED_WRITE:
1979cd47d792SFam Zheng     case BDRV_TRACKED_DISCARD:
198085fe2479SFam Zheng         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
198185fe2479SFam Zheng             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
198285fe2479SFam Zheng         } else {
198385fe2479SFam Zheng             assert(child->perm & BLK_PERM_WRITE);
198485fe2479SFam Zheng         }
198594783301SVladimir Sementsov-Ogievskiy         bdrv_write_threshold_check_write(bs, offset, bytes);
198694783301SVladimir Sementsov-Ogievskiy         return 0;
1987cd47d792SFam Zheng     case BDRV_TRACKED_TRUNCATE:
1988cd47d792SFam Zheng         assert(child->perm & BLK_PERM_RESIZE);
1989cd47d792SFam Zheng         return 0;
1990cd47d792SFam Zheng     default:
1991cd47d792SFam Zheng         abort();
1992cd47d792SFam Zheng     }
199385fe2479SFam Zheng }
199485fe2479SFam Zheng 
199585fe2479SFam Zheng static inline void coroutine_fn
1996fcfd9adeSVladimir Sementsov-Ogievskiy bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
199785fe2479SFam Zheng                          BdrvTrackedRequest *req, int ret)
199885fe2479SFam Zheng {
199985fe2479SFam Zheng     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
200085fe2479SFam Zheng     BlockDriverState *bs = child->bs;
200185fe2479SFam Zheng 
2002fcfd9adeSVladimir Sementsov-Ogievskiy     bdrv_check_request(offset, bytes, &error_abort);
2003fcfd9adeSVladimir Sementsov-Ogievskiy 
2004d73415a3SStefan Hajnoczi     qatomic_inc(&bs->write_gen);
200585fe2479SFam Zheng 
200600695c27SFam Zheng     /*
200700695c27SFam Zheng      * Discard cannot extend the image, but in error handling cases, such as
200800695c27SFam Zheng      * when reverting a qcow2 cluster allocation, the discarded range can
200900695c27SFam Zheng      * extend past the end of the image file, so we cannot assert about
201000695c27SFam Zheng      * BDRV_TRACKED_DISCARD here. Instead, just skip it, since semantically
201100695c27SFam Zheng      * a discard request beyond EOF cannot expand the image anyway.
201200695c27SFam Zheng      */
20137f8f03efSFam Zheng     if (ret == 0 &&
2014cd47d792SFam Zheng         (req->type == BDRV_TRACKED_TRUNCATE ||
2015cd47d792SFam Zheng          end_sector > bs->total_sectors) &&
201600695c27SFam Zheng         req->type != BDRV_TRACKED_DISCARD) {
20177f8f03efSFam Zheng         bs->total_sectors = end_sector;
20187f8f03efSFam Zheng         bdrv_parent_cb_resize(bs);
20197f8f03efSFam Zheng         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
202085fe2479SFam Zheng     }
202100695c27SFam Zheng     if (req->bytes) {
202200695c27SFam Zheng         switch (req->type) {
202300695c27SFam Zheng         case BDRV_TRACKED_WRITE:
202400695c27SFam Zheng             stat64_max(&bs->wr_highest_offset, offset + bytes);
202500695c27SFam Zheng             /* fall through, to set dirty bits */
202600695c27SFam Zheng         case BDRV_TRACKED_DISCARD:
20277f8f03efSFam Zheng             bdrv_set_dirty(bs, offset, bytes);
202800695c27SFam Zheng             break;
202900695c27SFam Zheng         default:
203000695c27SFam Zheng             break;
203100695c27SFam Zheng         }
203200695c27SFam Zheng     }
203385fe2479SFam Zheng }
203485fe2479SFam Zheng 
203561007b31SStefan Hajnoczi /*
203604ed95f4SEric Blake  * Forwards an already correctly aligned write request to the BlockDriver,
203704ed95f4SEric Blake  * after possibly fragmenting it.
203861007b31SStefan Hajnoczi  */
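/*
 * Fragmentation sketch with hypothetical limits: if bs->bl.max_transfer is
 * 64 KiB, an aligned 160 KiB write reaches the driver as three chunks of
 * 64 KiB + 64 KiB + 32 KiB.  When BDRV_REQ_FUA has to be emulated by a
 * flush, the flag is dropped from all but the final chunk, so a single
 * flush suffices.
 */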
20397b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
20407b1fb72eSKevin Wolf bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
20417b1fb72eSKevin Wolf                      int64_t offset, int64_t bytes, int64_t align,
20427b1fb72eSKevin Wolf                      QEMUIOVector *qiov, size_t qiov_offset,
2043e75abedaSVladimir Sementsov-Ogievskiy                      BdrvRequestFlags flags)
204461007b31SStefan Hajnoczi {
204585c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
204661007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
204761007b31SStefan Hajnoczi     int ret;
204861007b31SStefan Hajnoczi 
2049fcfd9adeSVladimir Sementsov-Ogievskiy     int64_t bytes_remaining = bytes;
205004ed95f4SEric Blake     int max_transfer;
205161007b31SStefan Hajnoczi 
2052fcfd9adeSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2053fcfd9adeSVladimir Sementsov-Ogievskiy 
2054d470ad42SMax Reitz     if (!drv) {
2055d470ad42SMax Reitz         return -ENOMEDIUM;
2056d470ad42SMax Reitz     }
2057d470ad42SMax Reitz 
2058d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
2059d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
2060d6883bc9SVladimir Sementsov-Ogievskiy     }
2061d6883bc9SVladimir Sementsov-Ogievskiy 
2062cff86b38SEric Blake     assert(is_power_of_2(align));
2063cff86b38SEric Blake     assert((offset & (align - 1)) == 0);
2064cff86b38SEric Blake     assert((bytes & (align - 1)) == 0);
206504ed95f4SEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
206604ed95f4SEric Blake                                    align);
206761007b31SStefan Hajnoczi 
206885fe2479SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
206961007b31SStefan Hajnoczi 
207061007b31SStefan Hajnoczi     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
2071c1499a5eSEric Blake         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
207228c4da28SVladimir Sementsov-Ogievskiy         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
207361007b31SStefan Hajnoczi         flags |= BDRV_REQ_ZERO_WRITE;
207461007b31SStefan Hajnoczi         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
207561007b31SStefan Hajnoczi             flags |= BDRV_REQ_MAY_UNMAP;
207661007b31SStefan Hajnoczi         }
20773c586715SStefan Hajnoczi 
20783c586715SStefan Hajnoczi         /* Can't use optimization hint with bufferless zero write */
20793c586715SStefan Hajnoczi         flags &= ~BDRV_REQ_REGISTERED_BUF;
208061007b31SStefan Hajnoczi     }
208161007b31SStefan Hajnoczi 
208261007b31SStefan Hajnoczi     if (ret < 0) {
208361007b31SStefan Hajnoczi         /* Do nothing; the write notifier decided to fail this request */
208461007b31SStefan Hajnoczi     } else if (flags & BDRV_REQ_ZERO_WRITE) {
2085c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
20869896c876SKevin Wolf         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
20873ea1a091SPavel Butsykin     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
208828c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
208928c4da28SVladimir Sementsov-Ogievskiy                                              qiov, qiov_offset);
209004ed95f4SEric Blake     } else if (bytes <= max_transfer) {
2091c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
209228c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
209304ed95f4SEric Blake     } else {
2094c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
209504ed95f4SEric Blake         while (bytes_remaining) {
209604ed95f4SEric Blake             int num = MIN(bytes_remaining, max_transfer);
209704ed95f4SEric Blake             int local_flags = flags;
209804ed95f4SEric Blake 
209904ed95f4SEric Blake             assert(num);
210004ed95f4SEric Blake             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
210104ed95f4SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
210204ed95f4SEric Blake                 /* If FUA is going to be emulated by flush, we only
210304ed95f4SEric Blake                  * need to flush on the last iteration */
210404ed95f4SEric Blake                 local_flags &= ~BDRV_REQ_FUA;
210504ed95f4SEric Blake             }
210604ed95f4SEric Blake 
210704ed95f4SEric Blake             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2108134b7decSMax Reitz                                       num, qiov,
2109134b7decSMax Reitz                                       qiov_offset + bytes - bytes_remaining,
211028c4da28SVladimir Sementsov-Ogievskiy                                       local_flags);
211104ed95f4SEric Blake             if (ret < 0) {
211204ed95f4SEric Blake                 break;
211304ed95f4SEric Blake             }
211404ed95f4SEric Blake             bytes_remaining -= num;
211504ed95f4SEric Blake         }
211661007b31SStefan Hajnoczi     }
2117c834dc05SEmanuele Giuseppe Esposito     bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);
211861007b31SStefan Hajnoczi 
211961007b31SStefan Hajnoczi     if (ret >= 0) {
212004ed95f4SEric Blake         ret = 0;
212161007b31SStefan Hajnoczi     }
212285fe2479SFam Zheng     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
212361007b31SStefan Hajnoczi 
212461007b31SStefan Hajnoczi     return ret;
212561007b31SStefan Hajnoczi }
212661007b31SStefan Hajnoczi 
21277b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
21287b1fb72eSKevin Wolf bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
21297b1fb72eSKevin Wolf                         BdrvRequestFlags flags, BdrvTrackedRequest *req)
21309eeb6dd1SFam Zheng {
213185c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
21329eeb6dd1SFam Zheng     QEMUIOVector local_qiov;
2133a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
21349eeb6dd1SFam Zheng     int ret = 0;
21357a3f542fSVladimir Sementsov-Ogievskiy     bool padding;
21367a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
21379eeb6dd1SFam Zheng 
2138e8b65355SStefan Hajnoczi     /* This flag doesn't make sense for padding or zero writes */
2139e8b65355SStefan Hajnoczi     flags &= ~BDRV_REQ_REGISTERED_BUF;
2140e8b65355SStefan Hajnoczi 
214118743311SHanna Czenczek     padding = bdrv_init_padding(bs, offset, bytes, true, &pad);
21427a3f542fSVladimir Sementsov-Ogievskiy     if (padding) {
214345e62b46SVladimir Sementsov-Ogievskiy         assert(!(flags & BDRV_REQ_NO_WAIT));
21448ac5aab2SVladimir Sementsov-Ogievskiy         bdrv_make_request_serialising(req, align);
21459eeb6dd1SFam Zheng 
21467a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, req, &pad, true);
21477a3f542fSVladimir Sementsov-Ogievskiy 
21487a3f542fSVladimir Sementsov-Ogievskiy         if (pad.head || pad.merge_reads) {
21497a3f542fSVladimir Sementsov-Ogievskiy             int64_t aligned_offset = offset & ~(align - 1);
21507a3f542fSVladimir Sementsov-Ogievskiy             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
21517a3f542fSVladimir Sementsov-Ogievskiy 
21527a3f542fSVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
21537a3f542fSVladimir Sementsov-Ogievskiy             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
215428c4da28SVladimir Sementsov-Ogievskiy                                        align, &local_qiov, 0,
21559eeb6dd1SFam Zheng                                        flags & ~BDRV_REQ_ZERO_WRITE);
21567a3f542fSVladimir Sementsov-Ogievskiy             if (ret < 0 || pad.merge_reads) {
21577a3f542fSVladimir Sementsov-Ogievskiy                 /* Error or all work is done */
21587a3f542fSVladimir Sementsov-Ogievskiy                 goto out;
21599eeb6dd1SFam Zheng             }
21607a3f542fSVladimir Sementsov-Ogievskiy             offset += write_bytes - pad.head;
21617a3f542fSVladimir Sementsov-Ogievskiy             bytes -= write_bytes - pad.head;
21627a3f542fSVladimir Sementsov-Ogievskiy         }
21639eeb6dd1SFam Zheng     }
21649eeb6dd1SFam Zheng 
21659eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
21669eeb6dd1SFam Zheng     if (bytes >= align) {
21679eeb6dd1SFam Zheng         /* Write the aligned part in the middle. */
2168fcfd9adeSVladimir Sementsov-Ogievskiy         int64_t aligned_bytes = bytes & ~(align - 1);
216985c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
217028c4da28SVladimir Sementsov-Ogievskiy                                    NULL, 0, flags);
21719eeb6dd1SFam Zheng         if (ret < 0) {
21727a3f542fSVladimir Sementsov-Ogievskiy             goto out;
21739eeb6dd1SFam Zheng         }
21749eeb6dd1SFam Zheng         bytes -= aligned_bytes;
21759eeb6dd1SFam Zheng         offset += aligned_bytes;
21769eeb6dd1SFam Zheng     }
21779eeb6dd1SFam Zheng 
21789eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
21799eeb6dd1SFam Zheng     if (bytes) {
21807a3f542fSVladimir Sementsov-Ogievskiy         assert(align == pad.tail + bytes);
21819eeb6dd1SFam Zheng 
21827a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
218385c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
218428c4da28SVladimir Sementsov-Ogievskiy                                    &local_qiov, 0,
218528c4da28SVladimir Sementsov-Ogievskiy                                    flags & ~BDRV_REQ_ZERO_WRITE);
21869eeb6dd1SFam Zheng     }
21879eeb6dd1SFam Zheng 
21887a3f542fSVladimir Sementsov-Ogievskiy out:
218918743311SHanna Czenczek     bdrv_padding_finalize(&pad);
21907a3f542fSVladimir Sementsov-Ogievskiy 
21917a3f542fSVladimir Sementsov-Ogievskiy     return ret;
21929eeb6dd1SFam Zheng }
21939eeb6dd1SFam Zheng 
219461007b31SStefan Hajnoczi /*
219561007b31SStefan Hajnoczi  * Handle a write request in coroutine context
219661007b31SStefan Hajnoczi  */
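/*
 * Minimal caller sketch (hypothetical coroutine context, with "buf" being
 * a 4096-byte buffer and "child" a valid BdrvChild):
 *
 *     QEMUIOVector qiov;
 *     qemu_iovec_init_buf(&qiov, buf, 4096);
 *     ret = bdrv_co_pwritev(child, 0, 4096, &qiov, 0);
 */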
2197a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2198e9e52efdSVladimir Sementsov-Ogievskiy     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
219961007b31SStefan Hajnoczi     BdrvRequestFlags flags)
220061007b31SStefan Hajnoczi {
2201967d7905SEmanuele Giuseppe Esposito     IO_CODE();
22021acc3466SVladimir Sementsov-Ogievskiy     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
22031acc3466SVladimir Sementsov-Ogievskiy }
22041acc3466SVladimir Sementsov-Ogievskiy 
22051acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
220637e9403eSVladimir Sementsov-Ogievskiy     int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
22071acc3466SVladimir Sementsov-Ogievskiy     BdrvRequestFlags flags)
22081acc3466SVladimir Sementsov-Ogievskiy {
2209a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
221061007b31SStefan Hajnoczi     BdrvTrackedRequest req;
2211a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
22127a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
221361007b31SStefan Hajnoczi     int ret;
2214f0deecffSVladimir Sementsov-Ogievskiy     bool padded = false;
2215967d7905SEmanuele Giuseppe Esposito     IO_CODE();
221661007b31SStefan Hajnoczi 
221737e9403eSVladimir Sementsov-Ogievskiy     trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2218f42cf447SDaniel P. Berrange 
22191e97be91SEmanuele Giuseppe Esposito     if (!bdrv_co_is_inserted(bs)) {
222061007b31SStefan Hajnoczi         return -ENOMEDIUM;
222161007b31SStefan Hajnoczi     }
222261007b31SStefan Hajnoczi 
22232aaa3f9bSVladimir Sementsov-Ogievskiy     if (flags & BDRV_REQ_ZERO_WRITE) {
22242aaa3f9bSVladimir Sementsov-Ogievskiy         ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
22252aaa3f9bSVladimir Sementsov-Ogievskiy     } else {
222663f4ad11SVladimir Sementsov-Ogievskiy         ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
22272aaa3f9bSVladimir Sementsov-Ogievskiy     }
222861007b31SStefan Hajnoczi     if (ret < 0) {
222961007b31SStefan Hajnoczi         return ret;
223061007b31SStefan Hajnoczi     }
223161007b31SStefan Hajnoczi 
2232f2208fdcSAlberto Garcia     /* If the request is misaligned then we can't make it efficient */
2233f2208fdcSAlberto Garcia     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2234f2208fdcSAlberto Garcia         !QEMU_IS_ALIGNED(offset | bytes, align))
2235f2208fdcSAlberto Garcia     {
2236f2208fdcSAlberto Garcia         return -ENOTSUP;
2237f2208fdcSAlberto Garcia     }
2238f2208fdcSAlberto Garcia 
2239ac9d00bfSVladimir Sementsov-Ogievskiy     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2240ac9d00bfSVladimir Sementsov-Ogievskiy         /*
2241ac9d00bfSVladimir Sementsov-Ogievskiy          * Aligning a zero-length request is nonsense. Even if the driver gives
2242ac9d00bfSVladimir Sementsov-Ogievskiy          * zero length a special meaning (like qcow2_co_pwritev_compressed_part),
2243ac9d00bfSVladimir Sementsov-Ogievskiy          * we can't pass such a request to the driver due to request_alignment.
2244ac9d00bfSVladimir Sementsov-Ogievskiy          *
2245ac9d00bfSVladimir Sementsov-Ogievskiy          * Still, there is no reason to return an error if someone does an
2246ac9d00bfSVladimir Sementsov-Ogievskiy          * unaligned zero-length write occasionally.
2247ac9d00bfSVladimir Sementsov-Ogievskiy          */
2248ac9d00bfSVladimir Sementsov-Ogievskiy         return 0;
2249ac9d00bfSVladimir Sementsov-Ogievskiy     }
2250ac9d00bfSVladimir Sementsov-Ogievskiy 
2251f0deecffSVladimir Sementsov-Ogievskiy     if (!(flags & BDRV_REQ_ZERO_WRITE)) {
225261007b31SStefan Hajnoczi         /*
2253f0deecffSVladimir Sementsov-Ogievskiy          * Pad the request for the following read-modify-write cycle.
2254f0deecffSVladimir Sementsov-Ogievskiy          * bdrv_co_do_zero_pwritev() does its own alignment, so we align
2255f0deecffSVladimir Sementsov-Ogievskiy          * here only if there is no ZERO flag.
225661007b31SStefan Hajnoczi          */
225718743311SHanna Czenczek         ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
225818743311SHanna Czenczek                                &pad, &padded, &flags);
225998ca4549SVladimir Sementsov-Ogievskiy         if (ret < 0) {
226098ca4549SVladimir Sementsov-Ogievskiy             return ret;
226198ca4549SVladimir Sementsov-Ogievskiy         }
2262f0deecffSVladimir Sementsov-Ogievskiy     }
2263f0deecffSVladimir Sementsov-Ogievskiy 
2264f0deecffSVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
2265ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
226661007b31SStefan Hajnoczi 
226718a59f03SAnton Nefedov     if (flags & BDRV_REQ_ZERO_WRITE) {
2268f0deecffSVladimir Sementsov-Ogievskiy         assert(!padded);
226985c97ca7SKevin Wolf         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
22709eeb6dd1SFam Zheng         goto out;
22719eeb6dd1SFam Zheng     }
22729eeb6dd1SFam Zheng 
2273f0deecffSVladimir Sementsov-Ogievskiy     if (padded) {
2274f0deecffSVladimir Sementsov-Ogievskiy         /*
2275f0deecffSVladimir Sementsov-Ogievskiy          * Request was unaligned to request_alignment and therefore
2276f0deecffSVladimir Sementsov-Ogievskiy          * padded.  We are going to do read-modify-write, and must
2277f0deecffSVladimir Sementsov-Ogievskiy          * serialize the request to prevent interactions of the
2278f0deecffSVladimir Sementsov-Ogievskiy          * widened region with other transactions.
2279f0deecffSVladimir Sementsov-Ogievskiy          */
228045e62b46SVladimir Sementsov-Ogievskiy         assert(!(flags & BDRV_REQ_NO_WAIT));
22818ac5aab2SVladimir Sementsov-Ogievskiy         bdrv_make_request_serialising(&req, align);
22827a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, &req, &pad, false);
228361007b31SStefan Hajnoczi     }
228461007b31SStefan Hajnoczi 
228585c97ca7SKevin Wolf     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
22861acc3466SVladimir Sementsov-Ogievskiy                                qiov, qiov_offset, flags);
228761007b31SStefan Hajnoczi 
228818743311SHanna Czenczek     bdrv_padding_finalize(&pad);
228961007b31SStefan Hajnoczi 
22909eeb6dd1SFam Zheng out:
22919eeb6dd1SFam Zheng     tracked_request_end(&req);
229299723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
22937a3f542fSVladimir Sementsov-Ogievskiy 
229461007b31SStefan Hajnoczi     return ret;
229561007b31SStefan Hajnoczi }
229661007b31SStefan Hajnoczi 
2297a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2298e9e52efdSVladimir Sementsov-Ogievskiy                                        int64_t bytes, BdrvRequestFlags flags)
229961007b31SStefan Hajnoczi {
2300384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
2301f5a5ca79SManos Pitsidianakis     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2302abaf8b75SKevin Wolf     assert_bdrv_graph_readable();
230361007b31SStefan Hajnoczi 
2304a03ef88fSKevin Wolf     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
230561007b31SStefan Hajnoczi         flags &= ~BDRV_REQ_MAY_UNMAP;
230661007b31SStefan Hajnoczi     }
230761007b31SStefan Hajnoczi 
2308f5a5ca79SManos Pitsidianakis     return bdrv_co_pwritev(child, offset, bytes, NULL,
230961007b31SStefan Hajnoczi                            BDRV_REQ_ZERO_WRITE | flags);
231061007b31SStefan Hajnoczi }
231161007b31SStefan Hajnoczi 
23124085f5c7SJohn Snow /*
23134085f5c7SJohn Snow  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
23144085f5c7SJohn Snow  */
23154085f5c7SJohn Snow int bdrv_flush_all(void)
23164085f5c7SJohn Snow {
23174085f5c7SJohn Snow     BdrvNextIterator it;
23184085f5c7SJohn Snow     BlockDriverState *bs = NULL;
23194085f5c7SJohn Snow     int result = 0;
23204085f5c7SJohn Snow 
2321f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
2322f791bf7fSEmanuele Giuseppe Esposito 
2323c8aa7895SPavel Dovgalyuk     /*
2324c8aa7895SPavel Dovgalyuk      * The bdrv queue is managed by record/replay;
2325c8aa7895SPavel Dovgalyuk      * creating a new flush request when stopping
2326c8aa7895SPavel Dovgalyuk      * the VM may break determinism
2327c8aa7895SPavel Dovgalyuk      */
2328c8aa7895SPavel Dovgalyuk     if (replay_events_enabled()) {
2329c8aa7895SPavel Dovgalyuk         return result;
2330c8aa7895SPavel Dovgalyuk     }
2331c8aa7895SPavel Dovgalyuk 
23324085f5c7SJohn Snow     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
23334085f5c7SJohn Snow         AioContext *aio_context = bdrv_get_aio_context(bs);
23344085f5c7SJohn Snow         int ret;
23354085f5c7SJohn Snow 
23364085f5c7SJohn Snow         aio_context_acquire(aio_context);
23374085f5c7SJohn Snow         ret = bdrv_flush(bs);
23384085f5c7SJohn Snow         if (ret < 0 && !result) {
23394085f5c7SJohn Snow             result = ret;
23404085f5c7SJohn Snow         }
23414085f5c7SJohn Snow         aio_context_release(aio_context);
23424085f5c7SJohn Snow     }
23434085f5c7SJohn Snow 
23444085f5c7SJohn Snow     return result;
23454085f5c7SJohn Snow }
23464085f5c7SJohn Snow 
234761007b31SStefan Hajnoczi /*
234861007b31SStefan Hajnoczi  * Returns the allocation status of the specified byte range.
234961007b31SStefan Hajnoczi  * Drivers not implementing the functionality are assumed to not support
235061007b31SStefan Hajnoczi  * backing files, hence their entire contents are reported as allocated.
235161007b31SStefan Hajnoczi  *
235286a3d5c6SEric Blake  * If 'want_zero' is true, the caller is querying for mapping
235386a3d5c6SEric Blake  * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
235486a3d5c6SEric Blake  * _ZERO where possible; otherwise, the result favors larger 'pnum',
235586a3d5c6SEric Blake  * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2356c9ce8c4dSEric Blake  *
23572e8bc787SEric Blake  * If 'offset' is beyond the end of the disk image, the return value is
2358fb0d8654SEric Blake  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
235961007b31SStefan Hajnoczi  *
23602e8bc787SEric Blake  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2361fb0d8654SEric Blake  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2362fb0d8654SEric Blake  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
236367a0fd2aSFam Zheng  *
23642e8bc787SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
23652e8bc787SEric Blake  * following the specified offset) that are easily known to be in the
23662e8bc787SEric Blake  * same allocated/unallocated state.  Note that a second call starting
23672e8bc787SEric Blake  * at the original offset plus returned pnum may have the same status.
23682e8bc787SEric Blake  * The returned value is non-zero on success except at end-of-file.
23692e8bc787SEric Blake  *
23702e8bc787SEric Blake  * Returns negative errno on failure.  Otherwise, if the
23712e8bc787SEric Blake  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
23722e8bc787SEric Blake  * set to the host mapping and BDS corresponding to the guest offset.
237361007b31SStefan Hajnoczi  */
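/*
 * Worked example for a hypothetical 1 MiB image whose first 64 KiB are
 * allocated: a query at offset 0 with bytes == 1 MiB may set *pnum to
 * 64 KiB and return BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; a second
 * query at offset 64 KiB can then describe the unallocated remainder
 * and, because it reaches the end of the image, also set BDRV_BLOCK_EOF.
 */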
23747ff9579eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
23757ff9579eSKevin Wolf bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
23762e8bc787SEric Blake                      int64_t offset, int64_t bytes,
23777ff9579eSKevin Wolf                      int64_t *pnum, int64_t *map, BlockDriverState **file)
237861007b31SStefan Hajnoczi {
23792e8bc787SEric Blake     int64_t total_size;
23802e8bc787SEric Blake     int64_t n; /* bytes */
2381efa6e2edSEric Blake     int ret;
23822e8bc787SEric Blake     int64_t local_map = 0;
2383298a1665SEric Blake     BlockDriverState *local_file = NULL;
2384efa6e2edSEric Blake     int64_t aligned_offset, aligned_bytes;
2385efa6e2edSEric Blake     uint32_t align;
2386549ec0d9SMax Reitz     bool has_filtered_child;
238761007b31SStefan Hajnoczi 
2388298a1665SEric Blake     assert(pnum);
23897ff9579eSKevin Wolf     assert_bdrv_graph_readable();
2390298a1665SEric Blake     *pnum = 0;
2391*0af02bd1SPaolo Bonzini     total_size = bdrv_co_getlength(bs);
23922e8bc787SEric Blake     if (total_size < 0) {
23932e8bc787SEric Blake         ret = total_size;
2394298a1665SEric Blake         goto early_out;
239561007b31SStefan Hajnoczi     }
239661007b31SStefan Hajnoczi 
23972e8bc787SEric Blake     if (offset >= total_size) {
2398298a1665SEric Blake         ret = BDRV_BLOCK_EOF;
2399298a1665SEric Blake         goto early_out;
240061007b31SStefan Hajnoczi     }
24012e8bc787SEric Blake     if (!bytes) {
2402298a1665SEric Blake         ret = 0;
2403298a1665SEric Blake         goto early_out;
24049cdcfd9fSEric Blake     }
240561007b31SStefan Hajnoczi 
24062e8bc787SEric Blake     n = total_size - offset;
24072e8bc787SEric Blake     if (n < bytes) {
24082e8bc787SEric Blake         bytes = n;
240961007b31SStefan Hajnoczi     }
241061007b31SStefan Hajnoczi 
2411*0af02bd1SPaolo Bonzini     /* Must be non-NULL or bdrv_co_getlength() would have failed */
2412d470ad42SMax Reitz     assert(bs->drv);
2413549ec0d9SMax Reitz     has_filtered_child = bdrv_filter_child(bs);
2414549ec0d9SMax Reitz     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
24152e8bc787SEric Blake         *pnum = bytes;
241661007b31SStefan Hajnoczi         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
24172e8bc787SEric Blake         if (offset + bytes == total_size) {
2418fb0d8654SEric Blake             ret |= BDRV_BLOCK_EOF;
2419fb0d8654SEric Blake         }
242061007b31SStefan Hajnoczi         if (bs->drv->protocol_name) {
24212e8bc787SEric Blake             ret |= BDRV_BLOCK_OFFSET_VALID;
24222e8bc787SEric Blake             local_map = offset;
2423298a1665SEric Blake             local_file = bs;
242461007b31SStefan Hajnoczi         }
2425298a1665SEric Blake         goto early_out;
242661007b31SStefan Hajnoczi     }
242761007b31SStefan Hajnoczi 
242899723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2429efa6e2edSEric Blake 
2430efa6e2edSEric Blake     /* Round out to request_alignment boundaries */
243186a3d5c6SEric Blake     align = bs->bl.request_alignment;
2432efa6e2edSEric Blake     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2433efa6e2edSEric Blake     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2434efa6e2edSEric Blake 
2435549ec0d9SMax Reitz     if (bs->drv->bdrv_co_block_status) {
24360bc329fbSHanna Reitz         /*
24370bc329fbSHanna Reitz          * Use the block-status cache only for protocol nodes: Format
24380bc329fbSHanna Reitz          * drivers are generally quick to inquire the status, but protocol
24390bc329fbSHanna Reitz          * drivers often need to get information from outside of qemu, so
24400bc329fbSHanna Reitz          * we do not have control over the actual implementation.  There
24410bc329fbSHanna Reitz          * have been cases where inquiring the status took an unreasonably
24420bc329fbSHanna Reitz          * long time, and we can do nothing in qemu to fix it.
24430bc329fbSHanna Reitz          * This is especially problematic for images with large data areas,
24440bc329fbSHanna Reitz          * because finding the few holes in them and giving them special
24450bc329fbSHanna Reitz          * treatment does not gain much performance.  Therefore, we try to
24460bc329fbSHanna Reitz          * cache the last-identified data region.
24470bc329fbSHanna Reitz          *
24480bc329fbSHanna Reitz          * Second, limiting ourselves to protocol nodes allows us to assume
24490bc329fbSHanna Reitz          * the block status for data regions to be DATA | OFFSET_VALID, and
24500bc329fbSHanna Reitz          * that the host offset is the same as the guest offset.
24510bc329fbSHanna Reitz          *
24520bc329fbSHanna Reitz          * Note that it is possible that external writers zero parts of
24530bc329fbSHanna Reitz          * the cached regions without the cache being invalidated, and so
24540bc329fbSHanna Reitz          * we may report zeroes as data.  This is not catastrophic,
24550bc329fbSHanna Reitz          * however, because reporting zeroes as data is fine.
24560bc329fbSHanna Reitz          */
24570bc329fbSHanna Reitz         if (QLIST_EMPTY(&bs->children) &&
24580bc329fbSHanna Reitz             bdrv_bsc_is_data(bs, aligned_offset, pnum))
24590bc329fbSHanna Reitz         {
24600bc329fbSHanna Reitz             ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
24610bc329fbSHanna Reitz             local_file = bs;
24620bc329fbSHanna Reitz             local_map = aligned_offset;
24630bc329fbSHanna Reitz         } else {
246486a3d5c6SEric Blake             ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
246586a3d5c6SEric Blake                                                 aligned_bytes, pnum, &local_map,
246686a3d5c6SEric Blake                                                 &local_file);
24670bc329fbSHanna Reitz 
24680bc329fbSHanna Reitz             /*
24690bc329fbSHanna Reitz              * Note that checking QLIST_EMPTY(&bs->children) is also done when
24700bc329fbSHanna Reitz              * the cache is queried above.  Technically, we do not need to check
24710bc329fbSHanna Reitz              * it here; the worst that can happen is that we fill the cache for
24720bc329fbSHanna Reitz              * non-protocol nodes, and then it is never used.  However, filling
24730bc329fbSHanna Reitz              * the cache requires an RCU update, so double check here to avoid
24740bc329fbSHanna Reitz              * such an update if possible.
2475113b727cSHanna Reitz              *
2476113b727cSHanna Reitz              * Check want_zero, because we only want to update the cache when we
2477113b727cSHanna Reitz              * have accurate information about what is zero and what is data.
24780bc329fbSHanna Reitz              */
2479113b727cSHanna Reitz             if (want_zero &&
2480113b727cSHanna Reitz                 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
24810bc329fbSHanna Reitz                 QLIST_EMPTY(&bs->children))
24820bc329fbSHanna Reitz             {
24830bc329fbSHanna Reitz                 /*
24840bc329fbSHanna Reitz                  * When a protocol driver reports BLOCK_OFFSET_VALID, the
24850bc329fbSHanna Reitz                  * returned local_map value must be the same as the offset we
24860bc329fbSHanna Reitz                  * have passed (aligned_offset), and local_file must be the node
24870bc329fbSHanna Reitz                  * itself.
24880bc329fbSHanna Reitz                  * Assert this, because we follow this rule when reading from
24890bc329fbSHanna Reitz                  * the cache (see the `local_file = bs` and
24900bc329fbSHanna Reitz                  * `local_map = aligned_offset` assignments above), and the
24910bc329fbSHanna Reitz                  * result the cache delivers must be the same as the driver
24920bc329fbSHanna Reitz                  * would deliver.
24930bc329fbSHanna Reitz                  */
24940bc329fbSHanna Reitz                 assert(local_file == bs);
24950bc329fbSHanna Reitz                 assert(local_map == aligned_offset);
24960bc329fbSHanna Reitz                 bdrv_bsc_fill(bs, aligned_offset, *pnum);
24970bc329fbSHanna Reitz             }
24980bc329fbSHanna Reitz         }
2499549ec0d9SMax Reitz     } else {
2500549ec0d9SMax Reitz         /* Default code for filters */
2501549ec0d9SMax Reitz 
2502549ec0d9SMax Reitz         local_file = bdrv_filter_bs(bs);
2503549ec0d9SMax Reitz         assert(local_file);
2504549ec0d9SMax Reitz 
2505549ec0d9SMax Reitz         *pnum = aligned_bytes;
2506549ec0d9SMax Reitz         local_map = aligned_offset;
2507549ec0d9SMax Reitz         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2508549ec0d9SMax Reitz     }
250986a3d5c6SEric Blake     if (ret < 0) {
251086a3d5c6SEric Blake         *pnum = 0;
251186a3d5c6SEric Blake         goto out;
251286a3d5c6SEric Blake     }
2513efa6e2edSEric Blake 
2514efa6e2edSEric Blake     /*
2515636cb512SEric Blake      * The driver's result must be a non-zero multiple of request_alignment.
2516efa6e2edSEric Blake      * Clamp pnum and adjust map to original request.
2517efa6e2edSEric Blake      */
2518636cb512SEric Blake     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2519636cb512SEric Blake            align > offset - aligned_offset);
252069f47505SVladimir Sementsov-Ogievskiy     if (ret & BDRV_BLOCK_RECURSE) {
252169f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_DATA);
252269f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_OFFSET_VALID);
252369f47505SVladimir Sementsov-Ogievskiy         assert(!(ret & BDRV_BLOCK_ZERO));
252469f47505SVladimir Sementsov-Ogievskiy     }
252569f47505SVladimir Sementsov-Ogievskiy 
2526efa6e2edSEric Blake     *pnum -= offset - aligned_offset;
2527efa6e2edSEric Blake     if (*pnum > bytes) {
2528efa6e2edSEric Blake         *pnum = bytes;
2529efa6e2edSEric Blake     }
2530efa6e2edSEric Blake     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2531efa6e2edSEric Blake         local_map += offset - aligned_offset;
2532efa6e2edSEric Blake     }
253361007b31SStefan Hajnoczi 
253461007b31SStefan Hajnoczi     if (ret & BDRV_BLOCK_RAW) {
2535298a1665SEric Blake         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
25362e8bc787SEric Blake         ret = bdrv_co_block_status(local_file, want_zero, local_map,
25372e8bc787SEric Blake                                    *pnum, pnum, &local_map, &local_file);
253899723548SPaolo Bonzini         goto out;
253961007b31SStefan Hajnoczi     }
254061007b31SStefan Hajnoczi 
254161007b31SStefan Hajnoczi     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
254261007b31SStefan Hajnoczi         ret |= BDRV_BLOCK_ALLOCATED;
2543d40f4a56SAlberto Garcia     } else if (bs->drv->supports_backing) {
2544cb850315SMax Reitz         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2545cb850315SMax Reitz 
2546d40f4a56SAlberto Garcia         if (!cow_bs) {
2547d40f4a56SAlberto Garcia             ret |= BDRV_BLOCK_ZERO;
2548d40f4a56SAlberto Garcia         } else if (want_zero) {
2549*0af02bd1SPaolo Bonzini             int64_t size2 = bdrv_co_getlength(cow_bs);
2550c9ce8c4dSEric Blake 
25512e8bc787SEric Blake             if (size2 >= 0 && offset >= size2) {
255261007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
255361007b31SStefan Hajnoczi             }
25547b1efe99SVladimir Sementsov-Ogievskiy         }
255561007b31SStefan Hajnoczi     }
255661007b31SStefan Hajnoczi 
255769f47505SVladimir Sementsov-Ogievskiy     if (want_zero && ret & BDRV_BLOCK_RECURSE &&
255869f47505SVladimir Sementsov-Ogievskiy         local_file && local_file != bs &&
255961007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
256061007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_OFFSET_VALID)) {
25612e8bc787SEric Blake         int64_t file_pnum;
25622e8bc787SEric Blake         int ret2;
256361007b31SStefan Hajnoczi 
25642e8bc787SEric Blake         ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
25652e8bc787SEric Blake                                     *pnum, &file_pnum, NULL, NULL);
256661007b31SStefan Hajnoczi         if (ret2 >= 0) {
256761007b31SStefan Hajnoczi             /* Ignore errors.  This is just providing extra information; it
256861007b31SStefan Hajnoczi              * is useful but not necessary.
256961007b31SStefan Hajnoczi              */
2570c61e684eSEric Blake             if (ret2 & BDRV_BLOCK_EOF &&
2571c61e684eSEric Blake                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2572c61e684eSEric Blake                 /*
2573c61e684eSEric Blake                  * It is valid for the format block driver to read
2574c61e684eSEric Blake                  * beyond the end of the underlying file's current
2575c61e684eSEric Blake                  * size; such areas read as zero.
2576c61e684eSEric Blake                  */
257761007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
257861007b31SStefan Hajnoczi             } else {
257961007b31SStefan Hajnoczi                 /* Limit request to the range reported by the protocol driver */
258061007b31SStefan Hajnoczi                 *pnum = file_pnum;
258161007b31SStefan Hajnoczi                 ret |= (ret2 & BDRV_BLOCK_ZERO);
258261007b31SStefan Hajnoczi             }
258361007b31SStefan Hajnoczi         }
258461007b31SStefan Hajnoczi     }
258561007b31SStefan Hajnoczi 
258699723548SPaolo Bonzini out:
258799723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
25882e8bc787SEric Blake     if (ret >= 0 && offset + *pnum == total_size) {
2589fb0d8654SEric Blake         ret |= BDRV_BLOCK_EOF;
2590fb0d8654SEric Blake     }
2591298a1665SEric Blake early_out:
2592298a1665SEric Blake     if (file) {
2593298a1665SEric Blake         *file = local_file;
2594298a1665SEric Blake     }
25952e8bc787SEric Blake     if (map) {
25962e8bc787SEric Blake         *map = local_map;
25972e8bc787SEric Blake     }
259861007b31SStefan Hajnoczi     return ret;
259961007b31SStefan Hajnoczi }
260061007b31SStefan Hajnoczi 
260121c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
2602f9e694cbSVladimir Sementsov-Ogievskiy bdrv_co_common_block_status_above(BlockDriverState *bs,
2603ba3f0e25SFam Zheng                                   BlockDriverState *base,
26043555a432SVladimir Sementsov-Ogievskiy                                   bool include_base,
2605c9ce8c4dSEric Blake                                   bool want_zero,
26065b648c67SEric Blake                                   int64_t offset,
26075b648c67SEric Blake                                   int64_t bytes,
26085b648c67SEric Blake                                   int64_t *pnum,
26095b648c67SEric Blake                                   int64_t *map,
2610a92b1b06SEric Blake                                   BlockDriverState **file,
2611a92b1b06SEric Blake                                   int *depth)
2612ba3f0e25SFam Zheng {
261367c095c8SVladimir Sementsov-Ogievskiy     int ret;
2614ba3f0e25SFam Zheng     BlockDriverState *p;
261567c095c8SVladimir Sementsov-Ogievskiy     int64_t eof = 0;
2616a92b1b06SEric Blake     int dummy;
26171581a70dSEmanuele Giuseppe Esposito     IO_CODE();
2618ba3f0e25SFam Zheng 
26193555a432SVladimir Sementsov-Ogievskiy     assert(!include_base || base); /* Can't include NULL base */
26207ff9579eSKevin Wolf     assert_bdrv_graph_readable();
262167c095c8SVladimir Sementsov-Ogievskiy 
2622a92b1b06SEric Blake     if (!depth) {
2623a92b1b06SEric Blake         depth = &dummy;
2624a92b1b06SEric Blake     }
2625a92b1b06SEric Blake     *depth = 0;
2626a92b1b06SEric Blake 
2627624f27bbSVladimir Sementsov-Ogievskiy     if (!include_base && bs == base) {
2628624f27bbSVladimir Sementsov-Ogievskiy         *pnum = bytes;
2629624f27bbSVladimir Sementsov-Ogievskiy         return 0;
2630624f27bbSVladimir Sementsov-Ogievskiy     }
2631624f27bbSVladimir Sementsov-Ogievskiy 
263267c095c8SVladimir Sementsov-Ogievskiy     ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
2633a92b1b06SEric Blake     ++*depth;
26343555a432SVladimir Sementsov-Ogievskiy     if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
263567c095c8SVladimir Sementsov-Ogievskiy         return ret;
263667c095c8SVladimir Sementsov-Ogievskiy     }
263767c095c8SVladimir Sementsov-Ogievskiy 
263867c095c8SVladimir Sementsov-Ogievskiy     if (ret & BDRV_BLOCK_EOF) {
263967c095c8SVladimir Sementsov-Ogievskiy         eof = offset + *pnum;
264067c095c8SVladimir Sementsov-Ogievskiy     }
264167c095c8SVladimir Sementsov-Ogievskiy 
264267c095c8SVladimir Sementsov-Ogievskiy     assert(*pnum <= bytes);
264367c095c8SVladimir Sementsov-Ogievskiy     bytes = *pnum;
264467c095c8SVladimir Sementsov-Ogievskiy 
26453555a432SVladimir Sementsov-Ogievskiy     for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
264667c095c8SVladimir Sementsov-Ogievskiy          p = bdrv_filter_or_cow_bs(p))
264767c095c8SVladimir Sementsov-Ogievskiy     {
26485b648c67SEric Blake         ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
26495b648c67SEric Blake                                    file);
2650a92b1b06SEric Blake         ++*depth;
2651c61e684eSEric Blake         if (ret < 0) {
265267c095c8SVladimir Sementsov-Ogievskiy             return ret;
2653c61e684eSEric Blake         }
265467c095c8SVladimir Sementsov-Ogievskiy         if (*pnum == 0) {
2655c61e684eSEric Blake             /*
265667c095c8SVladimir Sementsov-Ogievskiy              * The top layer deferred to this layer, and because this layer is
265767c095c8SVladimir Sementsov-Ogievskiy              * short, any zeroes that we synthesize beyond EOF behave as if they
265867c095c8SVladimir Sementsov-Ogievskiy              * were allocated at this layer.
265967c095c8SVladimir Sementsov-Ogievskiy              *
266067c095c8SVladimir Sementsov-Ogievskiy              * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
266167c095c8SVladimir Sementsov-Ogievskiy              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
266267c095c8SVladimir Sementsov-Ogievskiy              * below.
2663c61e684eSEric Blake              */
266467c095c8SVladimir Sementsov-Ogievskiy             assert(ret & BDRV_BLOCK_EOF);
26655b648c67SEric Blake             *pnum = bytes;
266667c095c8SVladimir Sementsov-Ogievskiy             if (file) {
266767c095c8SVladimir Sementsov-Ogievskiy                 *file = p;
2668c61e684eSEric Blake             }
266967c095c8SVladimir Sementsov-Ogievskiy             ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2670ba3f0e25SFam Zheng             break;
2671ba3f0e25SFam Zheng         }
267267c095c8SVladimir Sementsov-Ogievskiy         if (ret & BDRV_BLOCK_ALLOCATED) {
267367c095c8SVladimir Sementsov-Ogievskiy             /*
267467c095c8SVladimir Sementsov-Ogievskiy              * We've found the node and the status; we must break.
267567c095c8SVladimir Sementsov-Ogievskiy              *
267667c095c8SVladimir Sementsov-Ogievskiy              * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
267767c095c8SVladimir Sementsov-Ogievskiy              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
267867c095c8SVladimir Sementsov-Ogievskiy              * below.
267967c095c8SVladimir Sementsov-Ogievskiy              */
268067c095c8SVladimir Sementsov-Ogievskiy             ret &= ~BDRV_BLOCK_EOF;
268167c095c8SVladimir Sementsov-Ogievskiy             break;
2682ba3f0e25SFam Zheng         }
268367c095c8SVladimir Sementsov-Ogievskiy 
26843555a432SVladimir Sementsov-Ogievskiy         if (p == base) {
26853555a432SVladimir Sementsov-Ogievskiy             assert(include_base);
26863555a432SVladimir Sementsov-Ogievskiy             break;
26873555a432SVladimir Sementsov-Ogievskiy         }
26883555a432SVladimir Sementsov-Ogievskiy 
268967c095c8SVladimir Sementsov-Ogievskiy         /*
269067c095c8SVladimir Sementsov-Ogievskiy          * OK, the [offset, offset + *pnum) region is unallocated on this
269167c095c8SVladimir Sementsov-Ogievskiy          * layer, so let's continue diving down.
269267c095c8SVladimir Sementsov-Ogievskiy          */
269367c095c8SVladimir Sementsov-Ogievskiy         assert(*pnum <= bytes);
269467c095c8SVladimir Sementsov-Ogievskiy         bytes = *pnum;
269567c095c8SVladimir Sementsov-Ogievskiy     }
269667c095c8SVladimir Sementsov-Ogievskiy 
269767c095c8SVladimir Sementsov-Ogievskiy     if (offset + *pnum == eof) {
269867c095c8SVladimir Sementsov-Ogievskiy         ret |= BDRV_BLOCK_EOF;
269967c095c8SVladimir Sementsov-Ogievskiy     }
270067c095c8SVladimir Sementsov-Ogievskiy 
2701ba3f0e25SFam Zheng     return ret;
2702ba3f0e25SFam Zheng }
2703ba3f0e25SFam Zheng 
27047b52a921SEmanuele Giuseppe Esposito int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
27057b52a921SEmanuele Giuseppe Esposito                                             BlockDriverState *base,
27067b52a921SEmanuele Giuseppe Esposito                                             int64_t offset, int64_t bytes,
27077b52a921SEmanuele Giuseppe Esposito                                             int64_t *pnum, int64_t *map,
27087b52a921SEmanuele Giuseppe Esposito                                             BlockDriverState **file)
27097b52a921SEmanuele Giuseppe Esposito {
27107b52a921SEmanuele Giuseppe Esposito     IO_CODE();
27117b52a921SEmanuele Giuseppe Esposito     return bdrv_co_common_block_status_above(bs, base, false, true, offset,
27127b52a921SEmanuele Giuseppe Esposito                                              bytes, pnum, map, file, NULL);
27137b52a921SEmanuele Giuseppe Esposito }
27147b52a921SEmanuele Giuseppe Esposito 
271531826642SEric Blake int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
271631826642SEric Blake                             int64_t offset, int64_t bytes, int64_t *pnum,
271731826642SEric Blake                             int64_t *map, BlockDriverState **file)
2718c9ce8c4dSEric Blake {
2719384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
27203555a432SVladimir Sementsov-Ogievskiy     return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
2721a92b1b06SEric Blake                                           pnum, map, file, NULL);
2722c9ce8c4dSEric Blake }
2723c9ce8c4dSEric Blake 
2724237d78f8SEric Blake int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2725237d78f8SEric Blake                       int64_t *pnum, int64_t *map, BlockDriverState **file)
2726ba3f0e25SFam Zheng {
2727384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
2728cb850315SMax Reitz     return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
272931826642SEric Blake                                    offset, bytes, pnum, map, file);
2730ba3f0e25SFam Zheng }
2731ba3f0e25SFam Zheng 
273246cd1e8aSAlberto Garcia /*
273346cd1e8aSAlberto Garcia  * Check @bs (and its backing chain) to see if the range defined
273446cd1e8aSAlberto Garcia  * by @offset and @bytes is known to read as zeroes.
273546cd1e8aSAlberto Garcia  * Return 1 if that is the case, 0 otherwise and -errno on error.
273646cd1e8aSAlberto Garcia  * This test is meant to be fast rather than accurate, so returning 0
273746cd1e8aSAlberto Garcia  * does not guarantee non-zero data.
273846cd1e8aSAlberto Garcia  */
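/*
 * Hypothetical use: a copy loop may call bdrv_co_is_zero_fast() and skip
 * copying a range when it returns 1; a return of 0 only means the range
 * is not cheaply known to be zero, so the data must still be copied or
 * checked by other means.
 */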
273946cd1e8aSAlberto Garcia int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
274046cd1e8aSAlberto Garcia                                       int64_t bytes)
274146cd1e8aSAlberto Garcia {
274246cd1e8aSAlberto Garcia     int ret;
274346cd1e8aSAlberto Garcia     int64_t pnum = bytes;
2744384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
274546cd1e8aSAlberto Garcia 
274646cd1e8aSAlberto Garcia     if (!bytes) {
274746cd1e8aSAlberto Garcia         return 1;
274846cd1e8aSAlberto Garcia     }
274946cd1e8aSAlberto Garcia 
2750ce47ff20SAlberto Faria     ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
2751a92b1b06SEric Blake                                             bytes, &pnum, NULL, NULL, NULL);
275246cd1e8aSAlberto Garcia 
275346cd1e8aSAlberto Garcia     if (ret < 0) {
275446cd1e8aSAlberto Garcia         return ret;
275546cd1e8aSAlberto Garcia     }
275646cd1e8aSAlberto Garcia 
275746cd1e8aSAlberto Garcia     return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
275846cd1e8aSAlberto Garcia }
275946cd1e8aSAlberto Garcia 
27607b52a921SEmanuele Giuseppe Esposito int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
27617b52a921SEmanuele Giuseppe Esposito                                       int64_t bytes, int64_t *pnum)
27627b52a921SEmanuele Giuseppe Esposito {
27637b52a921SEmanuele Giuseppe Esposito     int ret;
27647b52a921SEmanuele Giuseppe Esposito     int64_t dummy;
27657b52a921SEmanuele Giuseppe Esposito     IO_CODE();
27667b52a921SEmanuele Giuseppe Esposito 
27677b52a921SEmanuele Giuseppe Esposito     ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
27687b52a921SEmanuele Giuseppe Esposito                                             bytes, pnum ? pnum : &dummy, NULL,
27697b52a921SEmanuele Giuseppe Esposito                                             NULL, NULL);
27707b52a921SEmanuele Giuseppe Esposito     if (ret < 0) {
27717b52a921SEmanuele Giuseppe Esposito         return ret;
27727b52a921SEmanuele Giuseppe Esposito     }
27737b52a921SEmanuele Giuseppe Esposito     return !!(ret & BDRV_BLOCK_ALLOCATED);
27747b52a921SEmanuele Giuseppe Esposito }
27757b52a921SEmanuele Giuseppe Esposito 
27767c85803cSAlberto Faria int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
27777c85803cSAlberto Faria                       int64_t *pnum)
277861007b31SStefan Hajnoczi {
27797ddb99b9SEric Blake     int ret;
27807ddb99b9SEric Blake     int64_t dummy;
2781384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
2782d6a644bbSEric Blake 
27833555a432SVladimir Sementsov-Ogievskiy     ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
27843555a432SVladimir Sementsov-Ogievskiy                                          bytes, pnum ? pnum : &dummy, NULL,
2785a92b1b06SEric Blake                                          NULL, NULL);
278661007b31SStefan Hajnoczi     if (ret < 0) {
278761007b31SStefan Hajnoczi         return ret;
278861007b31SStefan Hajnoczi     }
278961007b31SStefan Hajnoczi     return !!(ret & BDRV_BLOCK_ALLOCATED);
279061007b31SStefan Hajnoczi }
279161007b31SStefan Hajnoczi 
27927b52a921SEmanuele Giuseppe Esposito /* See bdrv_is_allocated_above for documentation */
27937b52a921SEmanuele Giuseppe Esposito int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
27947b52a921SEmanuele Giuseppe Esposito                                             BlockDriverState *base,
27957b52a921SEmanuele Giuseppe Esposito                                             bool include_base, int64_t offset,
27967b52a921SEmanuele Giuseppe Esposito                                             int64_t bytes, int64_t *pnum)
27977b52a921SEmanuele Giuseppe Esposito {
27987b52a921SEmanuele Giuseppe Esposito     int depth;
27997b52a921SEmanuele Giuseppe Esposito     int ret;
28007b52a921SEmanuele Giuseppe Esposito     IO_CODE();
28017b52a921SEmanuele Giuseppe Esposito 
28027b52a921SEmanuele Giuseppe Esposito     ret = bdrv_co_common_block_status_above(top, base, include_base, false,
28037b52a921SEmanuele Giuseppe Esposito                                             offset, bytes, pnum, NULL, NULL,
28047b52a921SEmanuele Giuseppe Esposito                                             &depth);
28057b52a921SEmanuele Giuseppe Esposito     if (ret < 0) {
28067b52a921SEmanuele Giuseppe Esposito         return ret;
28077b52a921SEmanuele Giuseppe Esposito     }
28087b52a921SEmanuele Giuseppe Esposito 
28097b52a921SEmanuele Giuseppe Esposito     if (ret & BDRV_BLOCK_ALLOCATED) {
28107b52a921SEmanuele Giuseppe Esposito         return depth;
28117b52a921SEmanuele Giuseppe Esposito     }
28127b52a921SEmanuele Giuseppe Esposito     return 0;
28137b52a921SEmanuele Giuseppe Esposito }
28147b52a921SEmanuele Giuseppe Esposito 
281561007b31SStefan Hajnoczi /*
281661007b31SStefan Hajnoczi  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
281761007b31SStefan Hajnoczi  *
2818a92b1b06SEric Blake  * Return a positive depth if (a prefix of) the given range is allocated
2819a92b1b06SEric Blake  * in any image between BASE and TOP (BASE is only included if include_base
2820a92b1b06SEric Blake  * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
2821170d3bd3SAndrey Shinkevich  * BASE can be NULL to check if the given offset is allocated in any
2822170d3bd3SAndrey Shinkevich  * image of the chain.  Return 0 otherwise, or negative errno on
2823170d3bd3SAndrey Shinkevich  * failure.
282461007b31SStefan Hajnoczi  *
282551b0a488SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
282651b0a488SEric Blake  * following the specified offset) that are known to be in the same
282751b0a488SEric Blake  * allocated/unallocated state.  Note that a subsequent call starting
282851b0a488SEric Blake  * at 'offset + *pnum' may return the same allocation status (in other
282951b0a488SEric Blake  * words, the result is not necessarily the maximum possible range);
283051b0a488SEric Blake  * but 'pnum' will only be 0 when end of file is reached.
283161007b31SStefan Hajnoczi  */
283261007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top,
283361007b31SStefan Hajnoczi                             BlockDriverState *base,
2834170d3bd3SAndrey Shinkevich                             bool include_base, int64_t offset,
2835170d3bd3SAndrey Shinkevich                             int64_t bytes, int64_t *pnum)
283661007b31SStefan Hajnoczi {
2837a92b1b06SEric Blake     int depth;
28387b52a921SEmanuele Giuseppe Esposito     int ret;
28397b52a921SEmanuele Giuseppe Esposito     IO_CODE();
28407b52a921SEmanuele Giuseppe Esposito 
28417b52a921SEmanuele Giuseppe Esposito     ret = bdrv_common_block_status_above(top, base, include_base, false,
2842a92b1b06SEric Blake                                          offset, bytes, pnum, NULL, NULL,
2843a92b1b06SEric Blake                                          &depth);
284461007b31SStefan Hajnoczi     if (ret < 0) {
284561007b31SStefan Hajnoczi         return ret;
2846d6a644bbSEric Blake     }
284761007b31SStefan Hajnoczi 
2848a92b1b06SEric Blake     if (ret & BDRV_BLOCK_ALLOCATED) {
2849a92b1b06SEric Blake         return depth;
2850a92b1b06SEric Blake     }
2851a92b1b06SEric Blake     return 0;
285261007b31SStefan Hajnoczi }
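
/*
 * Illustrative usage sketch (not part of the original file): interpreting
 * the depth for a chain base <- mid <- top.  Depth 1 means the data lives
 * in 'top' itself, 2 in 'mid', and 0 means the prefix is unallocated
 * everywhere above (and excluding) 'base'.
 */
#if 0
static void example_report_depth(BlockDriverState *top, BlockDriverState *base,
                                 int64_t offset, int64_t bytes)
{
    int64_t pnum;
    int depth = bdrv_is_allocated_above(top, base, false, offset, bytes,
                                        &pnum);

    if (depth > 0) {
        /* first pnum bytes come from layer 'depth', counted from top */
    } else if (depth == 0) {
        /* first pnum bytes are allocated in no layer above base */
    } else {
        /* depth is a negative errno */
    }
}
#endif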
285361007b31SStefan Hajnoczi 
285421c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
2855b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
28561a8ae822SKevin Wolf {
28571a8ae822SKevin Wolf     BlockDriver *drv = bs->drv;
2858c4db2e25SMax Reitz     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2859b984b296SVladimir Sementsov-Ogievskiy     int ret;
28601581a70dSEmanuele Giuseppe Esposito     IO_CODE();
28611b3ff9feSKevin Wolf     assert_bdrv_graph_readable();
2862b984b296SVladimir Sementsov-Ogievskiy 
2863b984b296SVladimir Sementsov-Ogievskiy     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2864b984b296SVladimir Sementsov-Ogievskiy     if (ret < 0) {
2865b984b296SVladimir Sementsov-Ogievskiy         return ret;
2866b984b296SVladimir Sementsov-Ogievskiy     }
2867dc88a467SStefan Hajnoczi 
2868b33b354fSVladimir Sementsov-Ogievskiy     if (!drv) {
2869b33b354fSVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
2870b33b354fSVladimir Sementsov-Ogievskiy     }
2871b33b354fSVladimir Sementsov-Ogievskiy 
2872dc88a467SStefan Hajnoczi     bdrv_inc_in_flight(bs);
28731a8ae822SKevin Wolf 
2874ca5e2ad9SEmanuele Giuseppe Esposito     if (drv->bdrv_co_load_vmstate) {
2875ca5e2ad9SEmanuele Giuseppe Esposito         ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
2876c4db2e25SMax Reitz     } else if (child_bs) {
2877b33b354fSVladimir Sementsov-Ogievskiy         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2878b984b296SVladimir Sementsov-Ogievskiy     } else {
2879b984b296SVladimir Sementsov-Ogievskiy         ret = -ENOTSUP;
28801a8ae822SKevin Wolf     }
28811a8ae822SKevin Wolf 
2882dc88a467SStefan Hajnoczi     bdrv_dec_in_flight(bs);
2883b33b354fSVladimir Sementsov-Ogievskiy 
2884b33b354fSVladimir Sementsov-Ogievskiy     return ret;
2885b33b354fSVladimir Sementsov-Ogievskiy }
2886b33b354fSVladimir Sementsov-Ogievskiy 
2887b33b354fSVladimir Sementsov-Ogievskiy int coroutine_fn
2888b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2889b33b354fSVladimir Sementsov-Ogievskiy {
2890b33b354fSVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
2891b33b354fSVladimir Sementsov-Ogievskiy     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2892b984b296SVladimir Sementsov-Ogievskiy     int ret;
28931581a70dSEmanuele Giuseppe Esposito     IO_CODE();
28941b3ff9feSKevin Wolf     assert_bdrv_graph_readable();
2895b984b296SVladimir Sementsov-Ogievskiy 
2896b984b296SVladimir Sementsov-Ogievskiy     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2897b984b296SVladimir Sementsov-Ogievskiy     if (ret < 0) {
2898b984b296SVladimir Sementsov-Ogievskiy         return ret;
2899b984b296SVladimir Sementsov-Ogievskiy     }
2900b33b354fSVladimir Sementsov-Ogievskiy 
2901b33b354fSVladimir Sementsov-Ogievskiy     if (!drv) {
2902b33b354fSVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
2903b33b354fSVladimir Sementsov-Ogievskiy     }
2904b33b354fSVladimir Sementsov-Ogievskiy 
2905b33b354fSVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
2906b33b354fSVladimir Sementsov-Ogievskiy 
2907ca5e2ad9SEmanuele Giuseppe Esposito     if (drv->bdrv_co_save_vmstate) {
2908ca5e2ad9SEmanuele Giuseppe Esposito         ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
2909b33b354fSVladimir Sementsov-Ogievskiy     } else if (child_bs) {
2910b33b354fSVladimir Sementsov-Ogievskiy         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2911b984b296SVladimir Sementsov-Ogievskiy     } else {
2912b984b296SVladimir Sementsov-Ogievskiy         ret = -ENOTSUP;
2913b33b354fSVladimir Sementsov-Ogievskiy     }
2914b33b354fSVladimir Sementsov-Ogievskiy 
2915b33b354fSVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
2916b33b354fSVladimir Sementsov-Ogievskiy 
2917dc88a467SStefan Hajnoczi     return ret;
29181a8ae822SKevin Wolf }
29191a8ae822SKevin Wolf 
292061007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
292161007b31SStefan Hajnoczi                       int64_t pos, int size)
292261007b31SStefan Hajnoczi {
29230d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2924b33b354fSVladimir Sementsov-Ogievskiy     int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2925384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
292661007b31SStefan Hajnoczi 
2927b33b354fSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : size;
292861007b31SStefan Hajnoczi }
292961007b31SStefan Hajnoczi 
293061007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
293161007b31SStefan Hajnoczi                       int64_t pos, int size)
293261007b31SStefan Hajnoczi {
29330d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2934b33b354fSVladimir Sementsov-Ogievskiy     int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2935384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
29365ddda0b8SKevin Wolf 
2937b33b354fSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : size;
293861007b31SStefan Hajnoczi }
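
/*
 * Illustrative usage sketch (not part of the original file): both wrappers
 * build a single-element QEMUIOVector around the buffer and return the
 * byte count on success, so a save/load round trip looks like this.
 */
#if 0
static int example_vmstate_roundtrip(BlockDriverState *bs, int64_t pos)
{
    uint8_t out[512] = { 0xaa };
    uint8_t in[512];
    int ret;

    ret = bdrv_save_vmstate(bs, out, pos, sizeof(out));
    if (ret < 0) {
        return ret;             /* on success, ret == sizeof(out) */
    }
    ret = bdrv_load_vmstate(bs, in, pos, sizeof(in));
    return ret < 0 ? ret : 0;
}
#endif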
293961007b31SStefan Hajnoczi 
294061007b31SStefan Hajnoczi /**************************************************************/
294161007b31SStefan Hajnoczi /* async I/Os */
294261007b31SStefan Hajnoczi 
294361007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb)
294461007b31SStefan Hajnoczi {
2945384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
294661007b31SStefan Hajnoczi     qemu_aio_ref(acb);
294761007b31SStefan Hajnoczi     bdrv_aio_cancel_async(acb);
294861007b31SStefan Hajnoczi     while (acb->refcnt > 1) {
294961007b31SStefan Hajnoczi         if (acb->aiocb_info->get_aio_context) {
295061007b31SStefan Hajnoczi             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
295161007b31SStefan Hajnoczi         } else if (acb->bs) {
29522f47da5fSPaolo Bonzini             /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
29532f47da5fSPaolo Bonzini              * assert that we're not using an I/O thread.  Thread-safe
29542f47da5fSPaolo Bonzini              * code should use bdrv_aio_cancel_async exclusively.
29552f47da5fSPaolo Bonzini              */
29562f47da5fSPaolo Bonzini             assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
295761007b31SStefan Hajnoczi             aio_poll(bdrv_get_aio_context(acb->bs), true);
295861007b31SStefan Hajnoczi         } else {
295961007b31SStefan Hajnoczi             abort();
296061007b31SStefan Hajnoczi         }
296161007b31SStefan Hajnoczi     }
296261007b31SStefan Hajnoczi     qemu_aio_unref(acb);
296361007b31SStefan Hajnoczi }
296461007b31SStefan Hajnoczi 
296561007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
296661007b31SStefan Hajnoczi  * cancel_async; otherwise we do nothing and let the request complete normally.
296761007b31SStefan Hajnoczi  * In either case the completion callback must be called. */
296861007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
296961007b31SStefan Hajnoczi {
2970384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
297161007b31SStefan Hajnoczi     if (acb->aiocb_info->cancel_async) {
297261007b31SStefan Hajnoczi         acb->aiocb_info->cancel_async(acb);
297361007b31SStefan Hajnoczi     }
297461007b31SStefan Hajnoczi }
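
/*
 * Illustrative usage sketch (not part of the original file): cancelling
 * from code that must not block.  The completion callback passed to the
 * original bdrv_aio_* call still runs exactly once, so cleanup belongs
 * there, not here.
 */
#if 0
static void example_cancel_nonblocking(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
    /* do not free request state here; wait for the completion callback */
}
#endif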
297561007b31SStefan Hajnoczi 
297661007b31SStefan Hajnoczi /**************************************************************/
297761007b31SStefan Hajnoczi /* Coroutine block device emulation */
297861007b31SStefan Hajnoczi 
297961007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
298061007b31SStefan Hajnoczi {
2981883833e2SMax Reitz     BdrvChild *primary_child = bdrv_primary_child(bs);
2982883833e2SMax Reitz     BdrvChild *child;
298349ca6259SFam Zheng     int current_gen;
298449ca6259SFam Zheng     int ret = 0;
2985384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
298661007b31SStefan Hajnoczi 
298788095349SEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
298899723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2989c32b82afSPavel Dovgalyuk 
29901e97be91SEmanuele Giuseppe Esposito     if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
299149ca6259SFam Zheng         bdrv_is_sg(bs)) {
299249ca6259SFam Zheng         goto early_exit;
299349ca6259SFam Zheng     }
299449ca6259SFam Zheng 
29953783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
2996d73415a3SStefan Hajnoczi     current_gen = qatomic_read(&bs->write_gen);
29973ff2f67aSEvgeny Yakovlev 
29983ff2f67aSEvgeny Yakovlev     /* Wait until any previous flushes are completed */
299999723548SPaolo Bonzini     while (bs->active_flush_req) {
30003783fa3dSPaolo Bonzini         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
30013ff2f67aSEvgeny Yakovlev     }
30023ff2f67aSEvgeny Yakovlev 
30033783fa3dSPaolo Bonzini     /* Flushes reach this point in nondecreasing current_gen order.  */
300499723548SPaolo Bonzini     bs->active_flush_req = true;
30053783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
30063ff2f67aSEvgeny Yakovlev 
3007c32b82afSPavel Dovgalyuk     /* Write back all layers by calling one driver function */
3008c32b82afSPavel Dovgalyuk     if (bs->drv->bdrv_co_flush) {
3009c32b82afSPavel Dovgalyuk         ret = bs->drv->bdrv_co_flush(bs);
3010c32b82afSPavel Dovgalyuk         goto out;
3011c32b82afSPavel Dovgalyuk     }
3012c32b82afSPavel Dovgalyuk 
301361007b31SStefan Hajnoczi     /* Write back cached data to the OS even with cache=unsafe */
3014883833e2SMax Reitz     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
301561007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_os) {
301661007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_os(bs);
301761007b31SStefan Hajnoczi         if (ret < 0) {
3018cdb5e315SFam Zheng             goto out;
301961007b31SStefan Hajnoczi         }
302061007b31SStefan Hajnoczi     }
302161007b31SStefan Hajnoczi 
302261007b31SStefan Hajnoczi     /* But don't actually force it to the disk with cache=unsafe */
302361007b31SStefan Hajnoczi     if (bs->open_flags & BDRV_O_NO_FLUSH) {
3024883833e2SMax Reitz         goto flush_children;
302561007b31SStefan Hajnoczi     }
302661007b31SStefan Hajnoczi 
30273ff2f67aSEvgeny Yakovlev     /* Check if we really need to flush anything */
30283ff2f67aSEvgeny Yakovlev     if (bs->flushed_gen == current_gen) {
3029883833e2SMax Reitz         goto flush_children;
30303ff2f67aSEvgeny Yakovlev     }
30313ff2f67aSEvgeny Yakovlev 
3032883833e2SMax Reitz     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
3033d470ad42SMax Reitz     if (!bs->drv) {
3034d470ad42SMax Reitz         /* bs->drv->bdrv_co_flush() might have ejected the BDS
3035d470ad42SMax Reitz          * (even in case of apparent success) */
3036d470ad42SMax Reitz         ret = -ENOMEDIUM;
3037d470ad42SMax Reitz         goto out;
3038d470ad42SMax Reitz     }
303961007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_disk) {
304061007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_disk(bs);
304161007b31SStefan Hajnoczi     } else if (bs->drv->bdrv_aio_flush) {
304261007b31SStefan Hajnoczi         BlockAIOCB *acb;
304361007b31SStefan Hajnoczi         CoroutineIOCompletion co = {
304461007b31SStefan Hajnoczi             .coroutine = qemu_coroutine_self(),
304561007b31SStefan Hajnoczi         };
304661007b31SStefan Hajnoczi 
304761007b31SStefan Hajnoczi         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
304861007b31SStefan Hajnoczi         if (acb == NULL) {
304961007b31SStefan Hajnoczi             ret = -EIO;
305061007b31SStefan Hajnoczi         } else {
305161007b31SStefan Hajnoczi             qemu_coroutine_yield();
305261007b31SStefan Hajnoczi             ret = co.ret;
305361007b31SStefan Hajnoczi         }
305461007b31SStefan Hajnoczi     } else {
305561007b31SStefan Hajnoczi         /*
305661007b31SStefan Hajnoczi          * Some block drivers always operate in either writethrough or unsafe
305761007b31SStefan Hajnoczi          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
305861007b31SStefan Hajnoczi          * know how the server works (because the behaviour is hardcoded or
305961007b31SStefan Hajnoczi          * depends on server-side configuration), so we can't ensure that
306061007b31SStefan Hajnoczi          * everything is safe on disk. Returning an error doesn't work because
306161007b31SStefan Hajnoczi          * that would break guests even if the server operates in writethrough
306261007b31SStefan Hajnoczi          * mode.
306361007b31SStefan Hajnoczi          *
306461007b31SStefan Hajnoczi          * Let's hope the user knows what he's doing.
306561007b31SStefan Hajnoczi          */
306661007b31SStefan Hajnoczi         ret = 0;
306761007b31SStefan Hajnoczi     }
30683ff2f67aSEvgeny Yakovlev 
306961007b31SStefan Hajnoczi     if (ret < 0) {
3070cdb5e315SFam Zheng         goto out;
307161007b31SStefan Hajnoczi     }
307261007b31SStefan Hajnoczi 
307361007b31SStefan Hajnoczi     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
307461007b31SStefan Hajnoczi      * in the case of cache=unsafe, so there are no useless flushes.
307561007b31SStefan Hajnoczi      */
3076883833e2SMax Reitz flush_children:
3077883833e2SMax Reitz     ret = 0;
3078883833e2SMax Reitz     QLIST_FOREACH(child, &bs->children, next) {
3079883833e2SMax Reitz         if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
3080883833e2SMax Reitz             int this_child_ret = bdrv_co_flush(child->bs);
3081883833e2SMax Reitz             if (!ret) {
3082883833e2SMax Reitz                 ret = this_child_ret;
3083883833e2SMax Reitz             }
3084883833e2SMax Reitz         }
3085883833e2SMax Reitz     }
3086883833e2SMax Reitz 
3087cdb5e315SFam Zheng out:
30883ff2f67aSEvgeny Yakovlev     /* Notify any pending flushes that we have completed */
3089e6af1e08SKevin Wolf     if (ret == 0) {
30903ff2f67aSEvgeny Yakovlev         bs->flushed_gen = current_gen;
3091e6af1e08SKevin Wolf     }
30923783fa3dSPaolo Bonzini 
30933783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
309499723548SPaolo Bonzini     bs->active_flush_req = false;
3095156af3acSDenis V. Lunev     /* Return value is ignored - it's ok if wait queue is empty */
3096156af3acSDenis V. Lunev     qemu_co_queue_next(&bs->flush_queue);
30973783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
30983ff2f67aSEvgeny Yakovlev 
309949ca6259SFam Zheng early_exit:
310099723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
3101cdb5e315SFam Zheng     return ret;
310261007b31SStefan Hajnoczi }
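
/*
 * Illustrative usage sketch (not part of the original file): making a
 * write durable from a coroutine.  Thanks to the write-generation tracking
 * above, back-to-back flushes with no intervening writes are cheap no-ops.
 */
#if 0
static int coroutine_fn
example_durable_write(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret = bdrv_co_pwritev(child, offset, qiov->size, qiov, 0);

    if (ret < 0) {
        return ret;
    }
    return bdrv_co_flush(child->bs);
}
#endif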
310361007b31SStefan Hajnoczi 
3104d93e5726SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
3105d93e5726SVladimir Sementsov-Ogievskiy                                   int64_t bytes)
310661007b31SStefan Hajnoczi {
3107b1066c87SFam Zheng     BdrvTrackedRequest req;
310839af49c0SVladimir Sementsov-Ogievskiy     int ret;
310939af49c0SVladimir Sementsov-Ogievskiy     int64_t max_pdiscard;
31103482b9bcSEric Blake     int head, tail, align;
31110b9fd3f4SFam Zheng     BlockDriverState *bs = child->bs;
3112384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
31139a5a1c62SEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
311461007b31SStefan Hajnoczi 
31151e97be91SEmanuele Giuseppe Esposito     if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
311661007b31SStefan Hajnoczi         return -ENOMEDIUM;
311761007b31SStefan Hajnoczi     }
311861007b31SStefan Hajnoczi 
3119d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
3120d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
3121d6883bc9SVladimir Sementsov-Ogievskiy     }
3122d6883bc9SVladimir Sementsov-Ogievskiy 
312369b55e03SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request(offset, bytes, NULL);
31248b117001SVladimir Sementsov-Ogievskiy     if (ret < 0) {
31258b117001SVladimir Sementsov-Ogievskiy         return ret;
312661007b31SStefan Hajnoczi     }
312761007b31SStefan Hajnoczi 
312861007b31SStefan Hajnoczi     /* Do nothing if disabled.  */
312961007b31SStefan Hajnoczi     if (!(bs->open_flags & BDRV_O_UNMAP)) {
313061007b31SStefan Hajnoczi         return 0;
313161007b31SStefan Hajnoczi     }
313261007b31SStefan Hajnoczi 
313302aefe43SEric Blake     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
313461007b31SStefan Hajnoczi         return 0;
313561007b31SStefan Hajnoczi     }
313661007b31SStefan Hajnoczi 
31370bc329fbSHanna Reitz     /* Invalidate the cached block-status data range if this discard overlaps */
31380bc329fbSHanna Reitz     bdrv_bsc_invalidate_range(bs, offset, bytes);
31390bc329fbSHanna Reitz 
31403482b9bcSEric Blake     /* Discard is advisory, but some devices track and coalesce
31413482b9bcSEric Blake      * unaligned requests, so we must pass everything down rather than
31423482b9bcSEric Blake      * rounding here.  Still, most devices will just silently ignore
31433482b9bcSEric Blake      * unaligned requests (by returning -ENOTSUP), so we must fragment
31443482b9bcSEric Blake      * the request accordingly.  */
314502aefe43SEric Blake     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3146b8d0a980SEric Blake     assert(align % bs->bl.request_alignment == 0);
3147b8d0a980SEric Blake     head = offset % align;
3148f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % align;
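
    /*
     * Worked example (not part of the original file), assuming
     * pdiscard_alignment is 64 KiB and request_alignment is 4 KiB:
     * discarding 204800 bytes at offset 12288 yields head = 12288 and
     * tail = 20480, and the loop below fragments the request into 53248
     * bytes at 12288 (up to the first 64 KiB boundary), 131072 aligned
     * bytes at 65536, and the 20480 tail bytes at 196608.
     */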
31499f1963b3SEric Blake 
315099723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
3151f5a5ca79SManos Pitsidianakis     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
315250824995SFam Zheng 
315300695c27SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3154ec050f77SDenis V. Lunev     if (ret < 0) {
3155ec050f77SDenis V. Lunev         goto out;
3156ec050f77SDenis V. Lunev     }
3157ec050f77SDenis V. Lunev 
31586a8f3dbbSVladimir Sementsov-Ogievskiy     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
31599f1963b3SEric Blake                                    align);
31603482b9bcSEric Blake     assert(max_pdiscard >= bs->bl.request_alignment);
31619f1963b3SEric Blake 
3162f5a5ca79SManos Pitsidianakis     while (bytes > 0) {
3163d93e5726SVladimir Sementsov-Ogievskiy         int64_t num = bytes;
31643482b9bcSEric Blake 
31653482b9bcSEric Blake         if (head) {
31663482b9bcSEric Blake             /* Make small requests to get to alignment boundaries. */
3167f5a5ca79SManos Pitsidianakis             num = MIN(bytes, align - head);
31683482b9bcSEric Blake             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
31693482b9bcSEric Blake                 num %= bs->bl.request_alignment;
31703482b9bcSEric Blake             }
31713482b9bcSEric Blake             head = (head + num) % align;
31723482b9bcSEric Blake             assert(num < max_pdiscard);
31733482b9bcSEric Blake         } else if (tail) {
31743482b9bcSEric Blake             if (num > align) {
31753482b9bcSEric Blake                 /* Shorten the request to the last aligned cluster.  */
31763482b9bcSEric Blake                 num -= tail;
31773482b9bcSEric Blake             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
31783482b9bcSEric Blake                        tail > bs->bl.request_alignment) {
31793482b9bcSEric Blake                 tail %= bs->bl.request_alignment;
31803482b9bcSEric Blake                 num -= tail;
31813482b9bcSEric Blake             }
31823482b9bcSEric Blake         }
31833482b9bcSEric Blake         /* limit request size */
31843482b9bcSEric Blake         if (num > max_pdiscard) {
31853482b9bcSEric Blake             num = max_pdiscard;
31863482b9bcSEric Blake         }
318761007b31SStefan Hajnoczi 
3188d470ad42SMax Reitz         if (!bs->drv) {
3189d470ad42SMax Reitz             ret = -ENOMEDIUM;
3190d470ad42SMax Reitz             goto out;
3191d470ad42SMax Reitz         }
319247a5486dSEric Blake         if (bs->drv->bdrv_co_pdiscard) {
319347a5486dSEric Blake             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
319461007b31SStefan Hajnoczi         } else {
319561007b31SStefan Hajnoczi             BlockAIOCB *acb;
319661007b31SStefan Hajnoczi             CoroutineIOCompletion co = {
319761007b31SStefan Hajnoczi                 .coroutine = qemu_coroutine_self(),
319861007b31SStefan Hajnoczi             };
319961007b31SStefan Hajnoczi 
32004da444a0SEric Blake             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
320161007b31SStefan Hajnoczi                                              bdrv_co_io_em_complete, &co);
320261007b31SStefan Hajnoczi             if (acb == NULL) {
3203b1066c87SFam Zheng                 ret = -EIO;
3204b1066c87SFam Zheng                 goto out;
320561007b31SStefan Hajnoczi             } else {
320661007b31SStefan Hajnoczi                 qemu_coroutine_yield();
320761007b31SStefan Hajnoczi                 ret = co.ret;
320861007b31SStefan Hajnoczi             }
320961007b31SStefan Hajnoczi         }
321061007b31SStefan Hajnoczi         if (ret && ret != -ENOTSUP) {
3211b1066c87SFam Zheng             goto out;
321261007b31SStefan Hajnoczi         }
321361007b31SStefan Hajnoczi 
32149f1963b3SEric Blake         offset += num;
3215f5a5ca79SManos Pitsidianakis         bytes -= num;
321661007b31SStefan Hajnoczi     }
3217b1066c87SFam Zheng     ret = 0;
3218b1066c87SFam Zheng out:
321900695c27SFam Zheng     bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3220b1066c87SFam Zheng     tracked_request_end(&req);
322199723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
3222b1066c87SFam Zheng     return ret;
322361007b31SStefan Hajnoczi }
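
/*
 * Illustrative usage sketch (not part of the original file): discarding
 * the first megabyte through a BdrvChild.  Discard is advisory, so 0 is
 * also the result when BDRV_O_UNMAP is clear or the driver has no
 * discard hook.
 */
#if 0
static int coroutine_fn example_discard_first_mb(BdrvChild *child)
{
    return bdrv_co_pdiscard(child, 0, 1024 * 1024);
}
#endif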
322461007b31SStefan Hajnoczi 
3225881a4c55SPaolo Bonzini int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
322661007b31SStefan Hajnoczi {
322761007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
32285c5ae76aSFam Zheng     CoroutineIOCompletion co = {
32295c5ae76aSFam Zheng         .coroutine = qemu_coroutine_self(),
32305c5ae76aSFam Zheng     };
32315c5ae76aSFam Zheng     BlockAIOCB *acb;
3232384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
323326c518abSKevin Wolf     assert_bdrv_graph_readable();
323461007b31SStefan Hajnoczi 
323599723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
323616a389dcSKevin Wolf     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
32375c5ae76aSFam Zheng         co.ret = -ENOTSUP;
32385c5ae76aSFam Zheng         goto out;
32395c5ae76aSFam Zheng     }
32405c5ae76aSFam Zheng 
324116a389dcSKevin Wolf     if (drv->bdrv_co_ioctl) {
324216a389dcSKevin Wolf         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
324316a389dcSKevin Wolf     } else {
32445c5ae76aSFam Zheng         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
32455c5ae76aSFam Zheng         if (!acb) {
3246c8a9fd80SFam Zheng             co.ret = -ENOTSUP;
3247c8a9fd80SFam Zheng             goto out;
32485c5ae76aSFam Zheng         }
32495c5ae76aSFam Zheng         qemu_coroutine_yield();
325016a389dcSKevin Wolf     }
32515c5ae76aSFam Zheng out:
325299723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
32535c5ae76aSFam Zheng     return co.ret;
32545c5ae76aSFam Zheng }
32555c5ae76aSFam Zheng 
32566d43eaa3SSam Li int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
32576d43eaa3SSam Li                         unsigned int *nr_zones,
32586d43eaa3SSam Li                         BlockZoneDescriptor *zones)
32596d43eaa3SSam Li {
32606d43eaa3SSam Li     BlockDriver *drv = bs->drv;
32616d43eaa3SSam Li     CoroutineIOCompletion co = {
32626d43eaa3SSam Li             .coroutine = qemu_coroutine_self(),
32636d43eaa3SSam Li     };
32646d43eaa3SSam Li     IO_CODE();
32656d43eaa3SSam Li 
32666d43eaa3SSam Li     bdrv_inc_in_flight(bs);
32676d43eaa3SSam Li     if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
32686d43eaa3SSam Li         co.ret = -ENOTSUP;
32696d43eaa3SSam Li         goto out;
32706d43eaa3SSam Li     }
32716d43eaa3SSam Li     co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
32726d43eaa3SSam Li out:
32736d43eaa3SSam Li     bdrv_dec_in_flight(bs);
32746d43eaa3SSam Li     return co.ret;
32756d43eaa3SSam Li }
32766d43eaa3SSam Li 
32776d43eaa3SSam Li int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
32786d43eaa3SSam Li         int64_t offset, int64_t len)
32796d43eaa3SSam Li {
32806d43eaa3SSam Li     BlockDriver *drv = bs->drv;
32816d43eaa3SSam Li     CoroutineIOCompletion co = {
32826d43eaa3SSam Li             .coroutine = qemu_coroutine_self(),
32836d43eaa3SSam Li     };
32846d43eaa3SSam Li     IO_CODE();
32856d43eaa3SSam Li 
32866d43eaa3SSam Li     bdrv_inc_in_flight(bs);
32876d43eaa3SSam Li     if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
32886d43eaa3SSam Li         co.ret = -ENOTSUP;
32896d43eaa3SSam Li         goto out;
32906d43eaa3SSam Li     }
32916d43eaa3SSam Li     co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
32926d43eaa3SSam Li out:
32936d43eaa3SSam Li     bdrv_dec_in_flight(bs);
32946d43eaa3SSam Li     return co.ret;
32956d43eaa3SSam Li }
32966d43eaa3SSam Li 
32974751d09aSSam Li int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
32984751d09aSSam Li                         QEMUIOVector *qiov,
32994751d09aSSam Li                         BdrvRequestFlags flags)
33004751d09aSSam Li {
33014751d09aSSam Li     int ret;
33024751d09aSSam Li     BlockDriver *drv = bs->drv;
33034751d09aSSam Li     CoroutineIOCompletion co = {
33044751d09aSSam Li             .coroutine = qemu_coroutine_self(),
33054751d09aSSam Li     };
33064751d09aSSam Li     IO_CODE();
33074751d09aSSam Li 
33084751d09aSSam Li     ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
33094751d09aSSam Li     if (ret < 0) {
33104751d09aSSam Li         return ret;
33114751d09aSSam Li     }
33124751d09aSSam Li 
33134751d09aSSam Li     bdrv_inc_in_flight(bs);
33144751d09aSSam Li     if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
33154751d09aSSam Li         co.ret = -ENOTSUP;
33164751d09aSSam Li         goto out;
33174751d09aSSam Li     }
33184751d09aSSam Li     co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
33194751d09aSSam Li out:
33204751d09aSSam Li     bdrv_dec_in_flight(bs);
33214751d09aSSam Li     return co.ret;
33224751d09aSSam Li }
33234751d09aSSam Li 
332461007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size)
332561007b31SStefan Hajnoczi {
3326384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
332761007b31SStefan Hajnoczi     return qemu_memalign(bdrv_opt_mem_align(bs), size);
332861007b31SStefan Hajnoczi }
332961007b31SStefan Hajnoczi 
333061007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size)
333161007b31SStefan Hajnoczi {
3332384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
333361007b31SStefan Hajnoczi     return memset(qemu_blockalign(bs, size), 0, size);
333461007b31SStefan Hajnoczi }
333561007b31SStefan Hajnoczi 
333661007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
333761007b31SStefan Hajnoczi {
333861007b31SStefan Hajnoczi     size_t align = bdrv_opt_mem_align(bs);
3339384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
334061007b31SStefan Hajnoczi 
334161007b31SStefan Hajnoczi     /* Ensure that NULL is never returned on success */
334261007b31SStefan Hajnoczi     assert(align > 0);
334361007b31SStefan Hajnoczi     if (size == 0) {
334461007b31SStefan Hajnoczi         size = align;
334561007b31SStefan Hajnoczi     }
334661007b31SStefan Hajnoczi 
334761007b31SStefan Hajnoczi     return qemu_try_memalign(align, size);
334861007b31SStefan Hajnoczi }
334961007b31SStefan Hajnoczi 
335061007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
335161007b31SStefan Hajnoczi {
335261007b31SStefan Hajnoczi     void *mem = qemu_try_blockalign(bs, size);
3353384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
335461007b31SStefan Hajnoczi 
335561007b31SStefan Hajnoczi     if (mem) {
335661007b31SStefan Hajnoczi         memset(mem, 0, size);
335761007b31SStefan Hajnoczi     }
335861007b31SStefan Hajnoczi 
335961007b31SStefan Hajnoczi     return mem;
336061007b31SStefan Hajnoczi }
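
/*
 * Illustrative usage sketch (not part of the original file): the
 * try-variants return NULL on allocation failure instead of aborting.
 * Buffers from any of these helpers are released with qemu_vfree().
 */
#if 0
static int example_aligned_buffer(BlockDriverState *bs, size_t size)
{
    void *buf = qemu_try_blockalign0(bs, size);

    if (!buf) {
        return -ENOMEM;
    }
    /* use the zeroed, I/O-aligned buffer here */
    qemu_vfree(buf);
    return 0;
}
#endif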
336161007b31SStefan Hajnoczi 
3362f4ec04baSStefan Hajnoczi /* Helper that undoes bdrv_register_buf() when it fails partway through */
3363d9249c25SKevin Wolf static void GRAPH_RDLOCK
3364d9249c25SKevin Wolf bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
3365f4ec04baSStefan Hajnoczi                            BdrvChild *final_child)
3366f4ec04baSStefan Hajnoczi {
3367f4ec04baSStefan Hajnoczi     BdrvChild *child;
3368f4ec04baSStefan Hajnoczi 
3369d9249c25SKevin Wolf     GLOBAL_STATE_CODE();
3370d9249c25SKevin Wolf     assert_bdrv_graph_readable();
3371d9249c25SKevin Wolf 
3372f4ec04baSStefan Hajnoczi     QLIST_FOREACH(child, &bs->children, next) {
3373f4ec04baSStefan Hajnoczi         if (child == final_child) {
3374f4ec04baSStefan Hajnoczi             break;
3375f4ec04baSStefan Hajnoczi         }
3376f4ec04baSStefan Hajnoczi 
3377f4ec04baSStefan Hajnoczi         bdrv_unregister_buf(child->bs, host, size);
3378f4ec04baSStefan Hajnoczi     }
3379f4ec04baSStefan Hajnoczi 
3380f4ec04baSStefan Hajnoczi     if (bs->drv && bs->drv->bdrv_unregister_buf) {
3381f4ec04baSStefan Hajnoczi         bs->drv->bdrv_unregister_buf(bs, host, size);
3382f4ec04baSStefan Hajnoczi     }
3383f4ec04baSStefan Hajnoczi }
3384f4ec04baSStefan Hajnoczi 
3385f4ec04baSStefan Hajnoczi bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
3386f4ec04baSStefan Hajnoczi                        Error **errp)
338723d0ba93SFam Zheng {
338823d0ba93SFam Zheng     BdrvChild *child;
338923d0ba93SFam Zheng 
3390f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
3391d9249c25SKevin Wolf     GRAPH_RDLOCK_GUARD_MAINLOOP();
3392d9249c25SKevin Wolf 
339323d0ba93SFam Zheng     if (bs->drv && bs->drv->bdrv_register_buf) {
3394f4ec04baSStefan Hajnoczi         if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
3395f4ec04baSStefan Hajnoczi             return false;
3396f4ec04baSStefan Hajnoczi         }
339723d0ba93SFam Zheng     }
339823d0ba93SFam Zheng     QLIST_FOREACH(child, &bs->children, next) {
3399f4ec04baSStefan Hajnoczi         if (!bdrv_register_buf(child->bs, host, size, errp)) {
3400f4ec04baSStefan Hajnoczi             bdrv_register_buf_rollback(bs, host, size, child);
3401f4ec04baSStefan Hajnoczi             return false;
340223d0ba93SFam Zheng         }
340323d0ba93SFam Zheng     }
3404f4ec04baSStefan Hajnoczi     return true;
3405f4ec04baSStefan Hajnoczi }
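
/*
 * Illustrative usage sketch (not part of the original file): registration
 * either succeeds for the whole subtree or is rolled back as a unit, so a
 * caller pairs one successful register with exactly one unregister.
 */
#if 0
static bool example_with_registered_buf(BlockDriverState *bs, void *host,
                                        size_t size, Error **errp)
{
    if (!bdrv_register_buf(bs, host, size, errp)) {
        return false;       /* rollback already happened, nothing to undo */
    }
    /* perform I/O that can take the zero-copy fast path here */
    bdrv_unregister_buf(bs, host, size);
    return true;
}
#endif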
340623d0ba93SFam Zheng 
34074f384011SStefan Hajnoczi void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
340823d0ba93SFam Zheng {
340923d0ba93SFam Zheng     BdrvChild *child;
341023d0ba93SFam Zheng 
3411f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
3412d9249c25SKevin Wolf     GRAPH_RDLOCK_GUARD_MAINLOOP();
3413d9249c25SKevin Wolf 
341423d0ba93SFam Zheng     if (bs->drv && bs->drv->bdrv_unregister_buf) {
34154f384011SStefan Hajnoczi         bs->drv->bdrv_unregister_buf(bs, host, size);
341623d0ba93SFam Zheng     }
341723d0ba93SFam Zheng     QLIST_FOREACH(child, &bs->children, next) {
34184f384011SStefan Hajnoczi         bdrv_unregister_buf(child->bs, host, size);
341923d0ba93SFam Zheng     }
342023d0ba93SFam Zheng }
3421fcc67678SFam Zheng 
3422abaf8b75SKevin Wolf static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
3423a5215b8fSVladimir Sementsov-Ogievskiy         BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3424a5215b8fSVladimir Sementsov-Ogievskiy         int64_t dst_offset, int64_t bytes,
342567b51fb9SVladimir Sementsov-Ogievskiy         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3426fcc67678SFam Zheng         bool recurse_src)
3427fcc67678SFam Zheng {
3428999658a0SVladimir Sementsov-Ogievskiy     BdrvTrackedRequest req;
3429fcc67678SFam Zheng     int ret;
3430742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3431fcc67678SFam Zheng 
3432fe0480d6SKevin Wolf     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3433fe0480d6SKevin Wolf     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3434fe0480d6SKevin Wolf     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
343545e62b46SVladimir Sementsov-Ogievskiy     assert(!(read_flags & BDRV_REQ_NO_WAIT));
343645e62b46SVladimir Sementsov-Ogievskiy     assert(!(write_flags & BDRV_REQ_NO_WAIT));
3437fe0480d6SKevin Wolf 
34381e97be91SEmanuele Giuseppe Esposito     if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
3439fcc67678SFam Zheng         return -ENOMEDIUM;
3440fcc67678SFam Zheng     }
344163f4ad11SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3442fcc67678SFam Zheng     if (ret) {
3443fcc67678SFam Zheng         return ret;
3444fcc67678SFam Zheng     }
344567b51fb9SVladimir Sementsov-Ogievskiy     if (write_flags & BDRV_REQ_ZERO_WRITE) {
344667b51fb9SVladimir Sementsov-Ogievskiy         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3447fcc67678SFam Zheng     }
3448fcc67678SFam Zheng 
34491e97be91SEmanuele Giuseppe Esposito     if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
3450d4d3e5a0SFam Zheng         return -ENOMEDIUM;
3451d4d3e5a0SFam Zheng     }
345263f4ad11SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3453d4d3e5a0SFam Zheng     if (ret) {
3454d4d3e5a0SFam Zheng         return ret;
3455d4d3e5a0SFam Zheng     }
3456d4d3e5a0SFam Zheng 
3457fcc67678SFam Zheng     if (!src->bs->drv->bdrv_co_copy_range_from
3458fcc67678SFam Zheng         || !dst->bs->drv->bdrv_co_copy_range_to
3459fcc67678SFam Zheng         || src->bs->encrypted || dst->bs->encrypted) {
3460fcc67678SFam Zheng         return -ENOTSUP;
3461fcc67678SFam Zheng     }
3462999658a0SVladimir Sementsov-Ogievskiy 
3463999658a0SVladimir Sementsov-Ogievskiy     if (recurse_src) {
3464d4d3e5a0SFam Zheng         bdrv_inc_in_flight(src->bs);
3465999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, src->bs, src_offset, bytes,
3466999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_READ);
346737aec7d7SFam Zheng 
346809d2f948SVladimir Sementsov-Ogievskiy         /* BDRV_REQ_SERIALISING is only for write operation */
346909d2f948SVladimir Sementsov-Ogievskiy         assert(!(read_flags & BDRV_REQ_SERIALISING));
3470304d9d7fSMax Reitz         bdrv_wait_serialising_requests(&req);
3471999658a0SVladimir Sementsov-Ogievskiy 
347237aec7d7SFam Zheng         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3473fcc67678SFam Zheng                                                     src, src_offset,
3474fcc67678SFam Zheng                                                     dst, dst_offset,
347567b51fb9SVladimir Sementsov-Ogievskiy                                                     bytes,
347667b51fb9SVladimir Sementsov-Ogievskiy                                                     read_flags, write_flags);
3477999658a0SVladimir Sementsov-Ogievskiy 
3478999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3479999658a0SVladimir Sementsov-Ogievskiy         bdrv_dec_in_flight(src->bs);
3480fcc67678SFam Zheng     } else {
3481999658a0SVladimir Sementsov-Ogievskiy         bdrv_inc_in_flight(dst->bs);
3482999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3483999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_WRITE);
34840eb1e891SFam Zheng         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
34850eb1e891SFam Zheng                                         write_flags);
34860eb1e891SFam Zheng         if (!ret) {
348737aec7d7SFam Zheng             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3488fcc67678SFam Zheng                                                       src, src_offset,
3489fcc67678SFam Zheng                                                       dst, dst_offset,
349067b51fb9SVladimir Sementsov-Ogievskiy                                                       bytes,
349167b51fb9SVladimir Sementsov-Ogievskiy                                                       read_flags, write_flags);
34920eb1e891SFam Zheng         }
34930eb1e891SFam Zheng         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3494999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3495d4d3e5a0SFam Zheng         bdrv_dec_in_flight(dst->bs);
3496999658a0SVladimir Sementsov-Ogievskiy     }
3497999658a0SVladimir Sementsov-Ogievskiy 
349837aec7d7SFam Zheng     return ret;
3499fcc67678SFam Zheng }
3500fcc67678SFam Zheng 
3501fcc67678SFam Zheng /* Copy range from @src to @dst.
3502fcc67678SFam Zheng  *
3503fcc67678SFam Zheng  * See the comment on bdrv_co_copy_range for the parameter and return value
3504fcc67678SFam Zheng  * semantics. */
3505a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3506a5215b8fSVladimir Sementsov-Ogievskiy                                          BdrvChild *dst, int64_t dst_offset,
3507a5215b8fSVladimir Sementsov-Ogievskiy                                          int64_t bytes,
350867b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags read_flags,
350967b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags write_flags)
3510fcc67678SFam Zheng {
3511967d7905SEmanuele Giuseppe Esposito     IO_CODE();
3512742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3513ecc983a5SFam Zheng     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3514ecc983a5SFam Zheng                                   read_flags, write_flags);
3515fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
351667b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, true);
3517fcc67678SFam Zheng }
3518fcc67678SFam Zheng 
3519fcc67678SFam Zheng /* Copy range from @src to @dst.
3520fcc67678SFam Zheng  *
3521fcc67678SFam Zheng  * See the comment on bdrv_co_copy_range for the parameter and return value
3522fcc67678SFam Zheng  * semantics. */
3523a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3524a5215b8fSVladimir Sementsov-Ogievskiy                                        BdrvChild *dst, int64_t dst_offset,
3525a5215b8fSVladimir Sementsov-Ogievskiy                                        int64_t bytes,
352667b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags read_flags,
352767b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags write_flags)
3528fcc67678SFam Zheng {
3529967d7905SEmanuele Giuseppe Esposito     IO_CODE();
3530742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3531ecc983a5SFam Zheng     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3532ecc983a5SFam Zheng                                 read_flags, write_flags);
3533fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
353467b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, false);
3535fcc67678SFam Zheng }
3536fcc67678SFam Zheng 
3537a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3538a5215b8fSVladimir Sementsov-Ogievskiy                                     BdrvChild *dst, int64_t dst_offset,
3539a5215b8fSVladimir Sementsov-Ogievskiy                                     int64_t bytes, BdrvRequestFlags read_flags,
354067b51fb9SVladimir Sementsov-Ogievskiy                                     BdrvRequestFlags write_flags)
3541fcc67678SFam Zheng {
3542384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
3543742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3544742bf09bSEmanuele Giuseppe Esposito 
354537aec7d7SFam Zheng     return bdrv_co_copy_range_from(src, src_offset,
3546fcc67678SFam Zheng                                    dst, dst_offset,
354767b51fb9SVladimir Sementsov-Ogievskiy                                    bytes, read_flags, write_flags);
3548fcc67678SFam Zheng }
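
/*
 * Illustrative usage sketch (not part of the original file): offloaded
 * copy between two children.  BDRV_REQ_ZERO_WRITE in write_flags
 * short-circuits into bdrv_co_pwrite_zeroes(), and -ENOTSUP tells the
 * caller to fall back to an ordinary bounce-buffered read/write loop.
 */
#if 0
static int coroutine_fn
example_copy(BdrvChild *src, BdrvChild *dst, int64_t offset, int64_t bytes)
{
    int ret = bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);

    if (ret == -ENOTSUP) {
        /* example_bounce_copy() is hypothetical, shown only for shape */
        ret = example_bounce_copy(src, dst, offset, bytes);
    }
    return ret;
}
#endif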
35493d9f2d2aSKevin Wolf 
35503d9f2d2aSKevin Wolf static void bdrv_parent_cb_resize(BlockDriverState *bs)
35513d9f2d2aSKevin Wolf {
35523d9f2d2aSKevin Wolf     BdrvChild *c;
35533d9f2d2aSKevin Wolf     QLIST_FOREACH(c, &bs->parents, next_parent) {
3554bd86fb99SMax Reitz         if (c->klass->resize) {
3555bd86fb99SMax Reitz             c->klass->resize(c);
35563d9f2d2aSKevin Wolf         }
35573d9f2d2aSKevin Wolf     }
35583d9f2d2aSKevin Wolf }
35593d9f2d2aSKevin Wolf 
35603d9f2d2aSKevin Wolf /**
35613d9f2d2aSKevin Wolf  * Truncate file to 'offset' bytes (needed only for file protocols)
3562c80d8b06SMax Reitz  *
3563c80d8b06SMax Reitz  * If 'exact' is true, the file must be resized to exactly the given
3564c80d8b06SMax Reitz  * 'offset'.  Otherwise, it is sufficient for the node to be at least
3565c80d8b06SMax Reitz  * 'offset' bytes in length.
35663d9f2d2aSKevin Wolf  */
3567c80d8b06SMax Reitz int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
35687b8e4857SKevin Wolf                                   PreallocMode prealloc, BdrvRequestFlags flags,
35697b8e4857SKevin Wolf                                   Error **errp)
35703d9f2d2aSKevin Wolf {
35713d9f2d2aSKevin Wolf     BlockDriverState *bs = child->bs;
357223b93525SMax Reitz     BdrvChild *filtered, *backing;
35733d9f2d2aSKevin Wolf     BlockDriver *drv = bs->drv;
35741bc5f09fSKevin Wolf     BdrvTrackedRequest req;
35751bc5f09fSKevin Wolf     int64_t old_size, new_bytes;
35763d9f2d2aSKevin Wolf     int ret;
3577384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
3578c2b8e315SKevin Wolf     assert_bdrv_graph_readable();
35793d9f2d2aSKevin Wolf 
35803d9f2d2aSKevin Wolf     /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
35813d9f2d2aSKevin Wolf     if (!drv) {
35823d9f2d2aSKevin Wolf         error_setg(errp, "No medium inserted");
35833d9f2d2aSKevin Wolf         return -ENOMEDIUM;
35843d9f2d2aSKevin Wolf     }
35853d9f2d2aSKevin Wolf     if (offset < 0) {
35863d9f2d2aSKevin Wolf         error_setg(errp, "Image size cannot be negative");
35873d9f2d2aSKevin Wolf         return -EINVAL;
35883d9f2d2aSKevin Wolf     }
35893d9f2d2aSKevin Wolf 
359069b55e03SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request(offset, 0, errp);
35918b117001SVladimir Sementsov-Ogievskiy     if (ret < 0) {
35928b117001SVladimir Sementsov-Ogievskiy         return ret;
35938b117001SVladimir Sementsov-Ogievskiy     }
35948b117001SVladimir Sementsov-Ogievskiy 
3595*0af02bd1SPaolo Bonzini     old_size = bdrv_co_getlength(bs);
35961bc5f09fSKevin Wolf     if (old_size < 0) {
35971bc5f09fSKevin Wolf         error_setg_errno(errp, -old_size, "Failed to get old image size");
35981bc5f09fSKevin Wolf         return old_size;
35991bc5f09fSKevin Wolf     }
36001bc5f09fSKevin Wolf 
360197efa869SEric Blake     if (bdrv_is_read_only(bs)) {
360297efa869SEric Blake         error_setg(errp, "Image is read-only");
360397efa869SEric Blake         return -EACCES;
360497efa869SEric Blake     }
360597efa869SEric Blake 
36061bc5f09fSKevin Wolf     if (offset > old_size) {
36071bc5f09fSKevin Wolf         new_bytes = offset - old_size;
36081bc5f09fSKevin Wolf     } else {
36091bc5f09fSKevin Wolf         new_bytes = 0;
36101bc5f09fSKevin Wolf     }
36111bc5f09fSKevin Wolf 
36123d9f2d2aSKevin Wolf     bdrv_inc_in_flight(bs);
36135416a11eSFam Zheng     tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
36145416a11eSFam Zheng                           BDRV_TRACKED_TRUNCATE);
36151bc5f09fSKevin Wolf 
36161bc5f09fSKevin Wolf     /* If we are growing the image and potentially using preallocation for the
36171bc5f09fSKevin Wolf      * new area, we need to make sure that no write requests are made to it
36181bc5f09fSKevin Wolf      * concurrently or they might be overwritten by preallocation. */
36191bc5f09fSKevin Wolf     if (new_bytes) {
36208ac5aab2SVladimir Sementsov-Ogievskiy         bdrv_make_request_serialising(&req, 1);
3621cd47d792SFam Zheng     }
3622cd47d792SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3623cd47d792SFam Zheng                                     0);
3624cd47d792SFam Zheng     if (ret < 0) {
3625cd47d792SFam Zheng         error_setg_errno(errp, -ret,
3626cd47d792SFam Zheng                          "Failed to prepare request for truncation");
3627cd47d792SFam Zheng         goto out;
36281bc5f09fSKevin Wolf     }
36293d9f2d2aSKevin Wolf 
363093393e69SMax Reitz     filtered = bdrv_filter_child(bs);
363123b93525SMax Reitz     backing = bdrv_cow_child(bs);
363293393e69SMax Reitz 
3633955c7d66SKevin Wolf     /*
3634955c7d66SKevin Wolf      * If the image has a backing file that is large enough that it would
3635955c7d66SKevin Wolf      * provide data for the new area, we cannot leave it unallocated because
3636955c7d66SKevin Wolf      * then the backing file content would become visible. Instead, zero-fill
3637955c7d66SKevin Wolf      * the new area.
3638955c7d66SKevin Wolf      *
3639955c7d66SKevin Wolf      * Note that if the image has a backing file, but was opened without the
3640955c7d66SKevin Wolf      * backing file, taking care of keeping things consistent with that backing
3641955c7d66SKevin Wolf      * file is the user's responsibility.
3642955c7d66SKevin Wolf      */
364323b93525SMax Reitz     if (new_bytes && backing) {
3644955c7d66SKevin Wolf         int64_t backing_len;
3645955c7d66SKevin Wolf 
3646bd53086eSEmanuele Giuseppe Esposito         backing_len = bdrv_co_getlength(backing->bs);
3647955c7d66SKevin Wolf         if (backing_len < 0) {
3648955c7d66SKevin Wolf             ret = backing_len;
3649955c7d66SKevin Wolf             error_setg_errno(errp, -ret, "Could not get backing file size");
3650955c7d66SKevin Wolf             goto out;
3651955c7d66SKevin Wolf         }
3652955c7d66SKevin Wolf 
3653955c7d66SKevin Wolf         if (backing_len > old_size) {
3654955c7d66SKevin Wolf             flags |= BDRV_REQ_ZERO_WRITE;
3655955c7d66SKevin Wolf         }
3656955c7d66SKevin Wolf     }
3657955c7d66SKevin Wolf 
36586b7e8f8bSMax Reitz     if (drv->bdrv_co_truncate) {
365992b92799SKevin Wolf         if (flags & ~bs->supported_truncate_flags) {
366092b92799SKevin Wolf             error_setg(errp, "Block driver does not support requested flags");
366192b92799SKevin Wolf             ret = -ENOTSUP;
366292b92799SKevin Wolf             goto out;
366392b92799SKevin Wolf         }
366492b92799SKevin Wolf         ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
366593393e69SMax Reitz     } else if (filtered) {
366693393e69SMax Reitz         ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
36676b7e8f8bSMax Reitz     } else {
36683d9f2d2aSKevin Wolf         error_setg(errp, "Image format driver does not support resize");
36693d9f2d2aSKevin Wolf         ret = -ENOTSUP;
36703d9f2d2aSKevin Wolf         goto out;
36713d9f2d2aSKevin Wolf     }
36723d9f2d2aSKevin Wolf     if (ret < 0) {
36733d9f2d2aSKevin Wolf         goto out;
36743d9f2d2aSKevin Wolf     }
36756b7e8f8bSMax Reitz 
3676bd53086eSEmanuele Giuseppe Esposito     ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
36773d9f2d2aSKevin Wolf     if (ret < 0) {
36783d9f2d2aSKevin Wolf         error_setg_errno(errp, -ret, "Could not refresh total sector count");
36793d9f2d2aSKevin Wolf     } else {
36803d9f2d2aSKevin Wolf         offset = bs->total_sectors * BDRV_SECTOR_SIZE;
36813d9f2d2aSKevin Wolf     }
3682c057960cSEmanuele Giuseppe Esposito     /*
3683c057960cSEmanuele Giuseppe Esposito      * It's possible that truncation succeeded but bdrv_refresh_total_sectors
3684cd47d792SFam Zheng      * failed; the latter doesn't affect how we should finish the request.
3685c057960cSEmanuele Giuseppe Esposito      * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
3686c057960cSEmanuele Giuseppe Esposito      */
3687cd47d792SFam Zheng     bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
36883d9f2d2aSKevin Wolf 
36893d9f2d2aSKevin Wolf out:
36901bc5f09fSKevin Wolf     tracked_request_end(&req);
36913d9f2d2aSKevin Wolf     bdrv_dec_in_flight(bs);
36921bc5f09fSKevin Wolf 
36933d9f2d2aSKevin Wolf     return ret;
36943d9f2d2aSKevin Wolf }
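
/*
 * Illustrative usage sketch (not part of the original file): growing an
 * image without preallocation.  With exact=false the node merely has to
 * end up at least new_size bytes long; errors are reported through errp
 * as well as the return code.
 */
#if 0
static int coroutine_fn
example_grow(BdrvChild *child, int64_t new_size, Error **errp)
{
    return bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF, 0,
                            errp);
}
#endif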
3695bd54669aSVladimir Sementsov-Ogievskiy 
3696bd54669aSVladimir Sementsov-Ogievskiy void bdrv_cancel_in_flight(BlockDriverState *bs)
3697bd54669aSVladimir Sementsov-Ogievskiy {
3698f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
3699bd54669aSVladimir Sementsov-Ogievskiy     if (!bs || !bs->drv) {
3700bd54669aSVladimir Sementsov-Ogievskiy         return;
3701bd54669aSVladimir Sementsov-Ogievskiy     }
3702bd54669aSVladimir Sementsov-Ogievskiy 
3703bd54669aSVladimir Sementsov-Ogievskiy     if (bs->drv->bdrv_cancel_in_flight) {
3704bd54669aSVladimir Sementsov-Ogievskiy         bs->drv->bdrv_cancel_in_flight(bs);
3705bd54669aSVladimir Sementsov-Ogievskiy     }
3706bd54669aSVladimir Sementsov-Ogievskiy }
3707ce14f3b4SVladimir Sementsov-Ogievskiy 
3708ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
3709ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
3710ce14f3b4SVladimir Sementsov-Ogievskiy                         QEMUIOVector *qiov, size_t qiov_offset)
3711ce14f3b4SVladimir Sementsov-Ogievskiy {
3712ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriverState *bs = child->bs;
3713ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
3714ce14f3b4SVladimir Sementsov-Ogievskiy     int ret;
3715ce14f3b4SVladimir Sementsov-Ogievskiy     IO_CODE();
37167b9e8b22SKevin Wolf     assert_bdrv_graph_readable();
3717ce14f3b4SVladimir Sementsov-Ogievskiy 
3718ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv) {
3719ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
3720ce14f3b4SVladimir Sementsov-Ogievskiy     }
3721ce14f3b4SVladimir Sementsov-Ogievskiy 
3722ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv->bdrv_co_preadv_snapshot) {
3723ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOTSUP;
3724ce14f3b4SVladimir Sementsov-Ogievskiy     }
3725ce14f3b4SVladimir Sementsov-Ogievskiy 
3726ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
3727ce14f3b4SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
3728ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
3729ce14f3b4SVladimir Sementsov-Ogievskiy 
3730ce14f3b4SVladimir Sementsov-Ogievskiy     return ret;
3731ce14f3b4SVladimir Sementsov-Ogievskiy }
3732ce14f3b4SVladimir Sementsov-Ogievskiy 
3733ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
3734ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_snapshot_block_status(BlockDriverState *bs,
3735ce14f3b4SVladimir Sementsov-Ogievskiy                               bool want_zero, int64_t offset, int64_t bytes,
3736ce14f3b4SVladimir Sementsov-Ogievskiy                               int64_t *pnum, int64_t *map,
3737ce14f3b4SVladimir Sementsov-Ogievskiy                               BlockDriverState **file)
3738ce14f3b4SVladimir Sementsov-Ogievskiy {
3739ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
3740ce14f3b4SVladimir Sementsov-Ogievskiy     int ret;
3741ce14f3b4SVladimir Sementsov-Ogievskiy     IO_CODE();
37427b9e8b22SKevin Wolf     assert_bdrv_graph_readable();
3743ce14f3b4SVladimir Sementsov-Ogievskiy 
3744ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv) {
3745ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
3746ce14f3b4SVladimir Sementsov-Ogievskiy     }
3747ce14f3b4SVladimir Sementsov-Ogievskiy 
3748ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv->bdrv_co_snapshot_block_status) {
3749ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOTSUP;
3750ce14f3b4SVladimir Sementsov-Ogievskiy     }
3751ce14f3b4SVladimir Sementsov-Ogievskiy 
3752ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
3753ce14f3b4SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
3754ce14f3b4SVladimir Sementsov-Ogievskiy                                              pnum, map, file);
3755ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
3756ce14f3b4SVladimir Sementsov-Ogievskiy 
3757ce14f3b4SVladimir Sementsov-Ogievskiy     return ret;
3758ce14f3b4SVladimir Sementsov-Ogievskiy }
3759ce14f3b4SVladimir Sementsov-Ogievskiy 
3760ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
3761ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
3762ce14f3b4SVladimir Sementsov-Ogievskiy {
3763ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
3764ce14f3b4SVladimir Sementsov-Ogievskiy     int ret;
3765ce14f3b4SVladimir Sementsov-Ogievskiy     IO_CODE();
37669a5a1c62SEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3767ce14f3b4SVladimir Sementsov-Ogievskiy 
3768ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv) {
3769ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
3770ce14f3b4SVladimir Sementsov-Ogievskiy     }
3771ce14f3b4SVladimir Sementsov-Ogievskiy 
3772ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv->bdrv_co_pdiscard_snapshot) {
3773ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOTSUP;
3774ce14f3b4SVladimir Sementsov-Ogievskiy     }
3775ce14f3b4SVladimir Sementsov-Ogievskiy 
3776ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
3777ce14f3b4SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
3778ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
3779ce14f3b4SVladimir Sementsov-Ogievskiy 
3780ce14f3b4SVladimir Sementsov-Ogievskiy     return ret;
3781ce14f3b4SVladimir Sementsov-Ogievskiy }
3782