/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "system/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "system/replay.h"
#include "qemu/units.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

/* Maximum read size for checking if data reads as zero, in bytes */
#define MAX_ZERO_CHECK_BUFFER (128 * KiB)

static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs);

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);
5461007b31SStefan Hajnoczi 
55d05ab380SEmanuele Giuseppe Esposito static void GRAPH_RDLOCK
bdrv_parent_drained_begin(BlockDriverState * bs,BdrvChild * ignore)56d05ab380SEmanuele Giuseppe Esposito bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
5761007b31SStefan Hajnoczi {
5802d21300SKevin Wolf     BdrvChild *c, *next;
59d05ab380SEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
60d05ab380SEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
6127ccdd52SKevin Wolf 
6202d21300SKevin Wolf     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
63a82a3bd1SKevin Wolf         if (c == ignore) {
640152bf40SKevin Wolf             continue;
650152bf40SKevin Wolf         }
66606ed756SKevin Wolf         bdrv_parent_drained_begin_single(c);
67ce0f1412SPaolo Bonzini     }
68ce0f1412SPaolo Bonzini }
69ce0f1412SPaolo Bonzini 
bdrv_parent_drained_end_single(BdrvChild * c)702f65df6eSKevin Wolf void bdrv_parent_drained_end_single(BdrvChild *c)
71804db8eaSMax Reitz {
72ab613350SStefan Hajnoczi     GLOBAL_STATE_CODE();
732f65df6eSKevin Wolf 
7457e05be3SKevin Wolf     assert(c->quiesced_parent);
7557e05be3SKevin Wolf     c->quiesced_parent = false;
7657e05be3SKevin Wolf 
77bd86fb99SMax Reitz     if (c->klass->drained_end) {
782f65df6eSKevin Wolf         c->klass->drained_end(c);
79804db8eaSMax Reitz     }
80804db8eaSMax Reitz }
81804db8eaSMax Reitz 
82d05ab380SEmanuele Giuseppe Esposito static void GRAPH_RDLOCK
bdrv_parent_drained_end(BlockDriverState * bs,BdrvChild * ignore)83d05ab380SEmanuele Giuseppe Esposito bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
84ce0f1412SPaolo Bonzini {
8561ad631cSMax Reitz     BdrvChild *c;
86d05ab380SEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
87d05ab380SEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
8827ccdd52SKevin Wolf 
8961ad631cSMax Reitz     QLIST_FOREACH(c, &bs->parents, next_parent) {
90a82a3bd1SKevin Wolf         if (c == ignore) {
910152bf40SKevin Wolf             continue;
920152bf40SKevin Wolf         }
932f65df6eSKevin Wolf         bdrv_parent_drained_end_single(c);
94c2066af0SKevin Wolf     }
9561007b31SStefan Hajnoczi }
9661007b31SStefan Hajnoczi 
bdrv_parent_drained_poll_single(BdrvChild * c)9723987471SKevin Wolf bool bdrv_parent_drained_poll_single(BdrvChild *c)
984be6a6d1SKevin Wolf {
99d05ab380SEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
100d05ab380SEmanuele Giuseppe Esposito 
101bd86fb99SMax Reitz     if (c->klass->drained_poll) {
102bd86fb99SMax Reitz         return c->klass->drained_poll(c);
1034be6a6d1SKevin Wolf     }
1044be6a6d1SKevin Wolf     return false;
1054be6a6d1SKevin Wolf }
1064be6a6d1SKevin Wolf 
107d05ab380SEmanuele Giuseppe Esposito static bool GRAPH_RDLOCK
bdrv_parent_drained_poll(BlockDriverState * bs,BdrvChild * ignore,bool ignore_bds_parents)108d05ab380SEmanuele Giuseppe Esposito bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
1096cd5c9d7SKevin Wolf                          bool ignore_bds_parents)
11089bd0305SKevin Wolf {
11189bd0305SKevin Wolf     BdrvChild *c, *next;
11289bd0305SKevin Wolf     bool busy = false;
113d05ab380SEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
114d05ab380SEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
11589bd0305SKevin Wolf 
11689bd0305SKevin Wolf     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
117bd86fb99SMax Reitz         if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
11889bd0305SKevin Wolf             continue;
11989bd0305SKevin Wolf         }
1204be6a6d1SKevin Wolf         busy |= bdrv_parent_drained_poll_single(c);
12189bd0305SKevin Wolf     }
12289bd0305SKevin Wolf 
12389bd0305SKevin Wolf     return busy;
12489bd0305SKevin Wolf }
12589bd0305SKevin Wolf 
bdrv_parent_drained_begin_single(BdrvChild * c)126606ed756SKevin Wolf void bdrv_parent_drained_begin_single(BdrvChild *c)
1274be6a6d1SKevin Wolf {
128ab613350SStefan Hajnoczi     GLOBAL_STATE_CODE();
12957e05be3SKevin Wolf 
13057e05be3SKevin Wolf     assert(!c->quiesced_parent);
13157e05be3SKevin Wolf     c->quiesced_parent = true;
13257e05be3SKevin Wolf 
133bd86fb99SMax Reitz     if (c->klass->drained_begin) {
134d05ab380SEmanuele Giuseppe Esposito         /* called with rdlock taken, but it doesn't really need it. */
135bd86fb99SMax Reitz         c->klass->drained_begin(c);
1364be6a6d1SKevin Wolf     }
1374be6a6d1SKevin Wolf }
1384be6a6d1SKevin Wolf 
bdrv_merge_limits(BlockLimits * dst,const BlockLimits * src)139d9e0dfa2SEric Blake static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
140d9e0dfa2SEric Blake {
1419f460c64SAkihiko Odaki     dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
1429f460c64SAkihiko Odaki                                   src->pdiscard_alignment);
143d9e0dfa2SEric Blake     dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
144d9e0dfa2SEric Blake     dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
14524b36e98SPaolo Bonzini     dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
14624b36e98SPaolo Bonzini                                         src->max_hw_transfer);
147d9e0dfa2SEric Blake     dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
148d9e0dfa2SEric Blake                                  src->opt_mem_alignment);
149d9e0dfa2SEric Blake     dst->min_mem_alignment = MAX(dst->min_mem_alignment,
150d9e0dfa2SEric Blake                                  src->min_mem_alignment);
151d9e0dfa2SEric Blake     dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
152cc071629SPaolo Bonzini     dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
153d9e0dfa2SEric Blake }
154d9e0dfa2SEric Blake 
1551e4c797cSVladimir Sementsov-Ogievskiy typedef struct BdrvRefreshLimitsState {
1561e4c797cSVladimir Sementsov-Ogievskiy     BlockDriverState *bs;
1571e4c797cSVladimir Sementsov-Ogievskiy     BlockLimits old_bl;
1581e4c797cSVladimir Sementsov-Ogievskiy } BdrvRefreshLimitsState;
1591e4c797cSVladimir Sementsov-Ogievskiy 
bdrv_refresh_limits_abort(void * opaque)1601e4c797cSVladimir Sementsov-Ogievskiy static void bdrv_refresh_limits_abort(void *opaque)
1611e4c797cSVladimir Sementsov-Ogievskiy {
1621e4c797cSVladimir Sementsov-Ogievskiy     BdrvRefreshLimitsState *s = opaque;
1631e4c797cSVladimir Sementsov-Ogievskiy 
1641e4c797cSVladimir Sementsov-Ogievskiy     s->bs->bl = s->old_bl;
1651e4c797cSVladimir Sementsov-Ogievskiy }
1661e4c797cSVladimir Sementsov-Ogievskiy 
1671e4c797cSVladimir Sementsov-Ogievskiy static TransactionActionDrv bdrv_refresh_limits_drv = {
1681e4c797cSVladimir Sementsov-Ogievskiy     .abort = bdrv_refresh_limits_abort,
1691e4c797cSVladimir Sementsov-Ogievskiy     .clean = g_free,
1701e4c797cSVladimir Sementsov-Ogievskiy };
1711e4c797cSVladimir Sementsov-Ogievskiy 
1721e4c797cSVladimir Sementsov-Ogievskiy /* @tran is allowed to be NULL, in this case no rollback is possible. */
bdrv_refresh_limits(BlockDriverState * bs,Transaction * tran,Error ** errp)1731e4c797cSVladimir Sementsov-Ogievskiy void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
17461007b31SStefan Hajnoczi {
17533985614SVladimir Sementsov-Ogievskiy     ERRP_GUARD();
17661007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
17766b129acSMax Reitz     BdrvChild *c;
17866b129acSMax Reitz     bool have_limits;
17961007b31SStefan Hajnoczi 
180f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
181f791bf7fSEmanuele Giuseppe Esposito 
1821e4c797cSVladimir Sementsov-Ogievskiy     if (tran) {
1831e4c797cSVladimir Sementsov-Ogievskiy         BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
1841e4c797cSVladimir Sementsov-Ogievskiy         *s = (BdrvRefreshLimitsState) {
1851e4c797cSVladimir Sementsov-Ogievskiy             .bs = bs,
1861e4c797cSVladimir Sementsov-Ogievskiy             .old_bl = bs->bl,
1871e4c797cSVladimir Sementsov-Ogievskiy         };
1881e4c797cSVladimir Sementsov-Ogievskiy         tran_add(tran, &bdrv_refresh_limits_drv, s);
1891e4c797cSVladimir Sementsov-Ogievskiy     }
1901e4c797cSVladimir Sementsov-Ogievskiy 
19161007b31SStefan Hajnoczi     memset(&bs->bl, 0, sizeof(bs->bl));
19261007b31SStefan Hajnoczi 
19361007b31SStefan Hajnoczi     if (!drv) {
19461007b31SStefan Hajnoczi         return;
19561007b31SStefan Hajnoczi     }
19661007b31SStefan Hajnoczi 
19779ba8c98SEric Blake     /* Default alignment based on whether driver has byte interface */
198e31f6864SEric Blake     bs->bl.request_alignment = (drv->bdrv_co_preadv ||
199ac850bf0SVladimir Sementsov-Ogievskiy                                 drv->bdrv_aio_preadv ||
200ac850bf0SVladimir Sementsov-Ogievskiy                                 drv->bdrv_co_preadv_part) ? 1 : 512;
20179ba8c98SEric Blake 
20261007b31SStefan Hajnoczi     /* Take some limits from the children as a default */
20366b129acSMax Reitz     have_limits = false;
20466b129acSMax Reitz     QLIST_FOREACH(c, &bs->children, next) {
20566b129acSMax Reitz         if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
20666b129acSMax Reitz         {
20766b129acSMax Reitz             bdrv_merge_limits(&bs->bl, &c->bs->bl);
20866b129acSMax Reitz             have_limits = true;
20966b129acSMax Reitz         }
210160a29e2SPaolo Bonzini 
211160a29e2SPaolo Bonzini         if (c->role & BDRV_CHILD_FILTERED) {
212160a29e2SPaolo Bonzini             bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
213160a29e2SPaolo Bonzini         }
21466b129acSMax Reitz     }
21566b129acSMax Reitz 
21666b129acSMax Reitz     if (!have_limits) {
2174196d2f0SDenis V. Lunev         bs->bl.min_mem_alignment = 512;
2188e3b0cbbSMarc-André Lureau         bs->bl.opt_mem_alignment = qemu_real_host_page_size();
219bd44feb7SStefan Hajnoczi 
220bd44feb7SStefan Hajnoczi         /* Safe default since most protocols use readv()/writev()/etc */
221bd44feb7SStefan Hajnoczi         bs->bl.max_iov = IOV_MAX;
22261007b31SStefan Hajnoczi     }
22361007b31SStefan Hajnoczi 
22461007b31SStefan Hajnoczi     /* Then let the driver override it */
22561007b31SStefan Hajnoczi     if (drv->bdrv_refresh_limits) {
22661007b31SStefan Hajnoczi         drv->bdrv_refresh_limits(bs, errp);
2278b117001SVladimir Sementsov-Ogievskiy         if (*errp) {
2288b117001SVladimir Sementsov-Ogievskiy             return;
2298b117001SVladimir Sementsov-Ogievskiy         }
2308b117001SVladimir Sementsov-Ogievskiy     }
2318b117001SVladimir Sementsov-Ogievskiy 
2328b117001SVladimir Sementsov-Ogievskiy     if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
2338b117001SVladimir Sementsov-Ogievskiy         error_setg(errp, "Driver requires too large request alignment");
23461007b31SStefan Hajnoczi     }
23561007b31SStefan Hajnoczi }
23661007b31SStefan Hajnoczi 
23761007b31SStefan Hajnoczi /**
23861007b31SStefan Hajnoczi  * The copy-on-read flag is actually a reference count so multiple users may
23961007b31SStefan Hajnoczi  * use the feature without worrying about clobbering its previous state.
24061007b31SStefan Hajnoczi  * Copy-on-read stays enabled until all users have called to disable it.
24161007b31SStefan Hajnoczi  */
bdrv_enable_copy_on_read(BlockDriverState * bs)24261007b31SStefan Hajnoczi void bdrv_enable_copy_on_read(BlockDriverState *bs)
24361007b31SStefan Hajnoczi {
244384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
245d73415a3SStefan Hajnoczi     qatomic_inc(&bs->copy_on_read);
24661007b31SStefan Hajnoczi }
24761007b31SStefan Hajnoczi 
bdrv_disable_copy_on_read(BlockDriverState * bs)24861007b31SStefan Hajnoczi void bdrv_disable_copy_on_read(BlockDriverState *bs)
24961007b31SStefan Hajnoczi {
250d73415a3SStefan Hajnoczi     int old = qatomic_fetch_dec(&bs->copy_on_read);
251384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
252d3faa13eSPaolo Bonzini     assert(old >= 1);
25361007b31SStefan Hajnoczi }
25461007b31SStefan Hajnoczi 
25561124f03SPaolo Bonzini typedef struct {
25661124f03SPaolo Bonzini     Coroutine *co;
25761124f03SPaolo Bonzini     BlockDriverState *bs;
25861124f03SPaolo Bonzini     bool done;
259481cad48SManos Pitsidianakis     bool begin;
260fe4f0614SKevin Wolf     bool poll;
2610152bf40SKevin Wolf     BdrvChild *parent;
26261124f03SPaolo Bonzini } BdrvCoDrainData;
26361124f03SPaolo Bonzini 
2641cc8e54aSKevin Wolf /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bdrv_drain_poll(BlockDriverState * bs,BdrvChild * ignore_parent,bool ignore_bds_parents)265299403aeSKevin Wolf bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
266299403aeSKevin Wolf                      bool ignore_bds_parents)
26789bd0305SKevin Wolf {
268ab613350SStefan Hajnoczi     GLOBAL_STATE_CODE();
269fe4f0614SKevin Wolf 
2706cd5c9d7SKevin Wolf     if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
27189bd0305SKevin Wolf         return true;
27289bd0305SKevin Wolf     }
27389bd0305SKevin Wolf 
274d73415a3SStefan Hajnoczi     if (qatomic_read(&bs->in_flight)) {
275fe4f0614SKevin Wolf         return true;
27689bd0305SKevin Wolf     }
27789bd0305SKevin Wolf 
278fe4f0614SKevin Wolf     return false;
279fe4f0614SKevin Wolf }
280fe4f0614SKevin Wolf 
bdrv_drain_poll_top_level(BlockDriverState * bs,BdrvChild * ignore_parent)281299403aeSKevin Wolf static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
28289bd0305SKevin Wolf                                       BdrvChild *ignore_parent)
2831cc8e54aSKevin Wolf {
284d05ab380SEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
285d05ab380SEmanuele Giuseppe Esposito     GRAPH_RDLOCK_GUARD_MAINLOOP();
286d05ab380SEmanuele Giuseppe Esposito 
287299403aeSKevin Wolf     return bdrv_drain_poll(bs, ignore_parent, false);
2881cc8e54aSKevin Wolf }
2891cc8e54aSKevin Wolf 
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
bdrv_co_drain_bh_cb(void * opaque)294a77fd4bbSFam Zheng static void bdrv_co_drain_bh_cb(void *opaque)
295a77fd4bbSFam Zheng {
296a77fd4bbSFam Zheng     BdrvCoDrainData *data = opaque;
297a77fd4bbSFam Zheng     Coroutine *co = data->co;
29899723548SPaolo Bonzini     BlockDriverState *bs = data->bs;
299a77fd4bbSFam Zheng 
300c8ca33d0SKevin Wolf     if (bs) {
30199723548SPaolo Bonzini         bdrv_dec_in_flight(bs);
302481cad48SManos Pitsidianakis         if (data->begin) {
303a82a3bd1SKevin Wolf             bdrv_do_drained_begin(bs, data->parent, data->poll);
304481cad48SManos Pitsidianakis         } else {
305e037c09cSMax Reitz             assert(!data->poll);
306a82a3bd1SKevin Wolf             bdrv_do_drained_end(bs, data->parent);
307481cad48SManos Pitsidianakis         }
308c8ca33d0SKevin Wolf     } else {
309c8ca33d0SKevin Wolf         assert(data->begin);
310c8ca33d0SKevin Wolf         bdrv_drain_all_begin();
311c8ca33d0SKevin Wolf     }
312481cad48SManos Pitsidianakis 
313a77fd4bbSFam Zheng     data->done = true;
3141919631eSPaolo Bonzini     aio_co_wake(co);
315a77fd4bbSFam Zheng }
316a77fd4bbSFam Zheng 
bdrv_co_yield_to_drain(BlockDriverState * bs,bool begin,BdrvChild * parent,bool poll)317481cad48SManos Pitsidianakis static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
318299403aeSKevin Wolf                                                 bool begin,
3196cd5c9d7SKevin Wolf                                                 BdrvChild *parent,
3202f65df6eSKevin Wolf                                                 bool poll)
321a77fd4bbSFam Zheng {
322a77fd4bbSFam Zheng     BdrvCoDrainData data;
323960d5fb3SKevin Wolf     Coroutine *self = qemu_coroutine_self();
324a77fd4bbSFam Zheng 
325a77fd4bbSFam Zheng     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
326c40a2545SStefan Hajnoczi      * other coroutines run if they were queued by aio_co_enter(). */
327a77fd4bbSFam Zheng 
328a77fd4bbSFam Zheng     assert(qemu_in_coroutine());
329a77fd4bbSFam Zheng     data = (BdrvCoDrainData) {
330960d5fb3SKevin Wolf         .co = self,
331a77fd4bbSFam Zheng         .bs = bs,
332a77fd4bbSFam Zheng         .done = false,
333481cad48SManos Pitsidianakis         .begin = begin,
3340152bf40SKevin Wolf         .parent = parent,
335fe4f0614SKevin Wolf         .poll = poll,
336a77fd4bbSFam Zheng     };
3378e1da77eSMax Reitz 
338c8ca33d0SKevin Wolf     if (bs) {
33999723548SPaolo Bonzini         bdrv_inc_in_flight(bs);
340c8ca33d0SKevin Wolf     }
341960d5fb3SKevin Wolf 
342ab613350SStefan Hajnoczi     replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
343ab613350SStefan Hajnoczi                                      bdrv_co_drain_bh_cb, &data);
344a77fd4bbSFam Zheng 
345a77fd4bbSFam Zheng     qemu_coroutine_yield();
346a77fd4bbSFam Zheng     /* If we are resumed from some other event (such as an aio completion or a
347a77fd4bbSFam Zheng      * timer callback), it is a bug in the caller that should be fixed. */
348a77fd4bbSFam Zheng     assert(data.done);
349a77fd4bbSFam Zheng }
350a77fd4bbSFam Zheng 
bdrv_do_drained_begin(BlockDriverState * bs,BdrvChild * parent,bool poll)35105c272ffSKevin Wolf static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
35205c272ffSKevin Wolf                                   bool poll)
353dcf94a23SKevin Wolf {
354384a48fbSEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
35505c272ffSKevin Wolf 
35605c272ffSKevin Wolf     if (qemu_in_coroutine()) {
35705c272ffSKevin Wolf         bdrv_co_yield_to_drain(bs, true, parent, poll);
35805c272ffSKevin Wolf         return;
35905c272ffSKevin Wolf     }
360dcf94a23SKevin Wolf 
361ab613350SStefan Hajnoczi     GLOBAL_STATE_CODE();
362ab613350SStefan Hajnoczi 
363dcf94a23SKevin Wolf     /* Stop things in parent-to-child order */
364d73415a3SStefan Hajnoczi     if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
365d05ab380SEmanuele Giuseppe Esposito         GRAPH_RDLOCK_GUARD_MAINLOOP();
366a82a3bd1SKevin Wolf         bdrv_parent_drained_begin(bs, parent);
367c7bc05f7SKevin Wolf         if (bs->drv && bs->drv->bdrv_drain_begin) {
368c7bc05f7SKevin Wolf             bs->drv->bdrv_drain_begin(bs);
369c7bc05f7SKevin Wolf         }
370dcf94a23SKevin Wolf     }
371d30b8e64SKevin Wolf 
372fe4f0614SKevin Wolf     /*
373fe4f0614SKevin Wolf      * Wait for drained requests to finish.
374fe4f0614SKevin Wolf      *
375fe4f0614SKevin Wolf      * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
376fe4f0614SKevin Wolf      * call is needed so things in this AioContext can make progress even
377fe4f0614SKevin Wolf      * though we don't return to the main AioContext loop - this automatically
378fe4f0614SKevin Wolf      * includes other nodes in the same AioContext and therefore all child
379fe4f0614SKevin Wolf      * nodes.
380fe4f0614SKevin Wolf      */
381fe4f0614SKevin Wolf     if (poll) {
382299403aeSKevin Wolf         BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
383fe4f0614SKevin Wolf     }
3846820643fSKevin Wolf }
3856820643fSKevin Wolf 
bdrv_do_drained_begin_quiesce(BlockDriverState * bs,BdrvChild * parent)38605c272ffSKevin Wolf void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
38705c272ffSKevin Wolf {
38805c272ffSKevin Wolf     bdrv_do_drained_begin(bs, parent, false);
38905c272ffSKevin Wolf }
39005c272ffSKevin Wolf 
391e2dbca03SPaolo Bonzini void coroutine_mixed_fn
bdrv_drained_begin(BlockDriverState * bs)392e2dbca03SPaolo Bonzini bdrv_drained_begin(BlockDriverState *bs)
3930152bf40SKevin Wolf {
394384a48fbSEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
395a82a3bd1SKevin Wolf     bdrv_do_drained_begin(bs, NULL, true);
396b0165585SKevin Wolf }
397b0165585SKevin Wolf 
398e037c09cSMax Reitz /**
399e037c09cSMax Reitz  * This function does not poll, nor must any of its recursively called
4002f65df6eSKevin Wolf  * functions.
401e037c09cSMax Reitz  */
bdrv_do_drained_end(BlockDriverState * bs,BdrvChild * parent)402a82a3bd1SKevin Wolf static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
403b0165585SKevin Wolf {
4040f115168SKevin Wolf     int old_quiesce_counter;
4050f115168SKevin Wolf 
406ab613350SStefan Hajnoczi     IO_OR_GS_CODE();
407ab613350SStefan Hajnoczi 
408481cad48SManos Pitsidianakis     if (qemu_in_coroutine()) {
409a82a3bd1SKevin Wolf         bdrv_co_yield_to_drain(bs, false, parent, false);
410481cad48SManos Pitsidianakis         return;
411481cad48SManos Pitsidianakis     }
412d05ab380SEmanuele Giuseppe Esposito 
413d05ab380SEmanuele Giuseppe Esposito     /* At this point, we should be always running in the main loop. */
414d05ab380SEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
4156820643fSKevin Wolf     assert(bs->quiesce_counter > 0);
4166820643fSKevin Wolf 
41760369b86SKevin Wolf     /* Re-enable things in child-to-parent order */
41857e05be3SKevin Wolf     old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
41957e05be3SKevin Wolf     if (old_quiesce_counter == 1) {
420d05ab380SEmanuele Giuseppe Esposito         GRAPH_RDLOCK_GUARD_MAINLOOP();
421c7bc05f7SKevin Wolf         if (bs->drv && bs->drv->bdrv_drain_end) {
422c7bc05f7SKevin Wolf             bs->drv->bdrv_drain_end(bs);
423c7bc05f7SKevin Wolf         }
424a82a3bd1SKevin Wolf         bdrv_parent_drained_end(bs, parent);
4256820643fSKevin Wolf     }
4260f115168SKevin Wolf }
4276820643fSKevin Wolf 
bdrv_drained_end(BlockDriverState * bs)4280152bf40SKevin Wolf void bdrv_drained_end(BlockDriverState *bs)
4290152bf40SKevin Wolf {
430384a48fbSEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
431a82a3bd1SKevin Wolf     bdrv_do_drained_end(bs, NULL);
432d736f119SKevin Wolf }
433d736f119SKevin Wolf 
bdrv_drain(BlockDriverState * bs)43461007b31SStefan Hajnoczi void bdrv_drain(BlockDriverState *bs)
43561007b31SStefan Hajnoczi {
436384a48fbSEmanuele Giuseppe Esposito     IO_OR_GS_CODE();
4376820643fSKevin Wolf     bdrv_drained_begin(bs);
4386820643fSKevin Wolf     bdrv_drained_end(bs);
43961007b31SStefan Hajnoczi }
44061007b31SStefan Hajnoczi 
bdrv_drain_assert_idle(BlockDriverState * bs)441c13ad59fSKevin Wolf static void bdrv_drain_assert_idle(BlockDriverState *bs)
442c13ad59fSKevin Wolf {
443c13ad59fSKevin Wolf     BdrvChild *child, *next;
444d05ab380SEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
445d05ab380SEmanuele Giuseppe Esposito     GRAPH_RDLOCK_GUARD_MAINLOOP();
446c13ad59fSKevin Wolf 
447d73415a3SStefan Hajnoczi     assert(qatomic_read(&bs->in_flight) == 0);
448c13ad59fSKevin Wolf     QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
449c13ad59fSKevin Wolf         bdrv_drain_assert_idle(child->bs);
450c13ad59fSKevin Wolf     }
451c13ad59fSKevin Wolf }
452c13ad59fSKevin Wolf 
4530f12264eSKevin Wolf unsigned int bdrv_drain_all_count = 0;
4540f12264eSKevin Wolf 
bdrv_drain_all_poll(void)4550f12264eSKevin Wolf static bool bdrv_drain_all_poll(void)
4560f12264eSKevin Wolf {
4570f12264eSKevin Wolf     BlockDriverState *bs = NULL;
4580f12264eSKevin Wolf     bool result = false;
459d05ab380SEmanuele Giuseppe Esposito 
460f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
461d05ab380SEmanuele Giuseppe Esposito     GRAPH_RDLOCK_GUARD_MAINLOOP();
4620f12264eSKevin Wolf 
463b49f4755SStefan Hajnoczi     /*
464b49f4755SStefan Hajnoczi      * bdrv_drain_poll() can't make changes to the graph and we hold the BQL,
465b49f4755SStefan Hajnoczi      * so iterating bdrv_next_all_states() is safe.
466b49f4755SStefan Hajnoczi      */
4670f12264eSKevin Wolf     while ((bs = bdrv_next_all_states(bs))) {
468299403aeSKevin Wolf         result |= bdrv_drain_poll(bs, NULL, true);
4690f12264eSKevin Wolf     }
4700f12264eSKevin Wolf 
4710f12264eSKevin Wolf     return result;
4720f12264eSKevin Wolf }
4730f12264eSKevin Wolf 
47461007b31SStefan Hajnoczi /*
47561007b31SStefan Hajnoczi  * Wait for pending requests to complete across all BlockDriverStates
47661007b31SStefan Hajnoczi  *
47761007b31SStefan Hajnoczi  * This function does not flush data to disk, use bdrv_flush_all() for that
47861007b31SStefan Hajnoczi  * after calling this function.
479c0778f66SAlberto Garcia  *
480c0778f66SAlberto Garcia  * This pauses all block jobs and disables external clients. It must
481c0778f66SAlberto Garcia  * be paired with bdrv_drain_all_end().
482c0778f66SAlberto Garcia  *
483c0778f66SAlberto Garcia  * NOTE: no new block jobs or BlockDriverStates can be created between
484c0778f66SAlberto Garcia  * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
48561007b31SStefan Hajnoczi  */
bdrv_drain_all_begin_nopoll(void)486da0bd744SKevin Wolf void bdrv_drain_all_begin_nopoll(void)
48761007b31SStefan Hajnoczi {
4880f12264eSKevin Wolf     BlockDriverState *bs = NULL;
489f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
49061007b31SStefan Hajnoczi 
491c8aa7895SPavel Dovgalyuk     /*
492c8aa7895SPavel Dovgalyuk      * bdrv queue is managed by record/replay,
493c8aa7895SPavel Dovgalyuk      * waiting for finishing the I/O requests may
494c8aa7895SPavel Dovgalyuk      * be infinite
495c8aa7895SPavel Dovgalyuk      */
496c8aa7895SPavel Dovgalyuk     if (replay_events_enabled()) {
497c8aa7895SPavel Dovgalyuk         return;
498c8aa7895SPavel Dovgalyuk     }
499c8aa7895SPavel Dovgalyuk 
5000f12264eSKevin Wolf     /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
5010f12264eSKevin Wolf      * loop AioContext, so make sure we're in the main context. */
5029a7e86c8SKevin Wolf     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
5030f12264eSKevin Wolf     assert(bdrv_drain_all_count < INT_MAX);
5040f12264eSKevin Wolf     bdrv_drain_all_count++;
5059a7e86c8SKevin Wolf 
5060f12264eSKevin Wolf     /* Quiesce all nodes, without polling in-flight requests yet. The graph
5070f12264eSKevin Wolf      * cannot change during this loop. */
5080f12264eSKevin Wolf     while ((bs = bdrv_next_all_states(bs))) {
509a82a3bd1SKevin Wolf         bdrv_do_drained_begin(bs, NULL, false);
51061007b31SStefan Hajnoczi     }
511da0bd744SKevin Wolf }
512da0bd744SKevin Wolf 
bdrv_drain_all_begin(void)513e2dbca03SPaolo Bonzini void coroutine_mixed_fn bdrv_drain_all_begin(void)
514da0bd744SKevin Wolf {
515da0bd744SKevin Wolf     BlockDriverState *bs = NULL;
516da0bd744SKevin Wolf 
517da0bd744SKevin Wolf     if (qemu_in_coroutine()) {
518da0bd744SKevin Wolf         bdrv_co_yield_to_drain(NULL, true, NULL, true);
519da0bd744SKevin Wolf         return;
520da0bd744SKevin Wolf     }
521da0bd744SKevin Wolf 
52263945789SPeter Maydell     /*
52363945789SPeter Maydell      * bdrv queue is managed by record/replay,
52463945789SPeter Maydell      * waiting for finishing the I/O requests may
52563945789SPeter Maydell      * be infinite
52663945789SPeter Maydell      */
52763945789SPeter Maydell     if (replay_events_enabled()) {
52863945789SPeter Maydell         return;
52963945789SPeter Maydell     }
53063945789SPeter Maydell 
531da0bd744SKevin Wolf     bdrv_drain_all_begin_nopoll();
53261007b31SStefan Hajnoczi 
5330f12264eSKevin Wolf     /* Now poll the in-flight requests */
534263d5e12SStefan Hajnoczi     AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());
5350f12264eSKevin Wolf 
5360f12264eSKevin Wolf     while ((bs = bdrv_next_all_states(bs))) {
537c13ad59fSKevin Wolf         bdrv_drain_assert_idle(bs);
538f406c03cSAlexander Yarygin     }
539f406c03cSAlexander Yarygin }
540c0778f66SAlberto Garcia 
bdrv_drain_all_end_quiesce(BlockDriverState * bs)5411a6d3bd2SGreg Kurz void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
5421a6d3bd2SGreg Kurz {
543b4ad82aaSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
5441a6d3bd2SGreg Kurz 
5451a6d3bd2SGreg Kurz     g_assert(bs->quiesce_counter > 0);
5461a6d3bd2SGreg Kurz     g_assert(!bs->refcnt);
5471a6d3bd2SGreg Kurz 
5481a6d3bd2SGreg Kurz     while (bs->quiesce_counter) {
549a82a3bd1SKevin Wolf         bdrv_do_drained_end(bs, NULL);
5501a6d3bd2SGreg Kurz     }
5511a6d3bd2SGreg Kurz }
5521a6d3bd2SGreg Kurz 
/*
 * End the drained section started by bdrv_drain_all_begin(): lift one level
 * of quiescence from every BlockDriverState and decrement the global
 * bdrv_drain_all counter.
 */
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be endless
     */
    if (replay_events_enabled()) {
        return;
    }

    /* End one drained section on every known BlockDriverState */
    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_do_drained_end(bs, NULL);
    }

    /* Must run in the main AioContext, paired with a preceding begin */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}
57561007b31SStefan Hajnoczi 
/*
 * Drain all pending I/O: begin a global drained section (which waits for all
 * in-flight requests to complete) and immediately end it again.
 */
void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
582c0778f66SAlberto Garcia 
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    /* The tracked_requests list is protected by the BDS reqs_lock */
    qemu_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_mutex_unlock(&req->bs->reqs_lock);

    /*
     * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
     * anymore because the request has been removed from the list, so it's safe
     * to restart the queue outside reqs_lock to minimize the critical section.
     */
    qemu_co_queue_restart_all(&req->wait_queue);
}
60561007b31SStefan Hajnoczi 
/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*
     * Initially the request is not serialising and its overlap window is
     * exactly [offset, offset + bytes); tracked_request_set_serialising()
     * may widen it later.
     */
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    /* Publish the request; the list is protected by bs->reqs_lock */
    qemu_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_mutex_unlock(&bs->reqs_lock);
}
63461007b31SStefan Hajnoczi 
/*
 * Return true if [offset, offset + bytes) intersects the overlap window of
 * @req.
 */
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*
     * Two half-open ranges intersect exactly when each one starts before
     * the other one ends.
     */
    return offset < req->overlap_offset + req->overlap_bytes &&
           req->overlap_offset < offset + bytes;
}
6503ba0e1a0SPaolo Bonzini 
/*
 * Return the first tracked request that conflicts with @self — i.e. at least
 * one of the two is serialising and their overlap windows intersect — or
 * NULL if there is none we must wait for.
 *
 * Called with self->bs->reqs_lock held
 */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        /* Non-serialising requests never conflict with each other */
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests.  This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}
6843183937fSVladimir Sementsov-Ogievskiy 
/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    /*
     * Sleep until no conflicting request remains.  qemu_co_queue_wait()
     * releases reqs_lock while this coroutine is parked and re-takes it on
     * wakeup, so the list must be re-scanned on every iteration.
     */
    while ((req = bdrv_find_conflicting_request(self))) {
        /* Record the dependency so others can detect (indirect) waiters */
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}
6973ba0e1a0SPaolo Bonzini 
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    /* Expand the request's range to @align boundaries on both sides */
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    /* The overlap window may only ever grow, never shrink */
    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
71609d2f948SVladimir Sementsov-Ogievskiy 
71761007b31SStefan Hajnoczi /**
718c28107e9SMax Reitz  * Return the tracked request on @bs for the current coroutine, or
719c28107e9SMax Reitz  * NULL if there is none.
720c28107e9SMax Reitz  */
bdrv_co_get_self_request(BlockDriverState * bs)721c28107e9SMax Reitz BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
722c28107e9SMax Reitz {
723c28107e9SMax Reitz     BdrvTrackedRequest *req;
724c28107e9SMax Reitz     Coroutine *self = qemu_coroutine_self();
725967d7905SEmanuele Giuseppe Esposito     IO_CODE();
726c28107e9SMax Reitz 
727c28107e9SMax Reitz     QLIST_FOREACH(req, &bs->tracked_requests, list) {
728c28107e9SMax Reitz         if (req->co == self) {
729c28107e9SMax Reitz             return req;
730c28107e9SMax Reitz         }
731c28107e9SMax Reitz     }
732c28107e9SMax Reitz 
733c28107e9SMax Reitz     return NULL;
734c28107e9SMax Reitz }
735c28107e9SMax Reitz 
736c28107e9SMax Reitz /**
737fc6b211fSAndrey Drobyshev  * Round a region to subcluster (if supported) or cluster boundaries
738244483e6SKevin Wolf  */
739a00e70c0SEmanuele Giuseppe Esposito void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_subclusters(BlockDriverState * bs,int64_t offset,int64_t bytes,int64_t * align_offset,int64_t * align_bytes)740fc6b211fSAndrey Drobyshev bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
741fc6b211fSAndrey Drobyshev                           int64_t *align_offset, int64_t *align_bytes)
742244483e6SKevin Wolf {
743244483e6SKevin Wolf     BlockDriverInfo bdi;
744384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
745fc6b211fSAndrey Drobyshev     if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
746fc6b211fSAndrey Drobyshev         *align_offset = offset;
747fc6b211fSAndrey Drobyshev         *align_bytes = bytes;
748244483e6SKevin Wolf     } else {
749fc6b211fSAndrey Drobyshev         int64_t c = bdi.subcluster_size;
750fc6b211fSAndrey Drobyshev         *align_offset = QEMU_ALIGN_DOWN(offset, c);
751fc6b211fSAndrey Drobyshev         *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
752244483e6SKevin Wolf     }
753244483e6SKevin Wolf }
754244483e6SKevin Wolf 
bdrv_get_cluster_size(BlockDriverState * bs)755a00e70c0SEmanuele Giuseppe Esposito static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
75661007b31SStefan Hajnoczi {
75761007b31SStefan Hajnoczi     BlockDriverInfo bdi;
75861007b31SStefan Hajnoczi     int ret;
75961007b31SStefan Hajnoczi 
7603d47eb0aSEmanuele Giuseppe Esposito     ret = bdrv_co_get_info(bs, &bdi);
76161007b31SStefan Hajnoczi     if (ret < 0 || bdi.cluster_size == 0) {
762a5b8dd2cSEric Blake         return bs->bl.request_alignment;
76361007b31SStefan Hajnoczi     } else {
76461007b31SStefan Hajnoczi         return bdi.cluster_size;
76561007b31SStefan Hajnoczi     }
76661007b31SStefan Hajnoczi }
76761007b31SStefan Hajnoczi 
/* Atomically increment the number of in-flight requests on @bs */
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}
77399723548SPaolo Bonzini 
/*
 * Notify AIO_WAIT_WHILE() waiters that state they poll may have changed.
 * @bs is currently unused: aio_wait_kick() kicks all waiters globally.
 */
void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}
779c9d1a561SPaolo Bonzini 
/*
 * Atomically decrement the number of in-flight requests on @bs, then wake
 * up anyone polling for in-flight requests to complete.
 */
void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    /* Decrement first so waiters observe the updated count when kicked */
    bdrv_wakeup(bs);
}
78699723548SPaolo Bonzini 
/*
 * Wait until no serialising request conflicting with @self remains in
 * flight on self->bs.
 */
static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    /* Fast path: nothing serialising at all, skip taking reqs_lock */
    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_mutex_unlock(&bs->reqs_lock);
}
80061007b31SStefan Hajnoczi 
/*
 * Mark @req serialising with an @align-sized overlap window and, still under
 * reqs_lock, wait out any already-conflicting requests.
 */
void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_mutex_unlock(&req->bs->reqs_lock);
}
8138ac5aab2SVladimir Sementsov-Ogievskiy 
/*
 * Validate an I/O request against the block layer's global limits.
 *
 * @offset and @bytes must be non-negative and must not exceed, individually
 * or as a sum, BDRV_MAX_LENGTH.  If @qiov is non-NULL, @qiov_offset must lie
 * within the vector and @bytes must fit between @qiov_offset and its end.
 *
 * Returns 0 if the request is valid, -EIO otherwise (with details in @errp).
 */
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    /* Both operands are known non-negative here, so this cannot overflow */
    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}
87363f4ad11SVladimir Sementsov-Ogievskiy 
/*
 * Validate @offset/@bytes against the block layer's global limits (see
 * bdrv_check_qiov_request()).  Returns 0 on success, -EIO otherwise.
 */
int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}
87863f4ad11SVladimir Sementsov-Ogievskiy 
/*
 * Like bdrv_check_qiov_request(), but additionally rejects requests larger
 * than BDRV_REQUEST_MAX_BYTES.  Errors are returned silently (no Error
 * object is filled in).
 */
static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);

    if (ret < 0) {
        return ret;
    }

    return bytes > BDRV_REQUEST_MAX_BYTES ? -EIO : 0;
}
89361007b31SStefan Hajnoczi 
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        /* Cap each pass at the largest request the block layer accepts */
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            /* Reached the end of the device */
            return 0;
        }
        /* On return, @bytes is shrunk to a run with uniform status */
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            /* Range already reads as zeroes: nothing to write */
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
93561007b31SStefan Hajnoczi 
93661007b31SStefan Hajnoczi /*
93761007b31SStefan Hajnoczi  * Writes to the file and ensures that no writes are reordered across this
93861007b31SStefan Hajnoczi  * request (acts as a barrier)
93961007b31SStefan Hajnoczi  *
94061007b31SStefan Hajnoczi  * Returns 0 on success, -errno in error cases.
94161007b31SStefan Hajnoczi  */
bdrv_co_pwrite_sync(BdrvChild * child,int64_t offset,int64_t bytes,const void * buf,BdrvRequestFlags flags)942e97190a4SAlberto Faria int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
943e97190a4SAlberto Faria                                      int64_t bytes, const void *buf,
944e97190a4SAlberto Faria                                      BdrvRequestFlags flags)
94561007b31SStefan Hajnoczi {
94661007b31SStefan Hajnoczi     int ret;
947384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
948b24a4c41SKevin Wolf     assert_bdrv_graph_readable();
94988095349SEmanuele Giuseppe Esposito 
950e97190a4SAlberto Faria     ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
95161007b31SStefan Hajnoczi     if (ret < 0) {
95261007b31SStefan Hajnoczi         return ret;
95361007b31SStefan Hajnoczi     }
95461007b31SStefan Hajnoczi 
955e97190a4SAlberto Faria     ret = bdrv_co_flush(child->bs);
956855a6a93SKevin Wolf     if (ret < 0) {
957855a6a93SKevin Wolf         return ret;
95861007b31SStefan Hajnoczi     }
95961007b31SStefan Hajnoczi 
96061007b31SStefan Hajnoczi     return 0;
96161007b31SStefan Hajnoczi }
96261007b31SStefan Hajnoczi 
/*
 * State shared between a coroutine issuing an AIO request and the
 * bdrv_co_io_em_complete() callback that resumes it.
 */
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;   /* coroutine to re-enter when the AIO completes */
    int ret;                /* completion status filled in by the callback */
} CoroutineIOCompletion;
96708844473SKevin Wolf 
/* AIO completion callback: record @ret and resume the waiting coroutine */
static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
97508844473SKevin Wolf 
/*
 * Read @bytes at @offset into @qiov (starting at @qiov_offset), dispatching
 * to the most capable callback the driver implements, in order:
 * .bdrv_co_preadv_part, .bdrv_co_preadv, .bdrv_aio_preadv, and finally the
 * legacy sector-based .bdrv_co_readv.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    /*
     * The remaining callbacks take no qiov_offset, so build a temporary
     * sub-vector when the request does not cover the whole qiov.
     */
    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            /* Sleep until bdrv_co_io_em_complete() wakes us with the result */
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    /* Fall back to the legacy sector-based interface */
    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    /* Free the temporary sub-vector if one was created above */
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
1044166fe960SKevin Wolf 
10457b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState * bs,int64_t offset,int64_t bytes,QEMUIOVector * qiov,size_t qiov_offset,BdrvRequestFlags flags)10467b1fb72eSKevin Wolf bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
10477b1fb72eSKevin Wolf                     QEMUIOVector *qiov, size_t qiov_offset,
1048e75abedaSVladimir Sementsov-Ogievskiy                     BdrvRequestFlags flags)
104978a07294SKevin Wolf {
105078a07294SKevin Wolf     BlockDriver *drv = bs->drv;
1051e8b65355SStefan Hajnoczi     bool emulate_fua = false;
10523fb06697SKevin Wolf     int64_t sector_num;
10533fb06697SKevin Wolf     unsigned int nb_sectors;
1054ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
105578a07294SKevin Wolf     int ret;
1056b9b10c35SKevin Wolf     assert_bdrv_graph_readable();
105778a07294SKevin Wolf 
105817abcbeeSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1059fa166538SEric Blake 
1060d470ad42SMax Reitz     if (!drv) {
1061d470ad42SMax Reitz         return -ENOMEDIUM;
1062d470ad42SMax Reitz     }
1063d470ad42SMax Reitz 
10642f3b6e61SKevin Wolf     if (bs->open_flags & BDRV_O_NO_FLUSH) {
10652f3b6e61SKevin Wolf         flags &= ~BDRV_REQ_FUA;
10662f3b6e61SKevin Wolf     }
10672f3b6e61SKevin Wolf 
1068e8b65355SStefan Hajnoczi     if ((flags & BDRV_REQ_FUA) &&
1069e8b65355SStefan Hajnoczi         (~bs->supported_write_flags & BDRV_REQ_FUA)) {
1070e8b65355SStefan Hajnoczi         flags &= ~BDRV_REQ_FUA;
1071e8b65355SStefan Hajnoczi         emulate_fua = true;
1072e8b65355SStefan Hajnoczi     }
1073e8b65355SStefan Hajnoczi 
1074e8b65355SStefan Hajnoczi     flags &= bs->supported_write_flags;
1075e8b65355SStefan Hajnoczi 
1076ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_part) {
1077ac850bf0SVladimir Sementsov-Ogievskiy         ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1078e8b65355SStefan Hajnoczi                                         flags);
1079ac850bf0SVladimir Sementsov-Ogievskiy         goto emulate_flags;
1080ac850bf0SVladimir Sementsov-Ogievskiy     }
1081ac850bf0SVladimir Sementsov-Ogievskiy 
1082ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset > 0 || bytes != qiov->size) {
1083ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1084ac850bf0SVladimir Sementsov-Ogievskiy         qiov = &local_qiov;
1085ac850bf0SVladimir Sementsov-Ogievskiy     }
1086ac850bf0SVladimir Sementsov-Ogievskiy 
10873fb06697SKevin Wolf     if (drv->bdrv_co_pwritev) {
1088e8b65355SStefan Hajnoczi         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
10893fb06697SKevin Wolf         goto emulate_flags;
10903fb06697SKevin Wolf     }
10913fb06697SKevin Wolf 
1092edfab6a0SEric Blake     if (drv->bdrv_aio_pwritev) {
109308844473SKevin Wolf         BlockAIOCB *acb;
109408844473SKevin Wolf         CoroutineIOCompletion co = {
109508844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
109608844473SKevin Wolf         };
109708844473SKevin Wolf 
1098e8b65355SStefan Hajnoczi         acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
109908844473SKevin Wolf                                     bdrv_co_io_em_complete, &co);
110008844473SKevin Wolf         if (acb == NULL) {
11013fb06697SKevin Wolf             ret = -EIO;
110208844473SKevin Wolf         } else {
110308844473SKevin Wolf             qemu_coroutine_yield();
11043fb06697SKevin Wolf             ret = co.ret;
110508844473SKevin Wolf         }
1106edfab6a0SEric Blake         goto emulate_flags;
1107edfab6a0SEric Blake     }
1108edfab6a0SEric Blake 
1109edfab6a0SEric Blake     sector_num = offset >> BDRV_SECTOR_BITS;
1110edfab6a0SEric Blake     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1111edfab6a0SEric Blake 
11121bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
11131bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
111441ae31e3SAlberto Garcia     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1115edfab6a0SEric Blake 
1116e18a58b4SEric Blake     assert(drv->bdrv_co_writev);
1117e8b65355SStefan Hajnoczi     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);
111878a07294SKevin Wolf 
11193fb06697SKevin Wolf emulate_flags:
1120e8b65355SStefan Hajnoczi     if (ret == 0 && emulate_fua) {
112178a07294SKevin Wolf         ret = bdrv_co_flush(bs);
112278a07294SKevin Wolf     }
112378a07294SKevin Wolf 
1124ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov == &local_qiov) {
1125ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&local_qiov);
1126ac850bf0SVladimir Sementsov-Ogievskiy     }
1127ac850bf0SVladimir Sementsov-Ogievskiy 
112878a07294SKevin Wolf     return ret;
112978a07294SKevin Wolf }
113078a07294SKevin Wolf 
11317b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState * bs,int64_t offset,int64_t bytes,QEMUIOVector * qiov,size_t qiov_offset)113217abcbeeSVladimir Sementsov-Ogievskiy bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
113317abcbeeSVladimir Sementsov-Ogievskiy                                int64_t bytes, QEMUIOVector *qiov,
1134ac850bf0SVladimir Sementsov-Ogievskiy                                size_t qiov_offset)
113529a298afSPavel Butsykin {
113629a298afSPavel Butsykin     BlockDriver *drv = bs->drv;
1137ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
1138ac850bf0SVladimir Sementsov-Ogievskiy     int ret;
1139b9b10c35SKevin Wolf     assert_bdrv_graph_readable();
114029a298afSPavel Butsykin 
114117abcbeeSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
114217abcbeeSVladimir Sementsov-Ogievskiy 
1143d470ad42SMax Reitz     if (!drv) {
1144d470ad42SMax Reitz         return -ENOMEDIUM;
1145d470ad42SMax Reitz     }
1146d470ad42SMax Reitz 
1147ac850bf0SVladimir Sementsov-Ogievskiy     if (!block_driver_can_compress(drv)) {
114829a298afSPavel Butsykin         return -ENOTSUP;
114929a298afSPavel Butsykin     }
115029a298afSPavel Butsykin 
1151ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_compressed_part) {
1152ac850bf0SVladimir Sementsov-Ogievskiy         return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1153ac850bf0SVladimir Sementsov-Ogievskiy                                                     qiov, qiov_offset);
1154ac850bf0SVladimir Sementsov-Ogievskiy     }
1155ac850bf0SVladimir Sementsov-Ogievskiy 
1156ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset == 0) {
115729a298afSPavel Butsykin         return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
115829a298afSPavel Butsykin     }
115929a298afSPavel Butsykin 
1160ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1161ac850bf0SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1162ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_destroy(&local_qiov);
1163ac850bf0SVladimir Sementsov-Ogievskiy 
1164ac850bf0SVladimir Sementsov-Ogievskiy     return ret;
1165ac850bf0SVladimir Sementsov-Ogievskiy }
1166ac850bf0SVladimir Sementsov-Ogievskiy 
/*
 * Copy-on-read worker for an aligned read request.
 *
 * Reads @bytes at @offset into @qiov (starting at byte @qiov_offset).
 * Ranges that are not yet allocated in @child->bs are read through a
 * bounce buffer and written back into the image at subcluster
 * granularity, so subsequent reads are served from the image itself.
 * If BDRV_REQ_PREFETCH is set in @flags, only the copy-on-read side
 * effect is performed and no data is copied into @qiov.
 *
 * Returns 0 on success or a negative errno.  Coroutine context,
 * graph read lock held (GRAPH_RDLOCK).
 */
11677b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild * child,int64_t offset,int64_t bytes,QEMUIOVector * qiov,size_t qiov_offset,int flags)11687b1fb72eSKevin Wolf bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
11697b1fb72eSKevin Wolf                          QEMUIOVector *qiov, size_t qiov_offset, int flags)
117061007b31SStefan Hajnoczi {
117185c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
117285c97ca7SKevin Wolf 
117361007b31SStefan Hajnoczi     /* Perform I/O through a temporary buffer so that users who scribble over
117461007b31SStefan Hajnoczi      * their read buffer while the operation is in progress do not end up
117561007b31SStefan Hajnoczi      * modifying the image file.  This is critical for zero-copy guest I/O
117661007b31SStefan Hajnoczi      * where anything might happen inside guest memory.
117761007b31SStefan Hajnoczi      */
11782275cc90SVladimir Sementsov-Ogievskiy     void *bounce_buffer = NULL;
117961007b31SStefan Hajnoczi 
118061007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
1181fc6b211fSAndrey Drobyshev     int64_t align_offset;
1182fc6b211fSAndrey Drobyshev     int64_t align_bytes;
11839df5afbdSVladimir Sementsov-Ogievskiy     int64_t skip_bytes;
118461007b31SStefan Hajnoczi     int ret;
1185cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1186cb2e2878SEric Blake                                     BDRV_REQUEST_MAX_BYTES);
    /* Bytes of the caller's request satisfied so far */
11879df5afbdSVladimir Sementsov-Ogievskiy     int64_t progress = 0;
11888644476eSMax Reitz     bool skip_write;
118961007b31SStefan Hajnoczi 
11909df5afbdSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
11919df5afbdSVladimir Sementsov-Ogievskiy 
1192d470ad42SMax Reitz     if (!drv) {
1193d470ad42SMax Reitz         return -ENOMEDIUM;
1194d470ad42SMax Reitz     }
1195d470ad42SMax Reitz 
11968644476eSMax Reitz     /*
11978644476eSMax Reitz      * Do not write anything when the BDS is inactive.  That is not
11988644476eSMax Reitz      * allowed, and it would not help.
11998644476eSMax Reitz      */
12008644476eSMax Reitz     skip_write = (bs->open_flags & BDRV_O_INACTIVE);
12018644476eSMax Reitz 
12021bf03e66SKevin Wolf     /* FIXME We cannot require callers to have write permissions when all they
12031bf03e66SKevin Wolf      * are doing is a read request. If we did things right, write permissions
12041bf03e66SKevin Wolf      * would be obtained anyway, but internally by the copy-on-read code. As
1205765d9df9SEric Blake      * long as it is implemented here rather than in a separate filter driver,
12061bf03e66SKevin Wolf      * the copy-on-read code doesn't have its own BdrvChild, however, for which
12071bf03e66SKevin Wolf      * it could request permissions. Therefore we have to bypass the permission
12081bf03e66SKevin Wolf      * system for the moment. */
12091bf03e66SKevin Wolf     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1210afa4b293SKevin Wolf 
121161007b31SStefan Hajnoczi     /* Cover entire cluster so no additional backing file I/O is required when
1212cb2e2878SEric Blake      * allocating cluster in the image file.  Note that this value may exceed
1213cb2e2878SEric Blake      * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1214cb2e2878SEric Blake      * is one reason we loop rather than doing it all at once.
121561007b31SStefan Hajnoczi      */
1216fc6b211fSAndrey Drobyshev     bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
1217fc6b211fSAndrey Drobyshev     skip_bytes = offset - align_offset;
121861007b31SStefan Hajnoczi 
1219244483e6SKevin Wolf     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1220fc6b211fSAndrey Drobyshev                                    align_offset, align_bytes);
122161007b31SStefan Hajnoczi 
    /* Walk the subcluster-aligned range in chunks of at most max_transfer */
1222fc6b211fSAndrey Drobyshev     while (align_bytes) {
1223cb2e2878SEric Blake         int64_t pnum;
122461007b31SStefan Hajnoczi 
12258644476eSMax Reitz         if (skip_write) {
12268644476eSMax Reitz             ret = 1; /* "already allocated", so nothing will be copied */
1227fc6b211fSAndrey Drobyshev             pnum = MIN(align_bytes, max_transfer);
12288644476eSMax Reitz         } else {
1229cc323997SPaolo Bonzini             ret = bdrv_co_is_allocated(bs, align_offset,
1230fc6b211fSAndrey Drobyshev                                        MIN(align_bytes, max_transfer), &pnum);
1231cb2e2878SEric Blake             if (ret < 0) {
12328644476eSMax Reitz                 /*
12338644476eSMax Reitz                  * Safe to treat errors in querying allocation as if
1234cb2e2878SEric Blake                  * unallocated; we'll probably fail again soon on the
1235cb2e2878SEric Blake                  * read, but at least that will set a decent errno.
1236cb2e2878SEric Blake                  */
1237fc6b211fSAndrey Drobyshev                 pnum = MIN(align_bytes, max_transfer);
1238cb2e2878SEric Blake             }
1239cb2e2878SEric Blake 
1240b0ddcbbbSKevin Wolf             /* Stop at EOF if the image ends in the middle of the cluster */
1241b0ddcbbbSKevin Wolf             if (ret == 0 && pnum == 0) {
1242b0ddcbbbSKevin Wolf                 assert(progress >= bytes);
1243b0ddcbbbSKevin Wolf                 break;
1244b0ddcbbbSKevin Wolf             }
1245b0ddcbbbSKevin Wolf 
1246cb2e2878SEric Blake             assert(skip_bytes < pnum);
12478644476eSMax Reitz         }
1248cb2e2878SEric Blake 
        /* ret <= 0: range is unallocated (or status unknown) — copy on read */
1249cb2e2878SEric Blake         if (ret <= 0) {
12501143ec5eSVladimir Sementsov-Ogievskiy             QEMUIOVector local_qiov;
12511143ec5eSVladimir Sementsov-Ogievskiy 
1252cb2e2878SEric Blake             /* Must copy-on-read; use the bounce buffer */
12530d93ed08SVladimir Sementsov-Ogievskiy             pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            /* Allocated lazily, on first unallocated chunk, and reused after */
12542275cc90SVladimir Sementsov-Ogievskiy             if (!bounce_buffer) {
1255fc6b211fSAndrey Drobyshev                 int64_t max_we_need = MAX(pnum, align_bytes - pnum);
12562275cc90SVladimir Sementsov-Ogievskiy                 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
12572275cc90SVladimir Sementsov-Ogievskiy                 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
12582275cc90SVladimir Sementsov-Ogievskiy 
12592275cc90SVladimir Sementsov-Ogievskiy                 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
12602275cc90SVladimir Sementsov-Ogievskiy                 if (!bounce_buffer) {
12612275cc90SVladimir Sementsov-Ogievskiy                     ret = -ENOMEM;
12622275cc90SVladimir Sementsov-Ogievskiy                     goto err;
12632275cc90SVladimir Sementsov-Ogievskiy                 }
12642275cc90SVladimir Sementsov-Ogievskiy             }
12650d93ed08SVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1266cb2e2878SEric Blake 
1267fc6b211fSAndrey Drobyshev             ret = bdrv_driver_preadv(bs, align_offset, pnum,
1268ac850bf0SVladimir Sementsov-Ogievskiy                                      &local_qiov, 0, 0);
126961007b31SStefan Hajnoczi             if (ret < 0) {
127061007b31SStefan Hajnoczi                 goto err;
127161007b31SStefan Hajnoczi             }
127261007b31SStefan Hajnoczi 
1273c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
1274c1499a5eSEric Blake             if (drv->bdrv_co_pwrite_zeroes &&
1275cb2e2878SEric Blake                 buffer_is_zero(bounce_buffer, pnum)) {
1276a604fa2bSEric Blake                 /* FIXME: Should we (perhaps conditionally) be setting
1277a604fa2bSEric Blake                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1278a604fa2bSEric Blake                  * that still correctly reads as zero? */
1279fc6b211fSAndrey Drobyshev                 ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
12807adcf59fSMax Reitz                                                BDRV_REQ_WRITE_UNCHANGED);
128161007b31SStefan Hajnoczi             } else {
1282cb2e2878SEric Blake                 /* This does not change the data on the disk, it is not
1283cb2e2878SEric Blake                  * necessary to flush even in cache=writethrough mode.
128461007b31SStefan Hajnoczi                  */
1285fc6b211fSAndrey Drobyshev                 ret = bdrv_driver_pwritev(bs, align_offset, pnum,
1286ac850bf0SVladimir Sementsov-Ogievskiy                                           &local_qiov, 0,
12877adcf59fSMax Reitz                                           BDRV_REQ_WRITE_UNCHANGED);
128861007b31SStefan Hajnoczi             }
128961007b31SStefan Hajnoczi 
129061007b31SStefan Hajnoczi             if (ret < 0) {
1291cb2e2878SEric Blake                 /* It might be okay to ignore write errors for guest
1292cb2e2878SEric Blake                  * requests.  If this is a deliberate copy-on-read
1293cb2e2878SEric Blake                  * then we don't want to ignore the error.  Simply
1294cb2e2878SEric Blake                  * report it in all cases.
129561007b31SStefan Hajnoczi                  */
129661007b31SStefan Hajnoczi                 goto err;
129761007b31SStefan Hajnoczi             }
129861007b31SStefan Hajnoczi 
12993299e5ecSVladimir Sementsov-Ogievskiy             if (!(flags & BDRV_REQ_PREFETCH)) {
13001143ec5eSVladimir Sementsov-Ogievskiy                 qemu_iovec_from_buf(qiov, qiov_offset + progress,
13011143ec5eSVladimir Sementsov-Ogievskiy                                     bounce_buffer + skip_bytes,
13024ab78b19SVladimir Sementsov-Ogievskiy                                     MIN(pnum - skip_bytes, bytes - progress));
13033299e5ecSVladimir Sementsov-Ogievskiy             }
13043299e5ecSVladimir Sementsov-Ogievskiy         } else if (!(flags & BDRV_REQ_PREFETCH)) {
1305cb2e2878SEric Blake             /* Read directly into the destination */
13061143ec5eSVladimir Sementsov-Ogievskiy             ret = bdrv_driver_preadv(bs, offset + progress,
13071143ec5eSVladimir Sementsov-Ogievskiy                                      MIN(pnum - skip_bytes, bytes - progress),
13081143ec5eSVladimir Sementsov-Ogievskiy                                      qiov, qiov_offset + progress, 0);
1309cb2e2878SEric Blake             if (ret < 0) {
1310cb2e2878SEric Blake                 goto err;
1311cb2e2878SEric Blake             }
1312cb2e2878SEric Blake         }
1313cb2e2878SEric Blake 
        /* skip_bytes only offsets the first chunk; zero from then on */
1314fc6b211fSAndrey Drobyshev         align_offset += pnum;
1315fc6b211fSAndrey Drobyshev         align_bytes -= pnum;
1316cb2e2878SEric Blake         progress += pnum - skip_bytes;
1317cb2e2878SEric Blake         skip_bytes = 0;
1318cb2e2878SEric Blake     }
1319cb2e2878SEric Blake     ret = 0;
132061007b31SStefan Hajnoczi 
132161007b31SStefan Hajnoczi err:
132161007b31SStefan Hajnoczi     qemu_vfree(bounce_buffer);
132361007b31SStefan Hajnoczi     return ret;
132461007b31SStefan Hajnoczi }
132561007b31SStefan Hajnoczi 
132661007b31SStefan Hajnoczi /*
132761007b31SStefan Hajnoczi  * Forwards an already correctly aligned request to the BlockDriver. This
13281a62d0acSEric Blake  * handles copy on read, zeroing after EOF, and fragmentation of large
13291a62d0acSEric Blake  * reads; any other features must be implemented by the caller.
133061007b31SStefan Hajnoczi  */
/*
 * @offset and @bytes must be multiples of @align (asserted below).
 * @flags may only contain BDRV_REQ_COPY_ON_READ, BDRV_REQ_PREFETCH and
 * BDRV_REQ_REGISTERED_BUF.  Returns 0 on success, negative errno on error.
 */
13317b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild * child,BdrvTrackedRequest * req,int64_t offset,int64_t bytes,int64_t align,QEMUIOVector * qiov,size_t qiov_offset,int flags)13327b1fb72eSKevin Wolf bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
13337b1fb72eSKevin Wolf                     int64_t offset, int64_t bytes, int64_t align,
13347b1fb72eSKevin Wolf                     QEMUIOVector *qiov, size_t qiov_offset, int flags)
133561007b31SStefan Hajnoczi {
133685c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
1337c9d20029SKevin Wolf     int64_t total_bytes, max_bytes;
13381a62d0acSEric Blake     int ret = 0;
13398b0c5d76SVladimir Sementsov-Ogievskiy     int64_t bytes_remaining = bytes;
13401a62d0acSEric Blake     int max_transfer;
134161007b31SStefan Hajnoczi 
13428b0c5d76SVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
134349c07526SKevin Wolf     assert(is_power_of_2(align));
134449c07526SKevin Wolf     assert((offset & (align - 1)) == 0);
134549c07526SKevin Wolf     assert((bytes & (align - 1)) == 0);
1346abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
13471a62d0acSEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
13481a62d0acSEric Blake                                    align);
1349a604fa2bSEric Blake 
1350e8b65355SStefan Hajnoczi     /*
1351e8b65355SStefan Hajnoczi      * TODO: We would need a per-BDS .supported_read_flags and
1352a604fa2bSEric Blake      * potential fallback support, if we ever implement any read flags
1353a604fa2bSEric Blake      * to pass through to drivers.  For now, there aren't any
1354e8b65355SStefan Hajnoczi      * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
1355e8b65355SStefan Hajnoczi      */
1356e8b65355SStefan Hajnoczi     assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
1357e8b65355SStefan Hajnoczi                        BDRV_REQ_REGISTERED_BUF)));
135861007b31SStefan Hajnoczi 
135961007b31SStefan Hajnoczi     /* Handle Copy on Read and associated serialisation */
136061007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
136161007b31SStefan Hajnoczi         /* If we touch the same cluster it counts as an overlap.  This
136261007b31SStefan Hajnoczi          * guarantees that allocating writes will be serialized and not race
136361007b31SStefan Hajnoczi          * with each other for the same cluster.  For example, in copy-on-read
136461007b31SStefan Hajnoczi          * it ensures that the CoR read and write operations are atomic and
136561007b31SStefan Hajnoczi          * guest writes cannot interleave between them. */
13668ac5aab2SVladimir Sementsov-Ogievskiy         bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
136718fbd0deSPaolo Bonzini     } else {
1368304d9d7fSMax Reitz         bdrv_wait_serialising_requests(req);
136918fbd0deSPaolo Bonzini     }
137061007b31SStefan Hajnoczi 
137161007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
1372d6a644bbSEric Blake         int64_t pnum;
137361007b31SStefan Hajnoczi 
1374897dd0ecSAndrey Shinkevich         /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
1375897dd0ecSAndrey Shinkevich         flags &= ~BDRV_REQ_COPY_ON_READ;
1376897dd0ecSAndrey Shinkevich 
        /* Fully allocated range needs no CoR; read it the ordinary way */
1377cc323997SPaolo Bonzini         ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
137861007b31SStefan Hajnoczi         if (ret < 0) {
137961007b31SStefan Hajnoczi             goto out;
138061007b31SStefan Hajnoczi         }
138161007b31SStefan Hajnoczi 
138288e63df2SEric Blake         if (!ret || pnum != bytes) {
138365cd4424SVladimir Sementsov-Ogievskiy             ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
138465cd4424SVladimir Sementsov-Ogievskiy                                            qiov, qiov_offset, flags);
13853299e5ecSVladimir Sementsov-Ogievskiy             goto out;
13863299e5ecSVladimir Sementsov-Ogievskiy         } else if (flags & BDRV_REQ_PREFETCH) {
            /* Already allocated and nothing to deliver: prefetch is done */
138761007b31SStefan Hajnoczi             goto out;
138861007b31SStefan Hajnoczi         }
138961007b31SStefan Hajnoczi     }
139061007b31SStefan Hajnoczi 
13911a62d0acSEric Blake     /* Forward the request to the BlockDriver, possibly fragmenting it */
13920af02bd1SPaolo Bonzini     total_bytes = bdrv_co_getlength(bs);
139349c07526SKevin Wolf     if (total_bytes < 0) {
139449c07526SKevin Wolf         ret = total_bytes;
139561007b31SStefan Hajnoczi         goto out;
139661007b31SStefan Hajnoczi     }
139761007b31SStefan Hajnoczi 
1398e8b65355SStefan Hajnoczi     assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));
1399897dd0ecSAndrey Shinkevich 
    /* max_bytes: readable data before EOF; the rest is zero-filled below */
140049c07526SKevin Wolf     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
14011a62d0acSEric Blake     if (bytes <= max_bytes && bytes <= max_transfer) {
1402897dd0ecSAndrey Shinkevich         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
14031a62d0acSEric Blake         goto out;
140461007b31SStefan Hajnoczi     }
140561007b31SStefan Hajnoczi 
    /* Fragment into max_transfer-sized pieces; zero-fill once past EOF */
14061a62d0acSEric Blake     while (bytes_remaining) {
14078b0c5d76SVladimir Sementsov-Ogievskiy         int64_t num;
14081a62d0acSEric Blake 
14091a62d0acSEric Blake         if (max_bytes) {
14101a62d0acSEric Blake             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
14111a62d0acSEric Blake             assert(num);
14121a62d0acSEric Blake 
14131a62d0acSEric Blake             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1414134b7decSMax Reitz                                      num, qiov,
1415897dd0ecSAndrey Shinkevich                                      qiov_offset + bytes - bytes_remaining,
1416897dd0ecSAndrey Shinkevich                                      flags);
14171a62d0acSEric Blake             max_bytes -= num;
14181a62d0acSEric Blake         } else {
14191a62d0acSEric Blake             num = bytes_remaining;
1420134b7decSMax Reitz             ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1421134b7decSMax Reitz                                     0, bytes_remaining);
14221a62d0acSEric Blake         }
14231a62d0acSEric Blake         if (ret < 0) {
14241a62d0acSEric Blake             goto out;
14251a62d0acSEric Blake         }
14261a62d0acSEric Blake         bytes_remaining -= num;
142761007b31SStefan Hajnoczi     }
142861007b31SStefan Hajnoczi 
142961007b31SStefan Hajnoczi out:
14301a62d0acSEric Blake     return ret < 0 ? ret : 0;
143161007b31SStefan Hajnoczi }
143261007b31SStefan Hajnoczi 
143361007b31SStefan Hajnoczi /*
14347a3f542fSVladimir Sementsov-Ogievskiy  * Request padding
14357a3f542fSVladimir Sementsov-Ogievskiy  *
14367a3f542fSVladimir Sementsov-Ogievskiy  *  |<---- align ----->|                     |<----- align ---->|
14377a3f542fSVladimir Sementsov-Ogievskiy  *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
14387a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14397a3f542fSVladimir Sementsov-Ogievskiy  * -*----------$-------*-------- ... --------*-----$------------*---
14407a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14417a3f542fSVladimir Sementsov-Ogievskiy  *  |          offset  |                     |     end          |
14427a3f542fSVladimir Sementsov-Ogievskiy  *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
14437a3f542fSVladimir Sementsov-Ogievskiy  *  [buf   ... )                             [tail_buf          )
14447a3f542fSVladimir Sementsov-Ogievskiy  *
14457a3f542fSVladimir Sementsov-Ogievskiy  * @buf is an aligned allocation needed to store @head and @tail paddings. @head
14467a3f542fSVladimir Sementsov-Ogievskiy  * is placed at the beginning of @buf and @tail at the @end.
14477a3f542fSVladimir Sementsov-Ogievskiy  *
14487a3f542fSVladimir Sementsov-Ogievskiy  * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
14497a3f542fSVladimir Sementsov-Ogievskiy  * around tail, if tail exists.
14507a3f542fSVladimir Sementsov-Ogievskiy  *
14517a3f542fSVladimir Sementsov-Ogievskiy  * @merge_reads is true for small requests,
14527a3f542fSVladimir Sementsov-Ogievskiy  * if @buf_len == @head + bytes + @tail. In this case it is possible that both
14537a3f542fSVladimir Sementsov-Ogievskiy  * head and tail exist but @buf_len == align and @tail_buf == @buf.
145418743311SHanna Czenczek  *
145518743311SHanna Czenczek  * @write is true for write requests, false for read requests.
145618743311SHanna Czenczek  *
145718743311SHanna Czenczek  * If padding makes the vector too long (exceeding IOV_MAX), then we need to
145818743311SHanna Czenczek  * merge existing vector elements into a single one.  @collapse_bounce_buf acts
145918743311SHanna Czenczek  * as the bounce buffer in such cases.  @pre_collapse_qiov has the pre-collapse
146018743311SHanna Czenczek  * I/O vector elements so for read requests, the data can be copied back after
146118743311SHanna Czenczek  * the read is done.
146261007b31SStefan Hajnoczi  */
14637a3f542fSVladimir Sementsov-Ogievskiy typedef struct BdrvRequestPadding {
14647a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *buf; /* aligned allocation holding @head and @tail padding */
14657a3f542fSVladimir Sementsov-Ogievskiy     size_t buf_len; /* length of @buf */
14667a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *tail_buf; /* align-sized chunk around the tail, if tail exists */
14677a3f542fSVladimir Sementsov-Ogievskiy     size_t head; /* padding bytes before the client request */
14687a3f542fSVladimir Sementsov-Ogievskiy     size_t tail; /* padding bytes after the client request */
14697a3f542fSVladimir Sementsov-Ogievskiy     bool merge_reads; /* true if @buf_len == @head + bytes + @tail */
147018743311SHanna Czenczek     bool write; /* true for write requests, false for reads */
14717a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov; /* NOTE(review): appears to hold the padded request's vector; built outside this view */
147218743311SHanna Czenczek 
    /* Used when padding pushes the vector past IOV_MAX (see comment above) */
147318743311SHanna Czenczek     uint8_t *collapse_bounce_buf; /* bounce buffer for collapsed elements */
147418743311SHanna Czenczek     size_t collapse_len; /* number of bytes in @collapse_bounce_buf */
147518743311SHanna Czenczek     QEMUIOVector pre_collapse_qiov; /* elements replaced by the bounce buffer */
14767a3f542fSVladimir Sementsov-Ogievskiy } BdrvRequestPadding;
14777a3f542fSVladimir Sementsov-Ogievskiy 
/*
 * Initialize @pad for the unaligned request [@offset, @offset + @bytes).
 *
 * Computes the head and tail padding needed to align the request to
 * bs->bl.request_alignment and allocates the padding buffer: a single
 * alignment-sized chunk, or two chunks when head and tail fall into
 * distinct alignment windows of a larger request.
 *
 * @write is recorded in @pad so later cleanup knows whether collapsed
 * vector elements must be copied back (read requests only).
 *
 * Returns true if padding is needed (then @pad owns an allocated buffer
 * that must be released via bdrv_padding_finalize()), false otherwise.
 */
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              bool write,
                              BdrvRequestPadding *pad)
{
    int64_t alignment = bs->bl.request_alignment;
    int64_t padded_len;
    int64_t tail_rem;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(alignment <= INT_MAX); /* documented in block/block_int.h */
    assert(alignment <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (alignment - 1);
    tail_rem = (offset + bytes) & (alignment - 1);
    pad->tail = tail_rem ? alignment - tail_rem : 0;

    if (pad->head == 0 && pad->tail == 0) {
        /* Request already aligned: no padding buffer required */
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    padded_len = pad->head + bytes + pad->tail;
    /* Two chunks only when head and tail land in different windows */
    pad->buf_len = (padded_len > alignment && pad->head && pad->tail)
                   ? 2 * alignment : alignment;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = (padded_len == pad->buf_len);
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - alignment;
    }

    pad->write = write;

    return true;
}
15167a3f542fSVladimir Sementsov-Ogievskiy 
/*
 * Read-modify-write helper: fill pad->buf with the on-disk contents of
 * the padding areas before an unaligned write is carried out.
 *
 * Reads the align-sized chunk covering the head (or, when
 * pad->merge_reads is set, the whole pad->buf_len region covering head
 * and tail in one read), then, if needed, the align-sized chunk covering
 * the tail.  If @zero_middle is set, the bytes between head and tail
 * padding inside the buffer are zeroed afterwards.
 *
 * The caller must already have made @req serialising (asserted below).
 * Returns 0 on success or a negative errno.  Graph read lock held.
 */
15177b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild * child,BdrvTrackedRequest * req,BdrvRequestPadding * pad,bool zero_middle)15187b1fb72eSKevin Wolf bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
15197b1fb72eSKevin Wolf                       BdrvRequestPadding *pad, bool zero_middle)
15207a3f542fSVladimir Sementsov-Ogievskiy {
15217a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
15227a3f542fSVladimir Sementsov-Ogievskiy     BlockDriverState *bs = child->bs;
15237a3f542fSVladimir Sementsov-Ogievskiy     uint64_t align = bs->bl.request_alignment;
15247a3f542fSVladimir Sementsov-Ogievskiy     int ret;
15257a3f542fSVladimir Sementsov-Ogievskiy 
15267a3f542fSVladimir Sementsov-Ogievskiy     assert(req->serialising && pad->buf);
15277a3f542fSVladimir Sementsov-Ogievskiy 
    /* Head chunk — covers the tail too when merge_reads is set */
15287a3f542fSVladimir Sementsov-Ogievskiy     if (pad->head || pad->merge_reads) {
15298b0c5d76SVladimir Sementsov-Ogievskiy         int64_t bytes = pad->merge_reads ? pad->buf_len : align;
15307a3f542fSVladimir Sementsov-Ogievskiy 
15317a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
15327a3f542fSVladimir Sementsov-Ogievskiy 
15337a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
1534c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
15357a3f542fSVladimir Sementsov-Ogievskiy         }
15367a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
1537c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15387a3f542fSVladimir Sementsov-Ogievskiy         }
15397a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
154065cd4424SVladimir Sementsov-Ogievskiy                                   align, &local_qiov, 0, 0);
15417a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15427a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15437a3f542fSVladimir Sementsov-Ogievskiy         }
15447a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
1545c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
15467a3f542fSVladimir Sementsov-Ogievskiy         }
15477a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
1548c834dc05SEmanuele Giuseppe Esposito             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15497a3f542fSVladimir Sementsov-Ogievskiy         }
15507a3f542fSVladimir Sementsov-Ogievskiy 
15517a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads) {
15527a3f542fSVladimir Sementsov-Ogievskiy             goto zero_mem;
15537a3f542fSVladimir Sementsov-Ogievskiy         }
15547a3f542fSVladimir Sementsov-Ogievskiy     }
15557a3f542fSVladimir Sementsov-Ogievskiy 
    /* Separate tail chunk (only when not already covered by merge_reads) */
15567a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
15577a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
15587a3f542fSVladimir Sementsov-Ogievskiy 
1559c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15607a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(
15617a3f542fSVladimir Sementsov-Ogievskiy                 child, req,
15627a3f542fSVladimir Sementsov-Ogievskiy                 req->overlap_offset + req->overlap_bytes - align,
156365cd4424SVladimir Sementsov-Ogievskiy                 align, align, &local_qiov, 0, 0);
15647a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15657a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15667a3f542fSVladimir Sementsov-Ogievskiy         }
1567c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15687a3f542fSVladimir Sementsov-Ogievskiy     }
15697a3f542fSVladimir Sementsov-Ogievskiy 
15707a3f542fSVladimir Sementsov-Ogievskiy zero_mem:
15717a3f542fSVladimir Sementsov-Ogievskiy     if (zero_middle) {
15727a3f542fSVladimir Sementsov-Ogievskiy         memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
15737a3f542fSVladimir Sementsov-Ogievskiy     }
15747a3f542fSVladimir Sementsov-Ogievskiy 
15757a3f542fSVladimir Sementsov-Ogievskiy     return 0;
15767a3f542fSVladimir Sementsov-Ogievskiy }
15777a3f542fSVladimir Sementsov-Ogievskiy 
157818743311SHanna Czenczek /**
157918743311SHanna Czenczek  * Free *pad's associated buffers, and perform any necessary finalization steps.
158018743311SHanna Czenczek  */
bdrv_padding_finalize(BdrvRequestPadding * pad)158118743311SHanna Czenczek static void bdrv_padding_finalize(BdrvRequestPadding *pad)
15827a3f542fSVladimir Sementsov-Ogievskiy {
158318743311SHanna Czenczek     if (pad->collapse_bounce_buf) {
158418743311SHanna Czenczek         if (!pad->write) {
158518743311SHanna Czenczek             /*
158618743311SHanna Czenczek              * If padding required elements in the vector to be collapsed into a
158718743311SHanna Czenczek              * bounce buffer, copy the bounce buffer content back
158818743311SHanna Czenczek              */
158918743311SHanna Czenczek             qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
159018743311SHanna Czenczek                                 pad->collapse_bounce_buf, pad->collapse_len);
159118743311SHanna Czenczek         }
159218743311SHanna Czenczek         qemu_vfree(pad->collapse_bounce_buf);
159318743311SHanna Czenczek         qemu_iovec_destroy(&pad->pre_collapse_qiov);
159418743311SHanna Czenczek     }
15957a3f542fSVladimir Sementsov-Ogievskiy     if (pad->buf) {
15967a3f542fSVladimir Sementsov-Ogievskiy         qemu_vfree(pad->buf);
15977a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&pad->local_qiov);
15987a3f542fSVladimir Sementsov-Ogievskiy     }
159998ca4549SVladimir Sementsov-Ogievskiy     memset(pad, 0, sizeof(*pad));
16007a3f542fSVladimir Sementsov-Ogievskiy }
16017a3f542fSVladimir Sementsov-Ogievskiy 
/*
 * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
 * ensuring that the resulting vector will not exceed IOV_MAX elements.
 *
 * To ensure this, when necessary, the first two or three elements of @iov are
 * merged into pad->collapse_bounce_buf and replaced by a reference to that
 * bounce buffer in pad->local_qiov.
 *
 * After performing a read request, the data from the bounce buffer must be
 * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
 *
 * @iov/@niov: the sliced request vector (at most IOV_MAX elements)
 * @iov_offset: byte offset into @iov where the request data starts
 * @bytes: length in bytes of the request data within @iov
 *
 * Returns 0 on success, -EINVAL if the padded length would overflow SIZE_MAX.
 */
static int bdrv_create_padded_qiov(BlockDriverState *bs,
                                   BdrvRequestPadding *pad,
                                   struct iovec *iov, int niov,
                                   size_t iov_offset, size_t bytes)
{
    int padded_niov, surplus_count, collapse_count;

    /* Assert this invariant */
    assert(niov <= IOV_MAX);

    /*
     * Cannot pad if resulting length would exceed SIZE_MAX.  Returning an error
     * to the guest is not ideal, but there is little else we can do.  At least
     * this will practically never happen on 64-bit systems.
     */
    if (SIZE_MAX - pad->head < bytes ||
        SIZE_MAX - pad->head - bytes < pad->tail)
    {
        return -EINVAL;
    }

    /* Length of the resulting IOV if we just concatenated everything */
    padded_niov = !!pad->head + niov + !!pad->tail;

    qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));

    if (pad->head) {
        qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
    }

    /*
     * If padded_niov > IOV_MAX, we cannot just concatenate everything.
     * Instead, merge the first two or three elements of @iov to reduce the
     * number of vector elements as necessary.
     */
    if (padded_niov > IOV_MAX) {
        /*
         * Only head and tail can have led to the number of entries exceeding
         * IOV_MAX, so we can exceed it by the head and tail at most.  We need
         * to reduce the number of elements by `surplus_count`, so we merge that
         * many elements plus one into one element.
         */
        surplus_count = padded_niov - IOV_MAX;
        assert(surplus_count <= !!pad->head + !!pad->tail);
        collapse_count = surplus_count + 1;

        /*
         * Move the elements to collapse into `pad->pre_collapse_qiov`, then
         * advance `iov` (and associated variables) by those elements.
         */
        qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
        qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
                              collapse_count, iov_offset, SIZE_MAX);
        iov += collapse_count;
        iov_offset = 0;
        niov -= collapse_count;
        bytes -= pad->pre_collapse_qiov.size;

        /*
         * Construct the bounce buffer to match the length of the to-collapse
         * vector elements, and for write requests, initialize it with the data
         * from those elements.  Then add it to `pad->local_qiov`.
         */
        pad->collapse_len = pad->pre_collapse_qiov.size;
        pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
        if (pad->write) {
            qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
                              pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_iovec_add(&pad->local_qiov,
                       pad->collapse_bounce_buf, pad->collapse_len);
    }

    /* Append the (possibly shortened) remainder of the caller's vector */
    qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);

    if (pad->tail) {
        qemu_iovec_add(&pad->local_qiov,
                       pad->buf + pad->buf_len - pad->tail, pad->tail);
    }

    assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
    return 0;
}
169618743311SHanna Czenczek 
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * @write is true for write requests, false for read requests.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 *
 * On success with padding, @pad owns buffers that the caller must release
 * with bdrv_padding_finalize() once the I/O has completed.
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            bool write,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;
    struct iovec *sliced_iov;
    int sliced_niov;
    size_t sliced_head, sliced_tail;

    /* Should have been checked by the caller already */
    ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
        /* Request is already aligned; leave all parameters untouched */
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    /*
     * For prefetching in stream_populate(), no qiov is passed along, because
     * only copy-on-read matters.
     */
    if (*qiov) {
        sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
                                      &sliced_head, &sliced_tail,
                                      &sliced_niov);

        /* Guaranteed by bdrv_check_request32() */
        assert(*bytes <= SIZE_MAX);
        ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
                                      sliced_head, *bytes);
        if (ret < 0) {
            bdrv_padding_finalize(pad);
            return ret;
        }
        *qiov = &pad->local_qiov;
        *qiov_offset = 0;
    }

    /* Grow the request to cover the padding head and tail */
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}
17697a3f542fSVladimir Sementsov-Ogievskiy 
/* Convenience wrapper: bdrv_co_preadv_part() with a qiov offset of zero. */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
17771acc3466SVladimir Sementsov-Ogievskiy 
/*
 * Read @bytes bytes at @offset into @qiov starting at @qiov_offset.
 *
 * Validates the request, pads it to the driver's request alignment via
 * bdrv_pad_request(), and forwards it as a tracked read request to
 * bdrv_aligned_preadv().  Copy-on-read is requested when enabled on @bs.
 *
 * Returns 0 on success, or a negative errno (e.g. -ENOMEDIUM when no
 * medium is inserted, or the error from the validation/read path).
 */
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Pad the request to the driver's alignment (false = read request) */
    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
                           &pad, NULL, &flags);
    if (ret < 0) {
        /* No tracked request was begun yet; only drop the in-flight count */
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_finalize(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}
183761007b31SStefan Hajnoczi 
/*
 * Write zeroes to [@offset, @offset + @bytes).
 *
 * The range is split into chunks honoring pwrite_zeroes_alignment,
 * max_pwrite_zeroes and max_transfer.  Each chunk first tries the driver's
 * efficient bdrv_co_pwrite_zeroes callback; on -ENOTSUP (and unless
 * BDRV_REQ_NO_FALLBACK is set) it falls back to writing a zeroed bounce
 * buffer via bdrv_driver_pwritev().  If BDRV_REQ_FUA was requested but the
 * driver path used cannot honor it, a single flush is issued at the end.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                         BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;          /* lazily-allocated zeroed bounce buffer */
    int ret = 0;
    bool need_flush = false;   /* deferred FUA emulation via one final flush */
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* If opened with discard=off we should never unmap. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector.  */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
196661007b31SStefan Hajnoczi 
/*
 * Common preparation for write-style tracked requests (write, discard,
 * truncate): permission checks, serialising-request handling, and the
 * write-threshold check.
 *
 * Returns 0 on success, -EPERM if @bs is read-only, or -EBUSY when
 * BDRV_REQ_NO_WAIT is set and a conflicting request is in flight.
 */
static inline int coroutine_fn GRAPH_RDLOCK
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    /* BDRV_REQ_NO_WAIT only makes sense together with BDRV_REQ_SERIALISING */
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        /* reqs_lock is held for the rest of this scope */
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
202085fe2479SFam Zheng 
/*
 * Common completion for write-style tracked requests: bump the write
 * generation, grow bs->total_sectors when the request extended the image,
 * and update write statistics and dirty bitmaps.
 *
 * @ret is the result of the request; size/dirty updates only happen on
 * success (ret == 0), while the write generation is always incremented.
 */
static inline void coroutine_fn GRAPH_RDLOCK
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
206085fe2479SFam Zheng 
206161007b31SStefan Hajnoczi /*
206204ed95f4SEric Blake  * Forwards an already correctly aligned write request to the BlockDriver,
206304ed95f4SEric Blake  * after possibly fragmenting it.
206461007b31SStefan Hajnoczi  */
/*
 * Core aligned write path.  'offset' and 'bytes' must already be aligned
 * to 'align' (asserted below).  Handles detect-zeroes conversion,
 * compressed writes, and fragmentation into bs->bl.max_transfer-sized
 * pieces.  Calls bdrv_co_write_req_prepare()/_finish() around the actual
 * driver I/O.  Returns 0 on success, negative errno on failure.
 */
20657b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_pwritev(BdrvChild * child,BdrvTrackedRequest * req,int64_t offset,int64_t bytes,int64_t align,QEMUIOVector * qiov,size_t qiov_offset,BdrvRequestFlags flags)20667b1fb72eSKevin Wolf bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
20677b1fb72eSKevin Wolf                      int64_t offset, int64_t bytes, int64_t align,
20687b1fb72eSKevin Wolf                      QEMUIOVector *qiov, size_t qiov_offset,
2069e75abedaSVladimir Sementsov-Ogievskiy                      BdrvRequestFlags flags)
207061007b31SStefan Hajnoczi {
207185c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
207261007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
207361007b31SStefan Hajnoczi     int ret;
207461007b31SStefan Hajnoczi 
2075fcfd9adeSVladimir Sementsov-Ogievskiy     int64_t bytes_remaining = bytes;
207604ed95f4SEric Blake     int max_transfer;
207761007b31SStefan Hajnoczi 
2078fcfd9adeSVladimir Sementsov-Ogievskiy     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2079fcfd9adeSVladimir Sementsov-Ogievskiy 
2080d470ad42SMax Reitz     if (!drv) {
2081d470ad42SMax Reitz         return -ENOMEDIUM;
2082d470ad42SMax Reitz     }
2083d470ad42SMax Reitz 
    /* Writes are forbidden while any read-only bitmap is attached */
2084d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
2085d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
2086d6883bc9SVladimir Sementsov-Ogievskiy     }
2087d6883bc9SVladimir Sementsov-Ogievskiy 
2088cff86b38SEric Blake     assert(is_power_of_2(align));
2089cff86b38SEric Blake     assert((offset & (align - 1)) == 0);
2090cff86b38SEric Blake     assert((bytes & (align - 1)) == 0);
    /* Fragmentation granularity: driver limit rounded down to alignment */
209104ed95f4SEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
209204ed95f4SEric Blake                                    align);
209361007b31SStefan Hajnoczi 
209485fe2479SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
209561007b31SStefan Hajnoczi 
    /*
     * detect-zeroes: if the payload is all zeroes and the driver has an
     * efficient zero-write callback, convert this into a zero write.
     */
209661007b31SStefan Hajnoczi     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
2097c1499a5eSEric Blake         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
209828c4da28SVladimir Sementsov-Ogievskiy         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
209961007b31SStefan Hajnoczi         flags |= BDRV_REQ_ZERO_WRITE;
210061007b31SStefan Hajnoczi         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
210161007b31SStefan Hajnoczi             flags |= BDRV_REQ_MAY_UNMAP;
210261007b31SStefan Hajnoczi         }
21033c586715SStefan Hajnoczi 
21043c586715SStefan Hajnoczi         /* Can't use optimization hint with bufferless zero write */
21053c586715SStefan Hajnoczi         flags &= ~BDRV_REQ_REGISTERED_BUF;
210661007b31SStefan Hajnoczi     }
210761007b31SStefan Hajnoczi 
210861007b31SStefan Hajnoczi     if (ret < 0) {
210961007b31SStefan Hajnoczi         /* Do nothing, write notifier decided to fail this request */
211061007b31SStefan Hajnoczi     } else if (flags & BDRV_REQ_ZERO_WRITE) {
2111c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
21129896c876SKevin Wolf         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
21133ea1a091SPavel Butsykin     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
211428c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
211528c4da28SVladimir Sementsov-Ogievskiy                                              qiov, qiov_offset);
211604ed95f4SEric Blake     } else if (bytes <= max_transfer) {
        /* Fast path: fits in a single driver request */
2117c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
211828c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
211904ed95f4SEric Blake     } else {
        /* Fragment into max_transfer-sized pieces, issued sequentially */
2120c834dc05SEmanuele Giuseppe Esposito         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
212104ed95f4SEric Blake         while (bytes_remaining) {
212204ed95f4SEric Blake             int num = MIN(bytes_remaining, max_transfer);
212304ed95f4SEric Blake             int local_flags = flags;
212404ed95f4SEric Blake 
212504ed95f4SEric Blake             assert(num);
212604ed95f4SEric Blake             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
212704ed95f4SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
212804ed95f4SEric Blake                 /* If FUA is going to be emulated by flush, we only
212904ed95f4SEric Blake                  * need to flush on the last iteration */
213004ed95f4SEric Blake                 local_flags &= ~BDRV_REQ_FUA;
213104ed95f4SEric Blake             }
213204ed95f4SEric Blake 
213304ed95f4SEric Blake             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2134134b7decSMax Reitz                                       num, qiov,
2135134b7decSMax Reitz                                       qiov_offset + bytes - bytes_remaining,
213628c4da28SVladimir Sementsov-Ogievskiy                                       local_flags);
213704ed95f4SEric Blake             if (ret < 0) {
213804ed95f4SEric Blake                 break;
213904ed95f4SEric Blake             }
214004ed95f4SEric Blake             bytes_remaining -= num;
214104ed95f4SEric Blake         }
214261007b31SStefan Hajnoczi     }
2143c834dc05SEmanuele Giuseppe Esposito     bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);
214461007b31SStefan Hajnoczi 
    /* Normalize any non-negative driver return value to 0 */
214561007b31SStefan Hajnoczi     if (ret >= 0) {
214604ed95f4SEric Blake         ret = 0;
214761007b31SStefan Hajnoczi     }
214885fe2479SFam Zheng     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
214961007b31SStefan Hajnoczi 
215061007b31SStefan Hajnoczi     return ret;
215161007b31SStefan Hajnoczi }
215261007b31SStefan Hajnoczi 
/*
 * Zero-write implementation for possibly unaligned requests.  An
 * unaligned head and/or tail is handled by a read-modify-write cycle
 * (padding buffers filled by bdrv_padding_rmw_read(), then written
 * without BDRV_REQ_ZERO_WRITE); the aligned middle is zeroed via
 * bdrv_aligned_pwritev() with a NULL qiov.  Returns 0 or -errno.
 */
21537b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_zero_pwritev(BdrvChild * child,int64_t offset,int64_t bytes,BdrvRequestFlags flags,BdrvTrackedRequest * req)21547b1fb72eSKevin Wolf bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
21557b1fb72eSKevin Wolf                         BdrvRequestFlags flags, BdrvTrackedRequest *req)
21569eeb6dd1SFam Zheng {
215785c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
21589eeb6dd1SFam Zheng     QEMUIOVector local_qiov;
2159a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
21609eeb6dd1SFam Zheng     int ret = 0;
21617a3f542fSVladimir Sementsov-Ogievskiy     bool padding;
21627a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
21639eeb6dd1SFam Zheng 
2164e8b65355SStefan Hajnoczi     /* This flag doesn't make sense for padding or zero writes */
2165e8b65355SStefan Hajnoczi     flags &= ~BDRV_REQ_REGISTERED_BUF;
2166e8b65355SStefan Hajnoczi 
216718743311SHanna Czenczek     padding = bdrv_init_padding(bs, offset, bytes, true, &pad);
21687a3f542fSVladimir Sementsov-Ogievskiy     if (padding) {
        /* RMW widens the request; serialize against overlapping requests */
216945e62b46SVladimir Sementsov-Ogievskiy         assert(!(flags & BDRV_REQ_NO_WAIT));
21708ac5aab2SVladimir Sementsov-Ogievskiy         bdrv_make_request_serialising(req, align);
21719eeb6dd1SFam Zheng 
21727a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, req, &pad, true);
21737a3f542fSVladimir Sementsov-Ogievskiy 
21747a3f542fSVladimir Sementsov-Ogievskiy         if (pad.head || pad.merge_reads) {
21757a3f542fSVladimir Sementsov-Ogievskiy             int64_t aligned_offset = offset & ~(align - 1);
            /* merge_reads: head and tail fit in one padding buffer */
21767a3f542fSVladimir Sementsov-Ogievskiy             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
21777a3f542fSVladimir Sementsov-Ogievskiy 
21787a3f542fSVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
21797a3f542fSVladimir Sementsov-Ogievskiy             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
218028c4da28SVladimir Sementsov-Ogievskiy                                        align, &local_qiov, 0,
21819eeb6dd1SFam Zheng                                        flags & ~BDRV_REQ_ZERO_WRITE);
21827a3f542fSVladimir Sementsov-Ogievskiy             if (ret < 0 || pad.merge_reads) {
21837a3f542fSVladimir Sementsov-Ogievskiy                 /* Error or all work is done */
21847a3f542fSVladimir Sementsov-Ogievskiy                 goto out;
21859eeb6dd1SFam Zheng             }
21867a3f542fSVladimir Sementsov-Ogievskiy             offset += write_bytes - pad.head;
21877a3f542fSVladimir Sementsov-Ogievskiy             bytes -= write_bytes - pad.head;
21887a3f542fSVladimir Sementsov-Ogievskiy         }
21899eeb6dd1SFam Zheng     }
21909eeb6dd1SFam Zheng 
21919eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
21929eeb6dd1SFam Zheng     if (bytes >= align) {
21939eeb6dd1SFam Zheng         /* Write the aligned part in the middle. */
2194fcfd9adeSVladimir Sementsov-Ogievskiy         int64_t aligned_bytes = bytes & ~(align - 1);
219585c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
219628c4da28SVladimir Sementsov-Ogievskiy                                    NULL, 0, flags);
21979eeb6dd1SFam Zheng         if (ret < 0) {
21987a3f542fSVladimir Sementsov-Ogievskiy             goto out;
21999eeb6dd1SFam Zheng         }
22009eeb6dd1SFam Zheng         bytes -= aligned_bytes;
22019eeb6dd1SFam Zheng         offset += aligned_bytes;
22029eeb6dd1SFam Zheng     }
22039eeb6dd1SFam Zheng 
    /* Remaining bytes, if any, are the unaligned tail */
22049eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
22059eeb6dd1SFam Zheng     if (bytes) {
22067a3f542fSVladimir Sementsov-Ogievskiy         assert(align == pad.tail + bytes);
22079eeb6dd1SFam Zheng 
22087a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
220985c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
221028c4da28SVladimir Sementsov-Ogievskiy                                    &local_qiov, 0,
221128c4da28SVladimir Sementsov-Ogievskiy                                    flags & ~BDRV_REQ_ZERO_WRITE);
22129eeb6dd1SFam Zheng     }
22139eeb6dd1SFam Zheng 
22147a3f542fSVladimir Sementsov-Ogievskiy out:
221518743311SHanna Czenczek     bdrv_padding_finalize(&pad);
22167a3f542fSVladimir Sementsov-Ogievskiy 
22177a3f542fSVladimir Sementsov-Ogievskiy     return ret;
22189eeb6dd1SFam Zheng }
22199eeb6dd1SFam Zheng 
222061007b31SStefan Hajnoczi /*
222161007b31SStefan Hajnoczi  * Handle a write request in coroutine context
222261007b31SStefan Hajnoczi  */
/* Convenience wrapper around bdrv_co_pwritev_part() with qiov_offset == 0 */
bdrv_co_pwritev(BdrvChild * child,int64_t offset,int64_t bytes,QEMUIOVector * qiov,BdrvRequestFlags flags)2223a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2224e9e52efdSVladimir Sementsov-Ogievskiy     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
222561007b31SStefan Hajnoczi     BdrvRequestFlags flags)
222661007b31SStefan Hajnoczi {
2227967d7905SEmanuele Giuseppe Esposito     IO_CODE();
22281acc3466SVladimir Sementsov-Ogievskiy     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
22291acc3466SVladimir Sementsov-Ogievskiy }
22301acc3466SVladimir Sementsov-Ogievskiy 
/*
 * Public coroutine write entry point.  Validates the request, pads it to
 * bs->bl.request_alignment (read-modify-write) when needed, tracks it as
 * a BDRV_TRACKED_WRITE, and dispatches to bdrv_co_do_zero_pwritev() or
 * bdrv_aligned_pwritev().  'qiov_offset' is the byte offset into 'qiov'
 * where the payload starts.  Returns 0 on success, -errno on failure.
 */
bdrv_co_pwritev_part(BdrvChild * child,int64_t offset,int64_t bytes,QEMUIOVector * qiov,size_t qiov_offset,BdrvRequestFlags flags)22311acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
223237e9403eSVladimir Sementsov-Ogievskiy     int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
22331acc3466SVladimir Sementsov-Ogievskiy     BdrvRequestFlags flags)
22341acc3466SVladimir Sementsov-Ogievskiy {
2235a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
223661007b31SStefan Hajnoczi     BdrvTrackedRequest req;
2237a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
22387a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
223961007b31SStefan Hajnoczi     int ret;
2240f0deecffSVladimir Sementsov-Ogievskiy     bool padded = false;
2241967d7905SEmanuele Giuseppe Esposito     IO_CODE();
224261007b31SStefan Hajnoczi 
224337e9403eSVladimir Sementsov-Ogievskiy     trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2244f42cf447SDaniel P. Berrange 
22451e97be91SEmanuele Giuseppe Esposito     if (!bdrv_co_is_inserted(bs)) {
224661007b31SStefan Hajnoczi         return -ENOMEDIUM;
224761007b31SStefan Hajnoczi     }
224861007b31SStefan Hajnoczi 
    /* Zero writes have no qiov, so only the laxer check applies */
22492aaa3f9bSVladimir Sementsov-Ogievskiy     if (flags & BDRV_REQ_ZERO_WRITE) {
22502aaa3f9bSVladimir Sementsov-Ogievskiy         ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
22512aaa3f9bSVladimir Sementsov-Ogievskiy     } else {
225263f4ad11SVladimir Sementsov-Ogievskiy         ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
22532aaa3f9bSVladimir Sementsov-Ogievskiy     }
225461007b31SStefan Hajnoczi     if (ret < 0) {
225561007b31SStefan Hajnoczi         return ret;
225661007b31SStefan Hajnoczi     }
225761007b31SStefan Hajnoczi 
2258f2208fdcSAlberto Garcia     /* If the request is misaligned then we can't make it efficient */
2259f2208fdcSAlberto Garcia     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2260f2208fdcSAlberto Garcia         !QEMU_IS_ALIGNED(offset | bytes, align))
2261f2208fdcSAlberto Garcia     {
2262f2208fdcSAlberto Garcia         return -ENOTSUP;
2263f2208fdcSAlberto Garcia     }
2264f2208fdcSAlberto Garcia 
2265ac9d00bfSVladimir Sementsov-Ogievskiy     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2266ac9d00bfSVladimir Sementsov-Ogievskiy         /*
2267ac9d00bfSVladimir Sementsov-Ogievskiy          * Aligning zero request is nonsense. Even if driver has special meaning
2268ac9d00bfSVladimir Sementsov-Ogievskiy          * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
2269ac9d00bfSVladimir Sementsov-Ogievskiy          * it to driver due to request_alignment.
2270ac9d00bfSVladimir Sementsov-Ogievskiy          *
2271ac9d00bfSVladimir Sementsov-Ogievskiy          * Still, no reason to return an error if someone do unaligned
2272ac9d00bfSVladimir Sementsov-Ogievskiy          * zero-length write occasionally.
2273ac9d00bfSVladimir Sementsov-Ogievskiy          */
2274ac9d00bfSVladimir Sementsov-Ogievskiy         return 0;
2275ac9d00bfSVladimir Sementsov-Ogievskiy     }
2276ac9d00bfSVladimir Sementsov-Ogievskiy 
2277f0deecffSVladimir Sementsov-Ogievskiy     if (!(flags & BDRV_REQ_ZERO_WRITE)) {
227861007b31SStefan Hajnoczi         /*
2279f0deecffSVladimir Sementsov-Ogievskiy          * Pad request for following read-modify-write cycle.
2280f0deecffSVladimir Sementsov-Ogievskiy          * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
2281f0deecffSVladimir Sementsov-Ogievskiy          * alignment only if there is no ZERO flag.
228261007b31SStefan Hajnoczi          */
228318743311SHanna Czenczek         ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
228418743311SHanna Czenczek                                &pad, &padded, &flags);
228598ca4549SVladimir Sementsov-Ogievskiy         if (ret < 0) {
228698ca4549SVladimir Sementsov-Ogievskiy             return ret;
228798ca4549SVladimir Sementsov-Ogievskiy         }
2288f0deecffSVladimir Sementsov-Ogievskiy     }
2289f0deecffSVladimir Sementsov-Ogievskiy 
2290f0deecffSVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
2291ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
229261007b31SStefan Hajnoczi 
229318a59f03SAnton Nefedov     if (flags & BDRV_REQ_ZERO_WRITE) {
2294f0deecffSVladimir Sementsov-Ogievskiy         assert(!padded);
229585c97ca7SKevin Wolf         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
22969eeb6dd1SFam Zheng         goto out;
22979eeb6dd1SFam Zheng     }
22989eeb6dd1SFam Zheng 
2299f0deecffSVladimir Sementsov-Ogievskiy     if (padded) {
2300f0deecffSVladimir Sementsov-Ogievskiy         /*
2301f0deecffSVladimir Sementsov-Ogievskiy          * Request was unaligned to request_alignment and therefore
2302f0deecffSVladimir Sementsov-Ogievskiy          * padded.  We are going to do read-modify-write, and must
2303f0deecffSVladimir Sementsov-Ogievskiy          * serialize the request to prevent interactions of the
2304f0deecffSVladimir Sementsov-Ogievskiy          * widened region with other transactions.
2305f0deecffSVladimir Sementsov-Ogievskiy          */
230645e62b46SVladimir Sementsov-Ogievskiy         assert(!(flags & BDRV_REQ_NO_WAIT));
23078ac5aab2SVladimir Sementsov-Ogievskiy         bdrv_make_request_serialising(&req, align);
23087a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, &req, &pad, false);
230961007b31SStefan Hajnoczi     }
231061007b31SStefan Hajnoczi 
231185c97ca7SKevin Wolf     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
23121acc3466SVladimir Sementsov-Ogievskiy                                qiov, qiov_offset, flags);
231361007b31SStefan Hajnoczi 
231418743311SHanna Czenczek     bdrv_padding_finalize(&pad);
231561007b31SStefan Hajnoczi 
23169eeb6dd1SFam Zheng out:
23179eeb6dd1SFam Zheng     tracked_request_end(&req);
231899723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
23197a3f542fSVladimir Sementsov-Ogievskiy 
232061007b31SStefan Hajnoczi     return ret;
232161007b31SStefan Hajnoczi }
232261007b31SStefan Hajnoczi 
/* Convenience wrapper: zero a byte range via the regular write path */
bdrv_co_pwrite_zeroes(BdrvChild * child,int64_t offset,int64_t bytes,BdrvRequestFlags flags)2323a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2324e9e52efdSVladimir Sementsov-Ogievskiy                                        int64_t bytes, BdrvRequestFlags flags)
232561007b31SStefan Hajnoczi {
2326384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
2327f5a5ca79SManos Pitsidianakis     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2328abaf8b75SKevin Wolf     assert_bdrv_graph_readable();
232961007b31SStefan Hajnoczi 
    /* NULL qiov + BDRV_REQ_ZERO_WRITE selects the zero-write path */
2330f5a5ca79SManos Pitsidianakis     return bdrv_co_pwritev(child, offset, bytes, NULL,
233161007b31SStefan Hajnoczi                            BDRV_REQ_ZERO_WRITE | flags);
233261007b31SStefan Hajnoczi }
233361007b31SStefan Hajnoczi 
23344085f5c7SJohn Snow /*
23354085f5c7SJohn Snow  * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
23364085f5c7SJohn Snow  */
/*
 * Flush every BlockDriverState returned by the bdrv_first()/bdrv_next()
 * iteration.  Main-loop (global-state) code only.  Returns 0 on success,
 * otherwise the first error encountered (later nodes are still flushed).
 */
bdrv_flush_all(void)23374085f5c7SJohn Snow int bdrv_flush_all(void)
23384085f5c7SJohn Snow {
23394085f5c7SJohn Snow     BdrvNextIterator it;
23404085f5c7SJohn Snow     BlockDriverState *bs = NULL;
23414085f5c7SJohn Snow     int result = 0;
23424085f5c7SJohn Snow 
2343f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
23442b3912f1SKevin Wolf     GRAPH_RDLOCK_GUARD_MAINLOOP();
2345f791bf7fSEmanuele Giuseppe Esposito 
2346c8aa7895SPavel Dovgalyuk     /*
2347c8aa7895SPavel Dovgalyuk      * bdrv queue is managed by record/replay,
2348c8aa7895SPavel Dovgalyuk      * creating new flush request for stopping
2349c8aa7895SPavel Dovgalyuk      * the VM may break the determinism
2350c8aa7895SPavel Dovgalyuk      */
2351c8aa7895SPavel Dovgalyuk     if (replay_events_enabled()) {
2352c8aa7895SPavel Dovgalyuk         return result;
2353c8aa7895SPavel Dovgalyuk     }
2354c8aa7895SPavel Dovgalyuk 
23554085f5c7SJohn Snow     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2356b49f4755SStefan Hajnoczi         int ret = bdrv_flush(bs);
        /* Remember only the first failure; keep flushing remaining nodes */
23574085f5c7SJohn Snow         if (ret < 0 && !result) {
23584085f5c7SJohn Snow             result = ret;
23594085f5c7SJohn Snow         }
23604085f5c7SJohn Snow     }
23614085f5c7SJohn Snow 
23624085f5c7SJohn Snow     return result;
23634085f5c7SJohn Snow }
23644085f5c7SJohn Snow 
236561007b31SStefan Hajnoczi /*
236661007b31SStefan Hajnoczi  * Returns the allocation status of the specified byte range.
236761007b31SStefan Hajnoczi  * Drivers not implementing the functionality are assumed to not support
236861007b31SStefan Hajnoczi  * backing files, hence all their sectors are reported as allocated.
236961007b31SStefan Hajnoczi  *
2370c33159deSEric Blake  * 'mode' serves as a hint as to which results are favored; see the
2371c33159deSEric Blake  * BDRV_WANT_* macros for details.
2372c9ce8c4dSEric Blake  *
23732e8bc787SEric Blake  * If 'offset' is beyond the end of the disk image the return value is
2374fb0d8654SEric Blake  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
237561007b31SStefan Hajnoczi  *
23762e8bc787SEric Blake  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2377fb0d8654SEric Blake  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2378fb0d8654SEric Blake  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
237967a0fd2aSFam Zheng  *
23802e8bc787SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
23812e8bc787SEric Blake  * following the specified offset) that are easily known to be in the
23822e8bc787SEric Blake  * same allocated/unallocated state.  Note that a second call starting
23832e8bc787SEric Blake  * at the original offset plus returned pnum may have the same status.
23842e8bc787SEric Blake  * The returned value is non-zero on success except at end-of-file.
23852e8bc787SEric Blake  *
23862e8bc787SEric Blake  * Returns negative errno on failure.  Otherwise, if the
23872e8bc787SEric Blake  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
23882e8bc787SEric Blake  * set to the host mapping and BDS corresponding to the guest offset.
238961007b31SStefan Hajnoczi  */
23907ff9579eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_block_status(BlockDriverState * bs,unsigned int mode,int64_t offset,int64_t bytes,int64_t * pnum,int64_t * map,BlockDriverState ** file)2391c33159deSEric Blake bdrv_co_do_block_status(BlockDriverState *bs, unsigned int mode,
23922e8bc787SEric Blake                         int64_t offset, int64_t bytes,
23937ff9579eSKevin Wolf                         int64_t *pnum, int64_t *map, BlockDriverState **file)
239461007b31SStefan Hajnoczi {
23952e8bc787SEric Blake     int64_t total_size;
23962e8bc787SEric Blake     int64_t n; /* bytes */
2397efa6e2edSEric Blake     int ret;
23982e8bc787SEric Blake     int64_t local_map = 0;
2399298a1665SEric Blake     BlockDriverState *local_file = NULL;
2400efa6e2edSEric Blake     int64_t aligned_offset, aligned_bytes;
2401efa6e2edSEric Blake     uint32_t align;
2402549ec0d9SMax Reitz     bool has_filtered_child;
240361007b31SStefan Hajnoczi 
2404298a1665SEric Blake     assert(pnum);
24057ff9579eSKevin Wolf     assert_bdrv_graph_readable();
2406298a1665SEric Blake     *pnum = 0;
24070af02bd1SPaolo Bonzini     total_size = bdrv_co_getlength(bs);
24082e8bc787SEric Blake     if (total_size < 0) {
24092e8bc787SEric Blake         ret = total_size;
2410298a1665SEric Blake         goto early_out;
241161007b31SStefan Hajnoczi     }
241261007b31SStefan Hajnoczi 
24132e8bc787SEric Blake     if (offset >= total_size) {
2414298a1665SEric Blake         ret = BDRV_BLOCK_EOF;
2415298a1665SEric Blake         goto early_out;
241661007b31SStefan Hajnoczi     }
24172e8bc787SEric Blake     if (!bytes) {
2418298a1665SEric Blake         ret = 0;
2419298a1665SEric Blake         goto early_out;
24209cdcfd9fSEric Blake     }
242161007b31SStefan Hajnoczi 
24222e8bc787SEric Blake     n = total_size - offset;
24232e8bc787SEric Blake     if (n < bytes) {
24242e8bc787SEric Blake         bytes = n;
242561007b31SStefan Hajnoczi     }
242661007b31SStefan Hajnoczi 
24270af02bd1SPaolo Bonzini     /* Must be non-NULL or bdrv_co_getlength() would have failed */
2428d470ad42SMax Reitz     assert(bs->drv);
2429549ec0d9SMax Reitz     has_filtered_child = bdrv_filter_child(bs);
2430549ec0d9SMax Reitz     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
24312e8bc787SEric Blake         *pnum = bytes;
243261007b31SStefan Hajnoczi         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
24332e8bc787SEric Blake         if (offset + bytes == total_size) {
2434fb0d8654SEric Blake             ret |= BDRV_BLOCK_EOF;
2435fb0d8654SEric Blake         }
243661007b31SStefan Hajnoczi         if (bs->drv->protocol_name) {
24372e8bc787SEric Blake             ret |= BDRV_BLOCK_OFFSET_VALID;
24382e8bc787SEric Blake             local_map = offset;
2439298a1665SEric Blake             local_file = bs;
244061007b31SStefan Hajnoczi         }
2441298a1665SEric Blake         goto early_out;
244261007b31SStefan Hajnoczi     }
244361007b31SStefan Hajnoczi 
244499723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2445efa6e2edSEric Blake 
2446efa6e2edSEric Blake     /* Round out to request_alignment boundaries */
244786a3d5c6SEric Blake     align = bs->bl.request_alignment;
2448efa6e2edSEric Blake     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2449efa6e2edSEric Blake     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2450efa6e2edSEric Blake 
2451549ec0d9SMax Reitz     if (bs->drv->bdrv_co_block_status) {
24520bc329fbSHanna Reitz         /*
24530bc329fbSHanna Reitz          * Use the block-status cache only for protocol nodes: Format
24540bc329fbSHanna Reitz          * drivers are generally quick to inquire the status, but protocol
24550bc329fbSHanna Reitz          * drivers often need to get information from outside of qemu, so
24560bc329fbSHanna Reitz          * we do not have control over the actual implementation.  There
24570bc329fbSHanna Reitz          * have been cases where inquiring the status took an unreasonably
24580bc329fbSHanna Reitz          * long time, and we can do nothing in qemu to fix it.
24590bc329fbSHanna Reitz          * This is especially problematic for images with large data areas,
24600bc329fbSHanna Reitz          * because finding the few holes in them and giving them special
24610bc329fbSHanna Reitz          * treatment does not gain much performance.  Therefore, we try to
24620bc329fbSHanna Reitz          * cache the last-identified data region.
24630bc329fbSHanna Reitz          *
24640bc329fbSHanna Reitz          * Second, limiting ourselves to protocol nodes allows us to assume
24650bc329fbSHanna Reitz          * the block status for data regions to be DATA | OFFSET_VALID, and
24660bc329fbSHanna Reitz          * that the host offset is the same as the guest offset.
24670bc329fbSHanna Reitz          *
24680bc329fbSHanna Reitz          * Note that it is possible that external writers zero parts of
24690bc329fbSHanna Reitz          * the cached regions without the cache being invalidated, and so
24700bc329fbSHanna Reitz          * we may report zeroes as data.  This is not catastrophic,
24710bc329fbSHanna Reitz          * however, because reporting zeroes as data is fine.
24720bc329fbSHanna Reitz          */
24730bc329fbSHanna Reitz         if (QLIST_EMPTY(&bs->children) &&
24740bc329fbSHanna Reitz             bdrv_bsc_is_data(bs, aligned_offset, pnum))
24750bc329fbSHanna Reitz         {
24760bc329fbSHanna Reitz             ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
24770bc329fbSHanna Reitz             local_file = bs;
24780bc329fbSHanna Reitz             local_map = aligned_offset;
24790bc329fbSHanna Reitz         } else {
2480c33159deSEric Blake             ret = bs->drv->bdrv_co_block_status(bs, mode, aligned_offset,
248186a3d5c6SEric Blake                                                 aligned_bytes, pnum, &local_map,
248286a3d5c6SEric Blake                                                 &local_file);
24830bc329fbSHanna Reitz 
24840bc329fbSHanna Reitz             /*
24850bc329fbSHanna Reitz              * Note that checking QLIST_EMPTY(&bs->children) is also done when
24860bc329fbSHanna Reitz              * the cache is queried above.  Technically, we do not need to check
24870bc329fbSHanna Reitz              * it here; the worst that can happen is that we fill the cache for
24880bc329fbSHanna Reitz              * non-protocol nodes, and then it is never used.  However, filling
24890bc329fbSHanna Reitz              * the cache requires an RCU update, so double check here to avoid
24900bc329fbSHanna Reitz              * such an update if possible.
2491113b727cSHanna Reitz              *
2492c33159deSEric Blake              * Check mode, because we only want to update the cache when we
2493113b727cSHanna Reitz              * have accurate information about what is zero and what is data.
24940bc329fbSHanna Reitz              */
2495c33159deSEric Blake             if (mode == BDRV_WANT_PRECISE &&
2496113b727cSHanna Reitz                 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
24970bc329fbSHanna Reitz                 QLIST_EMPTY(&bs->children))
24980bc329fbSHanna Reitz             {
24990bc329fbSHanna Reitz                 /*
25000bc329fbSHanna Reitz                  * When a protocol driver reports BLOCK_OFFSET_VALID, the
25010bc329fbSHanna Reitz                  * returned local_map value must be the same as the offset we
25020bc329fbSHanna Reitz                  * have passed (aligned_offset), and local_bs must be the node
25030bc329fbSHanna Reitz                  * itself.
25040bc329fbSHanna Reitz                  * Assert this, because we follow this rule when reading from
25050bc329fbSHanna Reitz                  * the cache (see the `local_file = bs` and
25060bc329fbSHanna Reitz                  * `local_map = aligned_offset` assignments above), and the
25070bc329fbSHanna Reitz                  * result the cache delivers must be the same as the driver
25080bc329fbSHanna Reitz                  * would deliver.
25090bc329fbSHanna Reitz                  */
25100bc329fbSHanna Reitz                 assert(local_file == bs);
25110bc329fbSHanna Reitz                 assert(local_map == aligned_offset);
25120bc329fbSHanna Reitz                 bdrv_bsc_fill(bs, aligned_offset, *pnum);
25130bc329fbSHanna Reitz             }
25140bc329fbSHanna Reitz         }
2515549ec0d9SMax Reitz     } else {
2516549ec0d9SMax Reitz         /* Default code for filters */
2517549ec0d9SMax Reitz 
2518549ec0d9SMax Reitz         local_file = bdrv_filter_bs(bs);
2519549ec0d9SMax Reitz         assert(local_file);
2520549ec0d9SMax Reitz 
2521549ec0d9SMax Reitz         *pnum = aligned_bytes;
2522549ec0d9SMax Reitz         local_map = aligned_offset;
2523549ec0d9SMax Reitz         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2524549ec0d9SMax Reitz     }
252586a3d5c6SEric Blake     if (ret < 0) {
252686a3d5c6SEric Blake         *pnum = 0;
252786a3d5c6SEric Blake         goto out;
252886a3d5c6SEric Blake     }
2529efa6e2edSEric Blake 
2530efa6e2edSEric Blake     /*
2531636cb512SEric Blake      * The driver's result must be a non-zero multiple of request_alignment.
2532efa6e2edSEric Blake      * Clamp pnum and adjust map to original request.
2533efa6e2edSEric Blake      */
2534636cb512SEric Blake     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2535636cb512SEric Blake            align > offset - aligned_offset);
253669f47505SVladimir Sementsov-Ogievskiy     if (ret & BDRV_BLOCK_RECURSE) {
253769f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_DATA);
253869f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_OFFSET_VALID);
253969f47505SVladimir Sementsov-Ogievskiy         assert(!(ret & BDRV_BLOCK_ZERO));
254069f47505SVladimir Sementsov-Ogievskiy     }
254169f47505SVladimir Sementsov-Ogievskiy 
2542efa6e2edSEric Blake     *pnum -= offset - aligned_offset;
2543efa6e2edSEric Blake     if (*pnum > bytes) {
2544efa6e2edSEric Blake         *pnum = bytes;
2545efa6e2edSEric Blake     }
2546efa6e2edSEric Blake     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2547efa6e2edSEric Blake         local_map += offset - aligned_offset;
2548efa6e2edSEric Blake     }
254961007b31SStefan Hajnoczi 
255061007b31SStefan Hajnoczi     if (ret & BDRV_BLOCK_RAW) {
2551298a1665SEric Blake         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2552c33159deSEric Blake         ret = bdrv_co_do_block_status(local_file, mode, local_map,
25532e8bc787SEric Blake                                       *pnum, pnum, &local_map, &local_file);
255499723548SPaolo Bonzini         goto out;
255561007b31SStefan Hajnoczi     }
255661007b31SStefan Hajnoczi 
255761007b31SStefan Hajnoczi     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
255861007b31SStefan Hajnoczi         ret |= BDRV_BLOCK_ALLOCATED;
2559d40f4a56SAlberto Garcia     } else if (bs->drv->supports_backing) {
2560cb850315SMax Reitz         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2561cb850315SMax Reitz 
2562d40f4a56SAlberto Garcia         if (!cow_bs) {
2563d40f4a56SAlberto Garcia             ret |= BDRV_BLOCK_ZERO;
2564c33159deSEric Blake         } else if (mode == BDRV_WANT_PRECISE) {
25650af02bd1SPaolo Bonzini             int64_t size2 = bdrv_co_getlength(cow_bs);
2566c9ce8c4dSEric Blake 
25672e8bc787SEric Blake             if (size2 >= 0 && offset >= size2) {
256861007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
256961007b31SStefan Hajnoczi             }
25707b1efe99SVladimir Sementsov-Ogievskiy         }
257161007b31SStefan Hajnoczi     }
257261007b31SStefan Hajnoczi 
2573c33159deSEric Blake     if (mode == BDRV_WANT_PRECISE && ret & BDRV_BLOCK_RECURSE &&
257469f47505SVladimir Sementsov-Ogievskiy         local_file && local_file != bs &&
257561007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
257661007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_OFFSET_VALID)) {
25772e8bc787SEric Blake         int64_t file_pnum;
25782e8bc787SEric Blake         int ret2;
257961007b31SStefan Hajnoczi 
2580c33159deSEric Blake         ret2 = bdrv_co_do_block_status(local_file, mode, local_map,
25812e8bc787SEric Blake                                        *pnum, &file_pnum, NULL, NULL);
258261007b31SStefan Hajnoczi         if (ret2 >= 0) {
258361007b31SStefan Hajnoczi             /* Ignore errors.  This is just providing extra information, it
258461007b31SStefan Hajnoczi              * is useful but not necessary.
258561007b31SStefan Hajnoczi              */
2586c61e684eSEric Blake             if (ret2 & BDRV_BLOCK_EOF &&
2587c61e684eSEric Blake                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2588c61e684eSEric Blake                 /*
2589c61e684eSEric Blake                  * It is valid for the format block driver to read
2590c61e684eSEric Blake                  * beyond the end of the underlying file's current
2591c61e684eSEric Blake                  * size; such areas read as zero.
2592c61e684eSEric Blake                  */
259361007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
259461007b31SStefan Hajnoczi             } else {
259561007b31SStefan Hajnoczi                 /* Limit request to the range reported by the protocol driver */
259661007b31SStefan Hajnoczi                 *pnum = file_pnum;
259761007b31SStefan Hajnoczi                 ret |= (ret2 & BDRV_BLOCK_ZERO);
259861007b31SStefan Hajnoczi             }
259961007b31SStefan Hajnoczi         }
26008a9be799SFiona Ebner 
26018a9be799SFiona Ebner         /*
26028a9be799SFiona Ebner          * Now that the recursive search was done, clear the flag. Otherwise,
26038a9be799SFiona Ebner          * with more complicated block graphs like snapshot-access ->
26048a9be799SFiona Ebner          * copy-before-write -> qcow2, where the return value will be propagated
26058a9be799SFiona Ebner          * further up to a parent bdrv_co_do_block_status() call, both the
26068a9be799SFiona Ebner          * BDRV_BLOCK_RECURSE and BDRV_BLOCK_ZERO flags would be set, which is
26078a9be799SFiona Ebner          * not allowed.
26088a9be799SFiona Ebner          */
26098a9be799SFiona Ebner         ret &= ~BDRV_BLOCK_RECURSE;
261061007b31SStefan Hajnoczi     }
261161007b31SStefan Hajnoczi 
261299723548SPaolo Bonzini out:
261399723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
26142e8bc787SEric Blake     if (ret >= 0 && offset + *pnum == total_size) {
2615fb0d8654SEric Blake         ret |= BDRV_BLOCK_EOF;
2616fb0d8654SEric Blake     }
2617298a1665SEric Blake early_out:
2618298a1665SEric Blake     if (file) {
2619298a1665SEric Blake         *file = local_file;
2620298a1665SEric Blake     }
26212e8bc787SEric Blake     if (map) {
26222e8bc787SEric Blake         *map = local_map;
26232e8bc787SEric Blake     }
262461007b31SStefan Hajnoczi     return ret;
262561007b31SStefan Hajnoczi }
262661007b31SStefan Hajnoczi 
/*
 * Merge block-status queries over a chain of nodes.
 *
 * Walk from @bs down towards @base (with @base itself queried only when
 * @include_base is true), asking each layer for the status of the byte
 * range [@offset, @offset + @bytes) until some layer reports the range
 * as allocated, a layer is short (reports zero bytes at EOF), or the
 * chain is exhausted.
 *
 * @pnum receives the number of bytes whose status is uniform; @map and
 * @file (both optional) receive the mapping information from the layer
 * that provided the final answer.  @depth (optional) is set to the
 * number of layers queried (1 == @bs itself answered).
 *
 * Returns a non-negative BDRV_BLOCK_* mask on success, -errno on error.
 */
int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  unsigned int mode,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;
    IO_CODE();

    assert(!include_base || base); /* Can't include NULL base */
    assert_bdrv_graph_readable();

    /* Always count depth internally, even if the caller does not care. */
    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    /* Empty interval between bs (exclusive) and base: nothing to query. */
    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_do_block_status(bs, mode, offset, bytes, pnum,
                                  map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    /* Remember where the top layer's EOF lies; re-checked at the end. */
    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_do_block_status(p, mode, offset, bytes, pnum,
                                      map, file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if they
             * were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
             * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status, we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
             * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, [offset, offset + *pnum) region is unallocated on this layer,
         * let's continue the diving.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    /* Report EOF relative to the top layer's end, not the answering one. */
    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}
2730ba3f0e25SFam Zheng 
bdrv_co_block_status_above(BlockDriverState * bs,BlockDriverState * base,int64_t offset,int64_t bytes,int64_t * pnum,int64_t * map,BlockDriverState ** file)27317b52a921SEmanuele Giuseppe Esposito int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
27327b52a921SEmanuele Giuseppe Esposito                                             BlockDriverState *base,
27337b52a921SEmanuele Giuseppe Esposito                                             int64_t offset, int64_t bytes,
27347b52a921SEmanuele Giuseppe Esposito                                             int64_t *pnum, int64_t *map,
27357b52a921SEmanuele Giuseppe Esposito                                             BlockDriverState **file)
27367b52a921SEmanuele Giuseppe Esposito {
27377b52a921SEmanuele Giuseppe Esposito     IO_CODE();
2738c33159deSEric Blake     return bdrv_co_common_block_status_above(bs, base, false,
2739c33159deSEric Blake                                              BDRV_WANT_PRECISE, offset,
27407b52a921SEmanuele Giuseppe Esposito                                              bytes, pnum, map, file, NULL);
27417b52a921SEmanuele Giuseppe Esposito }
27427b52a921SEmanuele Giuseppe Esposito 
bdrv_co_block_status(BlockDriverState * bs,int64_t offset,int64_t bytes,int64_t * pnum,int64_t * map,BlockDriverState ** file)27431b88457eSPaolo Bonzini int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
27441b88457eSPaolo Bonzini                                       int64_t bytes, int64_t *pnum,
274531826642SEric Blake                                       int64_t *map, BlockDriverState **file)
2746c9ce8c4dSEric Blake {
2747384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
27481b88457eSPaolo Bonzini     return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
274931826642SEric Blake                                       offset, bytes, pnum, map, file);
2750ba3f0e25SFam Zheng }
2751ba3f0e25SFam Zheng 
275246cd1e8aSAlberto Garcia /*
275346cd1e8aSAlberto Garcia  * Check @bs (and its backing chain) to see if the range defined
275446cd1e8aSAlberto Garcia  * by @offset and @bytes is known to read as zeroes.
275546cd1e8aSAlberto Garcia  * Return 1 if that is the case, 0 otherwise and -errno on error.
275646cd1e8aSAlberto Garcia  * This test is meant to be fast rather than accurate so returning 0
275731bf15d9SEric Blake  * does not guarantee non-zero data; but a return of 1 is reliable.
275846cd1e8aSAlberto Garcia  */
bdrv_co_is_zero_fast(BlockDriverState * bs,int64_t offset,int64_t bytes)275946cd1e8aSAlberto Garcia int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
276046cd1e8aSAlberto Garcia                                       int64_t bytes)
276146cd1e8aSAlberto Garcia {
276246cd1e8aSAlberto Garcia     int ret;
276331bf15d9SEric Blake     int64_t pnum;
2764384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
276546cd1e8aSAlberto Garcia 
276631bf15d9SEric Blake     while (bytes) {
276731bf15d9SEric Blake         ret = bdrv_co_common_block_status_above(bs, NULL, false,
276831bf15d9SEric Blake                                                 BDRV_WANT_ZERO, offset, bytes,
276931bf15d9SEric Blake                                                 &pnum, NULL, NULL, NULL);
277046cd1e8aSAlberto Garcia 
277146cd1e8aSAlberto Garcia         if (ret < 0) {
277246cd1e8aSAlberto Garcia             return ret;
277346cd1e8aSAlberto Garcia         }
277431bf15d9SEric Blake         if (!(ret & BDRV_BLOCK_ZERO)) {
277531bf15d9SEric Blake             return 0;
277631bf15d9SEric Blake         }
277731bf15d9SEric Blake         offset += pnum;
277831bf15d9SEric Blake         bytes -= pnum;
277931bf15d9SEric Blake     }
278046cd1e8aSAlberto Garcia 
278131bf15d9SEric Blake     return 1;
278246cd1e8aSAlberto Garcia }
278346cd1e8aSAlberto Garcia 
/*
 * Check @bs (and its backing chain) to see if the entire image is known
 * to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate so returning 0
 * does not guarantee non-zero data; however, a return of 1 is reliable,
 * and this function can report 1 in more cases than bdrv_co_is_zero_fast.
 */
int coroutine_fn bdrv_co_is_all_zeroes(BlockDriverState *bs)
{
    int ret;
    int64_t pnum, bytes;
    char *buf;
    QEMUIOVector local_qiov;
    IO_CODE();

    bytes = bdrv_co_getlength(bs);
    if (bytes < 0) {
        return bytes;
    }

    /* First probe - see if the entire image reads as zero */
    ret = bdrv_co_common_block_status_above(bs, NULL, false, BDRV_WANT_ZERO,
                                            0, bytes, &pnum, NULL, NULL,
                                            NULL);
    if (ret < 0) {
        return ret;
    }
    if (ret & BDRV_BLOCK_ZERO) {
        /* The head reads as zero; check whether the rest does, too. */
        return bdrv_co_is_zero_fast(bs, pnum, bytes - pnum);
    }

    /*
     * Because of the way 'blockdev-create' works, raw files tend to
     * be created with a non-sparse region at the front to make
     * alignment probing easier.  If the block starts with only a
     * small allocated region, it is still worth the effort to see if
     * the rest of the image is still sparse, coupled with manually
     * reading the first region to see if it reads zero after all.
     */
    if (pnum > MAX_ZERO_CHECK_BUFFER) {
        /* Head too large to be worth reading; report "unknown" (0). */
        return 0;
    }
    ret = bdrv_co_is_zero_fast(bs, pnum, bytes - pnum);
    if (ret <= 0) {
        /* Tail is not known-zero (or errored); no point reading the head. */
        return ret;
    }
    /* Only the head of the image is unknown, and it's small.  Read it.  */
    buf = qemu_blockalign(bs, pnum);
    qemu_iovec_init_buf(&local_qiov, buf, pnum);
    ret = bdrv_driver_preadv(bs, 0, pnum, &local_qiov, 0, 0);
    if (ret >= 0) {
        ret = buffer_is_zero(buf, pnum);
    }
    qemu_vfree(buf);
    return ret;
}
2841*52726096SEric Blake 
bdrv_co_is_allocated(BlockDriverState * bs,int64_t offset,int64_t bytes,int64_t * pnum)28427b52a921SEmanuele Giuseppe Esposito int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
28437b52a921SEmanuele Giuseppe Esposito                                       int64_t bytes, int64_t *pnum)
28447b52a921SEmanuele Giuseppe Esposito {
28457b52a921SEmanuele Giuseppe Esposito     int ret;
28467b52a921SEmanuele Giuseppe Esposito     int64_t dummy;
28477b52a921SEmanuele Giuseppe Esposito     IO_CODE();
28487b52a921SEmanuele Giuseppe Esposito 
2849c33159deSEric Blake     ret = bdrv_co_common_block_status_above(bs, bs, true, BDRV_WANT_ALLOCATED,
2850c33159deSEric Blake                                             offset, bytes, pnum ? pnum : &dummy,
2851c33159deSEric Blake                                             NULL, NULL, NULL);
28527b52a921SEmanuele Giuseppe Esposito     if (ret < 0) {
28537b52a921SEmanuele Giuseppe Esposito         return ret;
28547b52a921SEmanuele Giuseppe Esposito     }
28557b52a921SEmanuele Giuseppe Esposito     return !!(ret & BDRV_BLOCK_ALLOCATED);
28567b52a921SEmanuele Giuseppe Esposito }
28577b52a921SEmanuele Giuseppe Esposito 
285861007b31SStefan Hajnoczi /*
285961007b31SStefan Hajnoczi  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
286061007b31SStefan Hajnoczi  *
2861a92b1b06SEric Blake  * Return a positive depth if (a prefix of) the given range is allocated
2862a92b1b06SEric Blake  * in any image between BASE and TOP (BASE is only included if include_base
2863a92b1b06SEric Blake  * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
2864170d3bd3SAndrey Shinkevich  * BASE can be NULL to check if the given offset is allocated in any
2865170d3bd3SAndrey Shinkevich  * image of the chain.  Return 0 otherwise, or negative errno on
2866170d3bd3SAndrey Shinkevich  * failure.
286761007b31SStefan Hajnoczi  *
286851b0a488SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
286951b0a488SEric Blake  * following the specified offset) that are known to be in the same
287051b0a488SEric Blake  * allocated/unallocated state.  Note that a subsequent call starting
287151b0a488SEric Blake  * at 'offset + *pnum' may return the same allocation status (in other
287251b0a488SEric Blake  * words, the result is not necessarily the maximum possible range);
287351b0a488SEric Blake  * but 'pnum' will only be 0 when end of file is reached.
287461007b31SStefan Hajnoczi  */
bdrv_co_is_allocated_above(BlockDriverState * bs,BlockDriverState * base,bool include_base,int64_t offset,int64_t bytes,int64_t * pnum)2875578ffa9fSPaolo Bonzini int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
287661007b31SStefan Hajnoczi                                             BlockDriverState *base,
2877170d3bd3SAndrey Shinkevich                                             bool include_base, int64_t offset,
2878170d3bd3SAndrey Shinkevich                                             int64_t bytes, int64_t *pnum)
287961007b31SStefan Hajnoczi {
2880a92b1b06SEric Blake     int depth;
28817b52a921SEmanuele Giuseppe Esposito     int ret;
28827b52a921SEmanuele Giuseppe Esposito     IO_CODE();
28837b52a921SEmanuele Giuseppe Esposito 
2884c33159deSEric Blake     ret = bdrv_co_common_block_status_above(bs, base, include_base,
2885c33159deSEric Blake                                             BDRV_WANT_ALLOCATED,
2886a92b1b06SEric Blake                                             offset, bytes, pnum, NULL, NULL,
2887a92b1b06SEric Blake                                             &depth);
288861007b31SStefan Hajnoczi     if (ret < 0) {
288961007b31SStefan Hajnoczi         return ret;
2890d6a644bbSEric Blake     }
289161007b31SStefan Hajnoczi 
2892a92b1b06SEric Blake     if (ret & BDRV_BLOCK_ALLOCATED) {
2893a92b1b06SEric Blake         return depth;
2894a92b1b06SEric Blake     }
2895a92b1b06SEric Blake     return 0;
289661007b31SStefan Hajnoczi }
289761007b31SStefan Hajnoczi 
289821c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState * bs,QEMUIOVector * qiov,int64_t pos)2899b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
29001a8ae822SKevin Wolf {
29011a8ae822SKevin Wolf     BlockDriver *drv = bs->drv;
2902c4db2e25SMax Reitz     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2903b984b296SVladimir Sementsov-Ogievskiy     int ret;
29041581a70dSEmanuele Giuseppe Esposito     IO_CODE();
29051b3ff9feSKevin Wolf     assert_bdrv_graph_readable();
2906b984b296SVladimir Sementsov-Ogievskiy 
2907b984b296SVladimir Sementsov-Ogievskiy     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2908b984b296SVladimir Sementsov-Ogievskiy     if (ret < 0) {
2909b984b296SVladimir Sementsov-Ogievskiy         return ret;
2910b984b296SVladimir Sementsov-Ogievskiy     }
2911dc88a467SStefan Hajnoczi 
2912b33b354fSVladimir Sementsov-Ogievskiy     if (!drv) {
2913b33b354fSVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
2914b33b354fSVladimir Sementsov-Ogievskiy     }
2915b33b354fSVladimir Sementsov-Ogievskiy 
2916dc88a467SStefan Hajnoczi     bdrv_inc_in_flight(bs);
29171a8ae822SKevin Wolf 
2918ca5e2ad9SEmanuele Giuseppe Esposito     if (drv->bdrv_co_load_vmstate) {
2919ca5e2ad9SEmanuele Giuseppe Esposito         ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
2920c4db2e25SMax Reitz     } else if (child_bs) {
2921b33b354fSVladimir Sementsov-Ogievskiy         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2922b984b296SVladimir Sementsov-Ogievskiy     } else {
2923b984b296SVladimir Sementsov-Ogievskiy         ret = -ENOTSUP;
29241a8ae822SKevin Wolf     }
29251a8ae822SKevin Wolf 
2926dc88a467SStefan Hajnoczi     bdrv_dec_in_flight(bs);
2927b33b354fSVladimir Sementsov-Ogievskiy 
2928b33b354fSVladimir Sementsov-Ogievskiy     return ret;
2929b33b354fSVladimir Sementsov-Ogievskiy }
2930b33b354fSVladimir Sementsov-Ogievskiy 
2931b33b354fSVladimir Sementsov-Ogievskiy int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState * bs,QEMUIOVector * qiov,int64_t pos)2932b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2933b33b354fSVladimir Sementsov-Ogievskiy {
2934b33b354fSVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
2935b33b354fSVladimir Sementsov-Ogievskiy     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2936b984b296SVladimir Sementsov-Ogievskiy     int ret;
29371581a70dSEmanuele Giuseppe Esposito     IO_CODE();
29381b3ff9feSKevin Wolf     assert_bdrv_graph_readable();
2939b984b296SVladimir Sementsov-Ogievskiy 
2940b984b296SVladimir Sementsov-Ogievskiy     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2941b984b296SVladimir Sementsov-Ogievskiy     if (ret < 0) {
2942b984b296SVladimir Sementsov-Ogievskiy         return ret;
2943b984b296SVladimir Sementsov-Ogievskiy     }
2944b33b354fSVladimir Sementsov-Ogievskiy 
2945b33b354fSVladimir Sementsov-Ogievskiy     if (!drv) {
2946b33b354fSVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
2947b33b354fSVladimir Sementsov-Ogievskiy     }
2948b33b354fSVladimir Sementsov-Ogievskiy 
2949b33b354fSVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
2950b33b354fSVladimir Sementsov-Ogievskiy 
2951ca5e2ad9SEmanuele Giuseppe Esposito     if (drv->bdrv_co_save_vmstate) {
2952ca5e2ad9SEmanuele Giuseppe Esposito         ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
2953b33b354fSVladimir Sementsov-Ogievskiy     } else if (child_bs) {
2954b33b354fSVladimir Sementsov-Ogievskiy         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2955b984b296SVladimir Sementsov-Ogievskiy     } else {
2956b984b296SVladimir Sementsov-Ogievskiy         ret = -ENOTSUP;
2957b33b354fSVladimir Sementsov-Ogievskiy     }
2958b33b354fSVladimir Sementsov-Ogievskiy 
2959b33b354fSVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
2960b33b354fSVladimir Sementsov-Ogievskiy 
2961dc88a467SStefan Hajnoczi     return ret;
29621a8ae822SKevin Wolf }
29631a8ae822SKevin Wolf 
bdrv_save_vmstate(BlockDriverState * bs,const uint8_t * buf,int64_t pos,int size)296461007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
296561007b31SStefan Hajnoczi                       int64_t pos, int size)
296661007b31SStefan Hajnoczi {
29670d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2968b33b354fSVladimir Sementsov-Ogievskiy     int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2969384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
297061007b31SStefan Hajnoczi 
2971b33b354fSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : size;
297261007b31SStefan Hajnoczi }
297361007b31SStefan Hajnoczi 
bdrv_load_vmstate(BlockDriverState * bs,uint8_t * buf,int64_t pos,int size)297461007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
297561007b31SStefan Hajnoczi                       int64_t pos, int size)
297661007b31SStefan Hajnoczi {
29770d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2978b33b354fSVladimir Sementsov-Ogievskiy     int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2979384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
29805ddda0b8SKevin Wolf 
2981b33b354fSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : size;
298261007b31SStefan Hajnoczi }
298361007b31SStefan Hajnoczi 
298461007b31SStefan Hajnoczi /**************************************************************/
298561007b31SStefan Hajnoczi /* async I/Os */
298661007b31SStefan Hajnoczi 
/**
 * Synchronously cancels an acb. Must be called with the BQL held and the acb
 * must be processed with the BQL held too (IOThreads are not allowed).
 *
 * Use bdrv_aio_cancel_async() instead when possible.
 */
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    GLOBAL_STATE_CODE();
    /* Hold our own reference so the acb cannot be freed while we wait. */
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    /*
     * Block until we are the sole reference holder, i.e. the request has
     * finished (completed or cancelled) and dropped its other references.
     */
    AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1);
    qemu_aio_unref(acb);
}
300161007b31SStefan Hajnoczi 
300261007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
300361007b31SStefan Hajnoczi  * cancel_async, otherwise we do nothing and let the request normally complete.
300461007b31SStefan Hajnoczi  * In either case the completion callback must be called. */
bdrv_aio_cancel_async(BlockAIOCB * acb)300561007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
300661007b31SStefan Hajnoczi {
3007384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
300861007b31SStefan Hajnoczi     if (acb->aiocb_info->cancel_async) {
300961007b31SStefan Hajnoczi         acb->aiocb_info->cancel_async(acb);
301061007b31SStefan Hajnoczi     }
301161007b31SStefan Hajnoczi }
301261007b31SStefan Hajnoczi 
301361007b31SStefan Hajnoczi /**************************************************************/
301461007b31SStefan Hajnoczi /* Coroutine block device emulation */
301561007b31SStefan Hajnoczi 
/*
 * Flush @bs and (recursively) every writable child.
 *
 * Flushes are serialised per node via bs->active_flush_req/flush_queue and
 * tracked with a write generation counter (bs->write_gen/flushed_gen) so
 * that redundant flushes can be skipped.  Returns 0 on success or a
 * negative errno; on success bs->flushed_gen is advanced to the generation
 * observed at entry.
 */
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;
    IO_CODE();

    assert_bdrv_graph_readable();
    bdrv_inc_in_flight(bs);

    /* Nothing to flush: no medium, read-only node, or SCSI passthrough */
    if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    /* Snapshot the write generation under reqs_lock before queueing */
    qemu_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        /* Releases reqs_lock while waiting, re-acquires it on wakeup */
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        /* Bridge the callback-style AIO flush into this coroutine */
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            /* Keep the first error but still flush the remaining children */
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
314061007b31SStefan Hajnoczi 
/*
 * Discard (unmap) @bytes bytes starting at @offset on @child.
 *
 * The request is fragmented so that head/tail pieces reach alignment
 * boundaries (bs->bl.pdiscard_alignment / request_alignment) and no piece
 * exceeds bs->bl.max_pdiscard.  Returns 0 on success (including when
 * discard is disabled or unsupported, since discard is advisory) or a
 * negative errno.
 */
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    /* Discarding would lose data still referenced by read-only bitmaps */
    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    /* Driver has no discard support; succeed silently (advisory op) */
    if (!bs->drv->bdrv_co_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /*
     * Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices reject unaligned requests with
     * -EINVAL or -ENOTSUP, so we must fragment the request accordingly.
     */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    /* Discard counts as a write for permission/serialisation purposes */
    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                /* Trim the sub-alignment remainder off the tail piece */
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        /* The driver may have been ejected by a previous iteration */
        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }

        ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        if (ret && ret != -ENOTSUP) {
            if (ret == -EINVAL && (offset % align != 0 || num % align != 0)) {
                /* Silently skip rejected unaligned head/tail requests */
            } else {
                goto out; /* bail out */
            }
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
325061007b31SStefan Hajnoczi 
bdrv_co_ioctl(BlockDriverState * bs,int req,void * buf)3251881a4c55SPaolo Bonzini int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
325261007b31SStefan Hajnoczi {
325361007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
32545c5ae76aSFam Zheng     CoroutineIOCompletion co = {
32555c5ae76aSFam Zheng         .coroutine = qemu_coroutine_self(),
32565c5ae76aSFam Zheng     };
32575c5ae76aSFam Zheng     BlockAIOCB *acb;
3258384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
325926c518abSKevin Wolf     assert_bdrv_graph_readable();
326061007b31SStefan Hajnoczi 
326199723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
326216a389dcSKevin Wolf     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
32635c5ae76aSFam Zheng         co.ret = -ENOTSUP;
32645c5ae76aSFam Zheng         goto out;
32655c5ae76aSFam Zheng     }
32665c5ae76aSFam Zheng 
326716a389dcSKevin Wolf     if (drv->bdrv_co_ioctl) {
326816a389dcSKevin Wolf         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
326916a389dcSKevin Wolf     } else {
32705c5ae76aSFam Zheng         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
32715c5ae76aSFam Zheng         if (!acb) {
3272c8a9fd80SFam Zheng             co.ret = -ENOTSUP;
3273c8a9fd80SFam Zheng             goto out;
32745c5ae76aSFam Zheng         }
32755c5ae76aSFam Zheng         qemu_coroutine_yield();
327616a389dcSKevin Wolf     }
32775c5ae76aSFam Zheng out:
327899723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
32795c5ae76aSFam Zheng     return co.ret;
32805c5ae76aSFam Zheng }
32815c5ae76aSFam Zheng 
bdrv_co_zone_report(BlockDriverState * bs,int64_t offset,unsigned int * nr_zones,BlockZoneDescriptor * zones)32826d43eaa3SSam Li int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
32836d43eaa3SSam Li                         unsigned int *nr_zones,
32846d43eaa3SSam Li                         BlockZoneDescriptor *zones)
32856d43eaa3SSam Li {
32866d43eaa3SSam Li     BlockDriver *drv = bs->drv;
32876d43eaa3SSam Li     CoroutineIOCompletion co = {
32886d43eaa3SSam Li             .coroutine = qemu_coroutine_self(),
32896d43eaa3SSam Li     };
32906d43eaa3SSam Li     IO_CODE();
32916d43eaa3SSam Li 
32926d43eaa3SSam Li     bdrv_inc_in_flight(bs);
32936d43eaa3SSam Li     if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
32946d43eaa3SSam Li         co.ret = -ENOTSUP;
32956d43eaa3SSam Li         goto out;
32966d43eaa3SSam Li     }
32976d43eaa3SSam Li     co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
32986d43eaa3SSam Li out:
32996d43eaa3SSam Li     bdrv_dec_in_flight(bs);
33006d43eaa3SSam Li     return co.ret;
33016d43eaa3SSam Li }
33026d43eaa3SSam Li 
bdrv_co_zone_mgmt(BlockDriverState * bs,BlockZoneOp op,int64_t offset,int64_t len)33036d43eaa3SSam Li int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
33046d43eaa3SSam Li         int64_t offset, int64_t len)
33056d43eaa3SSam Li {
33066d43eaa3SSam Li     BlockDriver *drv = bs->drv;
33076d43eaa3SSam Li     CoroutineIOCompletion co = {
33086d43eaa3SSam Li             .coroutine = qemu_coroutine_self(),
33096d43eaa3SSam Li     };
33106d43eaa3SSam Li     IO_CODE();
33116d43eaa3SSam Li 
33126d43eaa3SSam Li     bdrv_inc_in_flight(bs);
33136d43eaa3SSam Li     if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
33146d43eaa3SSam Li         co.ret = -ENOTSUP;
33156d43eaa3SSam Li         goto out;
33166d43eaa3SSam Li     }
33176d43eaa3SSam Li     co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
33186d43eaa3SSam Li out:
33196d43eaa3SSam Li     bdrv_dec_in_flight(bs);
33206d43eaa3SSam Li     return co.ret;
33216d43eaa3SSam Li }
33226d43eaa3SSam Li 
bdrv_co_zone_append(BlockDriverState * bs,int64_t * offset,QEMUIOVector * qiov,BdrvRequestFlags flags)33234751d09aSSam Li int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
33244751d09aSSam Li                         QEMUIOVector *qiov,
33254751d09aSSam Li                         BdrvRequestFlags flags)
33264751d09aSSam Li {
33274751d09aSSam Li     int ret;
33284751d09aSSam Li     BlockDriver *drv = bs->drv;
33294751d09aSSam Li     CoroutineIOCompletion co = {
33304751d09aSSam Li             .coroutine = qemu_coroutine_self(),
33314751d09aSSam Li     };
33324751d09aSSam Li     IO_CODE();
33334751d09aSSam Li 
33344751d09aSSam Li     ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
33354751d09aSSam Li     if (ret < 0) {
33364751d09aSSam Li         return ret;
33374751d09aSSam Li     }
33384751d09aSSam Li 
33394751d09aSSam Li     bdrv_inc_in_flight(bs);
33404751d09aSSam Li     if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
33414751d09aSSam Li         co.ret = -ENOTSUP;
33424751d09aSSam Li         goto out;
33434751d09aSSam Li     }
33444751d09aSSam Li     co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
33454751d09aSSam Li out:
33464751d09aSSam Li     bdrv_dec_in_flight(bs);
33474751d09aSSam Li     return co.ret;
33484751d09aSSam Li }
33494751d09aSSam Li 
/* Allocate @size bytes aligned for I/O on @bs; aborts on failure. */
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align;

    IO_CODE();
    align = bdrv_opt_mem_align(bs);
    return qemu_memalign(align, size);
}
335561007b31SStefan Hajnoczi 
/* Like qemu_blockalign(), but the returned buffer is zero-filled. */
void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    void *buf;

    IO_CODE();
    buf = qemu_blockalign(bs, size);
    memset(buf, 0, size);
    return buf;
}
336161007b31SStefan Hajnoczi 
/*
 * Allocate @size bytes aligned for I/O on @bs.
 *
 * Unlike qemu_blockalign(), returns NULL on allocation failure instead of
 * aborting.  A zero @size is rounded up to one alignment unit so that a
 * successful allocation never yields NULL.
 */
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);
    IO_CODE();

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    return qemu_try_memalign(align, size ? size : align);
}
337561007b31SStefan Hajnoczi 
/* Like qemu_try_blockalign(), but zero-fills the buffer on success. */
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);
    IO_CODE();

    if (!mem) {
        return NULL;
    }
    return memset(mem, 0, size);
}
338761007b31SStefan Hajnoczi 
3388f4ec04baSStefan Hajnoczi /* Helper that undoes bdrv_register_buf() when it fails partway through */
3389d9249c25SKevin Wolf static void GRAPH_RDLOCK
bdrv_register_buf_rollback(BlockDriverState * bs,void * host,size_t size,BdrvChild * final_child)3390d9249c25SKevin Wolf bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
3391f4ec04baSStefan Hajnoczi                            BdrvChild *final_child)
3392f4ec04baSStefan Hajnoczi {
3393f4ec04baSStefan Hajnoczi     BdrvChild *child;
3394f4ec04baSStefan Hajnoczi 
3395d9249c25SKevin Wolf     GLOBAL_STATE_CODE();
3396d9249c25SKevin Wolf     assert_bdrv_graph_readable();
3397d9249c25SKevin Wolf 
3398f4ec04baSStefan Hajnoczi     QLIST_FOREACH(child, &bs->children, next) {
3399f4ec04baSStefan Hajnoczi         if (child == final_child) {
3400f4ec04baSStefan Hajnoczi             break;
3401f4ec04baSStefan Hajnoczi         }
3402f4ec04baSStefan Hajnoczi 
3403f4ec04baSStefan Hajnoczi         bdrv_unregister_buf(child->bs, host, size);
3404f4ec04baSStefan Hajnoczi     }
3405f4ec04baSStefan Hajnoczi 
3406f4ec04baSStefan Hajnoczi     if (bs->drv && bs->drv->bdrv_unregister_buf) {
3407f4ec04baSStefan Hajnoczi         bs->drv->bdrv_unregister_buf(bs, host, size);
3408f4ec04baSStefan Hajnoczi     }
3409f4ec04baSStefan Hajnoczi }
3410f4ec04baSStefan Hajnoczi 
bdrv_register_buf(BlockDriverState * bs,void * host,size_t size,Error ** errp)3411f4ec04baSStefan Hajnoczi bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
3412f4ec04baSStefan Hajnoczi                        Error **errp)
341323d0ba93SFam Zheng {
341423d0ba93SFam Zheng     BdrvChild *child;
341523d0ba93SFam Zheng 
3416f791bf7fSEmanuele Giuseppe Esposito     GLOBAL_STATE_CODE();
3417d9249c25SKevin Wolf     GRAPH_RDLOCK_GUARD_MAINLOOP();
3418d9249c25SKevin Wolf 
341923d0ba93SFam Zheng     if (bs->drv && bs->drv->bdrv_register_buf) {
3420f4ec04baSStefan Hajnoczi         if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
3421f4ec04baSStefan Hajnoczi             return false;
3422f4ec04baSStefan Hajnoczi         }
342323d0ba93SFam Zheng     }
342423d0ba93SFam Zheng     QLIST_FOREACH(child, &bs->children, next) {
3425f4ec04baSStefan Hajnoczi         if (!bdrv_register_buf(child->bs, host, size, errp)) {
3426f4ec04baSStefan Hajnoczi             bdrv_register_buf_rollback(bs, host, size, child);
3427f4ec04baSStefan Hajnoczi             return false;
342823d0ba93SFam Zheng         }
342923d0ba93SFam Zheng     }
3430f4ec04baSStefan Hajnoczi     return true;
3431f4ec04baSStefan Hajnoczi }
343223d0ba93SFam Zheng 
/*
 * Undo bdrv_register_buf(): unregister @host/@size from this node's driver
 * and, recursively, from all children.
 */
void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *c;
    BlockDriver *drv = bs->drv;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (drv && drv->bdrv_unregister_buf) {
        drv->bdrv_unregister_buf(bs, host, size);
    }
    QLIST_FOREACH(c, &bs->children, next) {
        bdrv_unregister_buf(c->bs, host, size);
    }
}
3447fcc67678SFam Zheng 
bdrv_co_copy_range_internal(BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags,bool recurse_src)3448abaf8b75SKevin Wolf static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
3449a5215b8fSVladimir Sementsov-Ogievskiy         BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3450a5215b8fSVladimir Sementsov-Ogievskiy         int64_t dst_offset, int64_t bytes,
345167b51fb9SVladimir Sementsov-Ogievskiy         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3452fcc67678SFam Zheng         bool recurse_src)
3453fcc67678SFam Zheng {
3454999658a0SVladimir Sementsov-Ogievskiy     BdrvTrackedRequest req;
3455fcc67678SFam Zheng     int ret;
3456742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3457fcc67678SFam Zheng 
3458fe0480d6SKevin Wolf     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3459fe0480d6SKevin Wolf     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3460fe0480d6SKevin Wolf     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
346145e62b46SVladimir Sementsov-Ogievskiy     assert(!(read_flags & BDRV_REQ_NO_WAIT));
346245e62b46SVladimir Sementsov-Ogievskiy     assert(!(write_flags & BDRV_REQ_NO_WAIT));
3463fe0480d6SKevin Wolf 
34641e97be91SEmanuele Giuseppe Esposito     if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
3465fcc67678SFam Zheng         return -ENOMEDIUM;
3466fcc67678SFam Zheng     }
346763f4ad11SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3468fcc67678SFam Zheng     if (ret) {
3469fcc67678SFam Zheng         return ret;
3470fcc67678SFam Zheng     }
347167b51fb9SVladimir Sementsov-Ogievskiy     if (write_flags & BDRV_REQ_ZERO_WRITE) {
347267b51fb9SVladimir Sementsov-Ogievskiy         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3473fcc67678SFam Zheng     }
3474fcc67678SFam Zheng 
34751e97be91SEmanuele Giuseppe Esposito     if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
3476d4d3e5a0SFam Zheng         return -ENOMEDIUM;
3477d4d3e5a0SFam Zheng     }
347863f4ad11SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3479d4d3e5a0SFam Zheng     if (ret) {
3480d4d3e5a0SFam Zheng         return ret;
3481d4d3e5a0SFam Zheng     }
3482d4d3e5a0SFam Zheng 
3483fcc67678SFam Zheng     if (!src->bs->drv->bdrv_co_copy_range_from
3484fcc67678SFam Zheng         || !dst->bs->drv->bdrv_co_copy_range_to
3485fcc67678SFam Zheng         || src->bs->encrypted || dst->bs->encrypted) {
3486fcc67678SFam Zheng         return -ENOTSUP;
3487fcc67678SFam Zheng     }
3488999658a0SVladimir Sementsov-Ogievskiy 
3489999658a0SVladimir Sementsov-Ogievskiy     if (recurse_src) {
3490d4d3e5a0SFam Zheng         bdrv_inc_in_flight(src->bs);
3491999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, src->bs, src_offset, bytes,
3492999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_READ);
349337aec7d7SFam Zheng 
349409d2f948SVladimir Sementsov-Ogievskiy         /* BDRV_REQ_SERIALISING is only for write operation */
349509d2f948SVladimir Sementsov-Ogievskiy         assert(!(read_flags & BDRV_REQ_SERIALISING));
3496304d9d7fSMax Reitz         bdrv_wait_serialising_requests(&req);
3497999658a0SVladimir Sementsov-Ogievskiy 
349837aec7d7SFam Zheng         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3499fcc67678SFam Zheng                                                     src, src_offset,
3500fcc67678SFam Zheng                                                     dst, dst_offset,
350167b51fb9SVladimir Sementsov-Ogievskiy                                                     bytes,
350267b51fb9SVladimir Sementsov-Ogievskiy                                                     read_flags, write_flags);
3503999658a0SVladimir Sementsov-Ogievskiy 
3504999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3505999658a0SVladimir Sementsov-Ogievskiy         bdrv_dec_in_flight(src->bs);
3506fcc67678SFam Zheng     } else {
3507999658a0SVladimir Sementsov-Ogievskiy         bdrv_inc_in_flight(dst->bs);
3508999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3509999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_WRITE);
35100eb1e891SFam Zheng         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
35110eb1e891SFam Zheng                                         write_flags);
35120eb1e891SFam Zheng         if (!ret) {
351337aec7d7SFam Zheng             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3514fcc67678SFam Zheng                                                       src, src_offset,
3515fcc67678SFam Zheng                                                       dst, dst_offset,
351667b51fb9SVladimir Sementsov-Ogievskiy                                                       bytes,
351767b51fb9SVladimir Sementsov-Ogievskiy                                                       read_flags, write_flags);
35180eb1e891SFam Zheng         }
35190eb1e891SFam Zheng         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3520999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3521d4d3e5a0SFam Zheng         bdrv_dec_in_flight(dst->bs);
3522999658a0SVladimir Sementsov-Ogievskiy     }
3523999658a0SVladimir Sementsov-Ogievskiy 
352437aec7d7SFam Zheng     return ret;
3525fcc67678SFam Zheng }
3526fcc67678SFam Zheng 
3527fcc67678SFam Zheng /* Copy range from @src to @dst.
3528fcc67678SFam Zheng  *
3529fcc67678SFam Zheng  * See the comment of bdrv_co_copy_range for the parameter and return value
3530fcc67678SFam Zheng  * semantics. */
bdrv_co_copy_range_from(BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags)3531a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3532a5215b8fSVladimir Sementsov-Ogievskiy                                          BdrvChild *dst, int64_t dst_offset,
3533a5215b8fSVladimir Sementsov-Ogievskiy                                          int64_t bytes,
353467b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags read_flags,
353567b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags write_flags)
3536fcc67678SFam Zheng {
3537967d7905SEmanuele Giuseppe Esposito     IO_CODE();
3538742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3539ecc983a5SFam Zheng     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3540ecc983a5SFam Zheng                                   read_flags, write_flags);
3541fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
354267b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, true);
3543fcc67678SFam Zheng }
3544fcc67678SFam Zheng 
3545fcc67678SFam Zheng /* Copy range from @src to @dst.
3546fcc67678SFam Zheng  *
3547fcc67678SFam Zheng  * See the comment of bdrv_co_copy_range for the parameter and return value
3548fcc67678SFam Zheng  * semantics. */
bdrv_co_copy_range_to(BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags)3549a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3550a5215b8fSVladimir Sementsov-Ogievskiy                                        BdrvChild *dst, int64_t dst_offset,
3551a5215b8fSVladimir Sementsov-Ogievskiy                                        int64_t bytes,
355267b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags read_flags,
355367b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags write_flags)
3554fcc67678SFam Zheng {
3555967d7905SEmanuele Giuseppe Esposito     IO_CODE();
3556742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3557ecc983a5SFam Zheng     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3558ecc983a5SFam Zheng                                 read_flags, write_flags);
3559fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
356067b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, false);
3561fcc67678SFam Zheng }
3562fcc67678SFam Zheng 
bdrv_co_copy_range(BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags)3563a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3564a5215b8fSVladimir Sementsov-Ogievskiy                                     BdrvChild *dst, int64_t dst_offset,
3565a5215b8fSVladimir Sementsov-Ogievskiy                                     int64_t bytes, BdrvRequestFlags read_flags,
356667b51fb9SVladimir Sementsov-Ogievskiy                                     BdrvRequestFlags write_flags)
3567fcc67678SFam Zheng {
3568384a48fbSEmanuele Giuseppe Esposito     IO_CODE();
3569742bf09bSEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3570742bf09bSEmanuele Giuseppe Esposito 
357137aec7d7SFam Zheng     return bdrv_co_copy_range_from(src, src_offset,
3572fcc67678SFam Zheng                                    dst, dst_offset,
357367b51fb9SVladimir Sementsov-Ogievskiy                                    bytes, read_flags, write_flags);
3574fcc67678SFam Zheng }
35753d9f2d2aSKevin Wolf 
35767859c45aSKevin Wolf static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState * bs)35777859c45aSKevin Wolf bdrv_parent_cb_resize(BlockDriverState *bs)
35783d9f2d2aSKevin Wolf {
35793d9f2d2aSKevin Wolf     BdrvChild *c;
35807859c45aSKevin Wolf 
35817859c45aSKevin Wolf     assert_bdrv_graph_readable();
35827859c45aSKevin Wolf 
35833d9f2d2aSKevin Wolf     QLIST_FOREACH(c, &bs->parents, next_parent) {
3584bd86fb99SMax Reitz         if (c->klass->resize) {
3585bd86fb99SMax Reitz             c->klass->resize(c);
35863d9f2d2aSKevin Wolf         }
35873d9f2d2aSKevin Wolf     }
35883d9f2d2aSKevin Wolf }
35893d9f2d2aSKevin Wolf 
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 *
 * The request is accounted as an in-flight, tracked request so that it
 * interacts correctly with drain and with concurrent I/O.  Returns 0 on
 * success, a negative errno on failure (with @errp set).
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    /* Validate @offset against the block layer's request limits */
    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_co_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    /* new_bytes is the size of the area appended when growing, 0 otherwise */
    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    /*
     * Track the truncate as a request covering the newly added area (empty
     * when shrinking), paired with bdrv_dec_in_flight()/tracked_request_end()
     * at 'out' below.
     */
    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_co_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        /* Filter drivers without their own truncate pass it to the child */
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        /* The driver may have resized to a sector boundary past @offset */
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but bdrv_refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
3725bd54669aSVladimir Sementsov-Ogievskiy 
/*
 * Ask the driver of @bs to cancel its in-flight requests, if the driver
 * implements cancellation.  A NULL @bs or a closed node (no driver) is a
 * silent no-op.
 */
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs && bs->drv && bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}
3739ce14f3b4SVladimir Sementsov-Ogievskiy 
3740ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild * child,int64_t offset,int64_t bytes,QEMUIOVector * qiov,size_t qiov_offset)3741ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
3742ce14f3b4SVladimir Sementsov-Ogievskiy                         QEMUIOVector *qiov, size_t qiov_offset)
3743ce14f3b4SVladimir Sementsov-Ogievskiy {
3744ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriverState *bs = child->bs;
3745ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
3746ce14f3b4SVladimir Sementsov-Ogievskiy     int ret;
3747ce14f3b4SVladimir Sementsov-Ogievskiy     IO_CODE();
37487b9e8b22SKevin Wolf     assert_bdrv_graph_readable();
3749ce14f3b4SVladimir Sementsov-Ogievskiy 
3750ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv) {
3751ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
3752ce14f3b4SVladimir Sementsov-Ogievskiy     }
3753ce14f3b4SVladimir Sementsov-Ogievskiy 
3754ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv->bdrv_co_preadv_snapshot) {
3755ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOTSUP;
3756ce14f3b4SVladimir Sementsov-Ogievskiy     }
3757ce14f3b4SVladimir Sementsov-Ogievskiy 
3758ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
3759ce14f3b4SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
3760ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
3761ce14f3b4SVladimir Sementsov-Ogievskiy 
3762ce14f3b4SVladimir Sementsov-Ogievskiy     return ret;
3763ce14f3b4SVladimir Sementsov-Ogievskiy }
3764ce14f3b4SVladimir Sementsov-Ogievskiy 
3765ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState * bs,unsigned int mode,int64_t offset,int64_t bytes,int64_t * pnum,int64_t * map,BlockDriverState ** file)3766c33159deSEric Blake bdrv_co_snapshot_block_status(BlockDriverState *bs, unsigned int mode,
3767c33159deSEric Blake                               int64_t offset, int64_t bytes,
3768ce14f3b4SVladimir Sementsov-Ogievskiy                               int64_t *pnum, int64_t *map,
3769ce14f3b4SVladimir Sementsov-Ogievskiy                               BlockDriverState **file)
3770ce14f3b4SVladimir Sementsov-Ogievskiy {
3771ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
3772ce14f3b4SVladimir Sementsov-Ogievskiy     int ret;
3773ce14f3b4SVladimir Sementsov-Ogievskiy     IO_CODE();
37747b9e8b22SKevin Wolf     assert_bdrv_graph_readable();
3775ce14f3b4SVladimir Sementsov-Ogievskiy 
3776ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv) {
3777ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
3778ce14f3b4SVladimir Sementsov-Ogievskiy     }
3779ce14f3b4SVladimir Sementsov-Ogievskiy 
3780ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv->bdrv_co_snapshot_block_status) {
3781ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOTSUP;
3782ce14f3b4SVladimir Sementsov-Ogievskiy     }
3783ce14f3b4SVladimir Sementsov-Ogievskiy 
3784ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
3785c33159deSEric Blake     ret = drv->bdrv_co_snapshot_block_status(bs, mode, offset, bytes,
3786ce14f3b4SVladimir Sementsov-Ogievskiy                                              pnum, map, file);
3787ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
3788ce14f3b4SVladimir Sementsov-Ogievskiy 
3789ce14f3b4SVladimir Sementsov-Ogievskiy     return ret;
3790ce14f3b4SVladimir Sementsov-Ogievskiy }
3791ce14f3b4SVladimir Sementsov-Ogievskiy 
3792ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState * bs,int64_t offset,int64_t bytes)3793ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
3794ce14f3b4SVladimir Sementsov-Ogievskiy {
3795ce14f3b4SVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
3796ce14f3b4SVladimir Sementsov-Ogievskiy     int ret;
3797ce14f3b4SVladimir Sementsov-Ogievskiy     IO_CODE();
37989a5a1c62SEmanuele Giuseppe Esposito     assert_bdrv_graph_readable();
3799ce14f3b4SVladimir Sementsov-Ogievskiy 
3800ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv) {
3801ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
3802ce14f3b4SVladimir Sementsov-Ogievskiy     }
3803ce14f3b4SVladimir Sementsov-Ogievskiy 
3804ce14f3b4SVladimir Sementsov-Ogievskiy     if (!drv->bdrv_co_pdiscard_snapshot) {
3805ce14f3b4SVladimir Sementsov-Ogievskiy         return -ENOTSUP;
3806ce14f3b4SVladimir Sementsov-Ogievskiy     }
3807ce14f3b4SVladimir Sementsov-Ogievskiy 
3808ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
3809ce14f3b4SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
3810ce14f3b4SVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
3811ce14f3b4SVladimir Sementsov-Ogievskiy 
3812ce14f3b4SVladimir Sementsov-Ogievskiy     return ret;
3813ce14f3b4SVladimir Sementsov-Ogievskiy }
3814