xref: /qemu/block/io.c (revision 8b1170012b1de6649c66ac1887f4df7e312abf3b)
/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
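/* (32768 << BDRV_SECTOR_BITS is 16 MiB with the usual 512-byte sectors) */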
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

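/*
 * Note on the merge semantics above: MIN_NON_ZERO treats 0 as "no limit",
 * so merging a child with max_transfer == 0 into a parent with
 * max_transfer == 65536 keeps 65536, while alignments combine with MAX,
 * e.g. merging opt_mem_alignment values 512 and 4096 yields 4096.
 */
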
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, errp);
            if (*errp) {
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

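/*
 * Usage sketch: because the flag is a reference count, nested users compose
 * safely.  A user (the stream job, for instance) brackets its work like:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... guest reads now populate the top layer ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * Copy-on-read only switches off once every enabler has disabled it again.
 */
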
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.  The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles.  Therefore, the pointer must remain valid
 * until the pointee reaches 0.  That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

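/*
 * Usage sketch: a drained section brackets operations that must not race
 * with new I/O, e.g. graph changes:
 *
 *     bdrv_drained_begin(bs);
 *     ... attach/detach children, reopen, etc. ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drained_begin() polls until in-flight requests have settled;
 * bdrv_drained_end() polls until every drained_end operation it scheduled
 * (counted in drained_end_counter) has settled.
 */
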
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish may block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish may block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

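/*
 * Usage sketch: bdrv_drain_all() is simply an empty begin/end section.
 * Callers that need the whole system quiescent across several steps use
 * the pair directly:
 *
 *     bdrv_drain_all_begin();
 *     ... operate on any number of BlockDriverStates ...
 *     bdrv_drain_all_end();
 */
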
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

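/*
 * Example: a request with overlap_offset == 4096 and overlap_bytes == 4096
 * covers [4096, 8192).  Probing offset == 8192, bytes == 512 fails the
 * first check (8192 >= 8192) and does not overlap; probing offset == 8191,
 * bytes == 1 passes both checks and overlaps.
 */
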
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
                                      BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);
    return waited;
}

bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    BlockDriverState *bs = req->bs;
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;
    bool waited;

    qemu_co_mutex_lock(&bs->reqs_lock);
    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
    waited = bdrv_wait_serialising_requests_locked(bs, req);
    qemu_co_mutex_unlock(&bs->reqs_lock);
    return waited;
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

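/*
 * Worked example: with bdi.cluster_size == 4096, a request for
 * [5000, 8000) (offset == 5000, bytes == 3000) is rounded to
 * *cluster_offset == 4096 and *cluster_bytes == 4096, i.e. the aligned
 * range [4096, 8192) that fully contains the original request.
 */
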
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(bs, self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

int bdrv_check_request(int64_t offset, int64_t bytes)
{
    if (offset < 0 || bytes < 0) {
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request32(int64_t offset, int64_t bytes)
{
    int ret = bdrv_check_request(offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

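/*
 * Examples: bdrv_check_request(0, BDRV_MAX_LENGTH) returns 0, while
 * bdrv_check_request(1, BDRV_MAX_LENGTH) returns -EIO because the end of
 * the request would exceed BDRV_MAX_LENGTH; negative offsets or byte
 * counts always fail.  bdrv_check_request32() additionally limits a
 * single request to BDRV_REQUEST_MAX_BYTES for the 32-bit request paths.
 */
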
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

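/*
 * Usage sketch: zero out a whole device, allowing the driver to unmap
 * instead of writing where it can:
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *
 * Ranges that already read as zeroes (BDRV_BLOCK_ZERO) are skipped.
 */
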
9732e11d756SAlberto Garcia /* See bdrv_pwrite() for the return codes */
974cf2ab8fcSKevin Wolf int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
97561007b31SStefan Hajnoczi {
976fae2681aSVladimir Sementsov-Ogievskiy     int ret;
9770d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
97861007b31SStefan Hajnoczi 
97961007b31SStefan Hajnoczi     if (bytes < 0) {
98061007b31SStefan Hajnoczi         return -EINVAL;
98161007b31SStefan Hajnoczi     }
98261007b31SStefan Hajnoczi 
983fae2681aSVladimir Sementsov-Ogievskiy     ret = bdrv_preadv(child, offset, bytes, &qiov,  0);
98461007b31SStefan Hajnoczi 
985fae2681aSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : bytes;
98661007b31SStefan Hajnoczi }
98761007b31SStefan Hajnoczi 
9882e11d756SAlberto Garcia /* Return no. of bytes on success or < 0 on error. Important errors are:
9892e11d756SAlberto Garcia   -EIO         generic I/O error (may happen for all errors)
9902e11d756SAlberto Garcia   -ENOMEDIUM   No media inserted.
9912e11d756SAlberto Garcia   -EINVAL      Invalid offset or number of bytes
9922e11d756SAlberto Garcia   -EACCES      Trying to write a read-only device
9932e11d756SAlberto Garcia */
994d9ca2ea2SKevin Wolf int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
99561007b31SStefan Hajnoczi {
996fae2681aSVladimir Sementsov-Ogievskiy     int ret;
9970d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
99861007b31SStefan Hajnoczi 
99961007b31SStefan Hajnoczi     if (bytes < 0) {
100061007b31SStefan Hajnoczi         return -EINVAL;
100161007b31SStefan Hajnoczi     }
100261007b31SStefan Hajnoczi 
1003fae2681aSVladimir Sementsov-Ogievskiy     ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);
1004fae2681aSVladimir Sementsov-Ogievskiy 
1005fae2681aSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : bytes;
100661007b31SStefan Hajnoczi }
100761007b31SStefan Hajnoczi 
100861007b31SStefan Hajnoczi /*
100961007b31SStefan Hajnoczi  * Writes to the file and ensures that no writes are reordered across this
101061007b31SStefan Hajnoczi  * request (acts as a barrier)
101161007b31SStefan Hajnoczi  *
101261007b31SStefan Hajnoczi  * Returns 0 on success, -errno in error cases.
101361007b31SStefan Hajnoczi  */
1014d9ca2ea2SKevin Wolf int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
101561007b31SStefan Hajnoczi                      const void *buf, int count)
101661007b31SStefan Hajnoczi {
101761007b31SStefan Hajnoczi     int ret;
101861007b31SStefan Hajnoczi 
1019d9ca2ea2SKevin Wolf     ret = bdrv_pwrite(child, offset, buf, count);
102061007b31SStefan Hajnoczi     if (ret < 0) {
102161007b31SStefan Hajnoczi         return ret;
102261007b31SStefan Hajnoczi     }
102361007b31SStefan Hajnoczi 
1024d9ca2ea2SKevin Wolf     ret = bdrv_flush(child->bs);
1025855a6a93SKevin Wolf     if (ret < 0) {
1026855a6a93SKevin Wolf         return ret;
102761007b31SStefan Hajnoczi     }
102861007b31SStefan Hajnoczi 
102961007b31SStefan Hajnoczi     return 0;
103061007b31SStefan Hajnoczi }
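/*
 * Editor's note: a hedged sketch (not part of io.c) of why the flush in
 * bdrv_pwrite_sync() matters. All names, offsets and buffers here are
 * hypothetical: writing a journal payload with the barrier variant
 * guarantees that the later commit record cannot be reordered in front
 * of the payload it refers to.
 */
#if 0 /* illustrative only */
ret = bdrv_pwrite_sync(child, payload_ofs, payload, payload_len);
if (ret < 0) {
    return ret;
}
/* Safe only now: the flush above ordered the payload before this write. */
ret = bdrv_pwrite(child, commit_ofs, commit, commit_len);
#endif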
103161007b31SStefan Hajnoczi 
103208844473SKevin Wolf typedef struct CoroutineIOCompletion {
103308844473SKevin Wolf     Coroutine *coroutine;
103408844473SKevin Wolf     int ret;
103508844473SKevin Wolf } CoroutineIOCompletion;
103608844473SKevin Wolf 
103708844473SKevin Wolf static void bdrv_co_io_em_complete(void *opaque, int ret)
103808844473SKevin Wolf {
103908844473SKevin Wolf     CoroutineIOCompletion *co = opaque;
104008844473SKevin Wolf 
104108844473SKevin Wolf     co->ret = ret;
1042b9e413ddSPaolo Bonzini     aio_co_wake(co->coroutine);
104308844473SKevin Wolf }
104408844473SKevin Wolf 
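/*
 * Editor's note: condensed sketch of the AIO-to-coroutine bridge used by the
 * driver dispatch functions below; "drv_aio_submit" is a hypothetical
 * stand-in for a driver's AIO entry point. The coroutine parks itself with
 * qemu_coroutine_yield() and bdrv_co_io_em_complete() wakes it via
 * aio_co_wake() once the AIO callback fires.
 */
#if 0 /* illustrative only */
CoroutineIOCompletion co = {
    .coroutine = qemu_coroutine_self(),
};
BlockAIOCB *acb = drv_aio_submit(bs, ..., bdrv_co_io_em_complete, &co);
if (acb == NULL) {
    return -EIO;
}
qemu_coroutine_yield(); /* resumed by bdrv_co_io_em_complete() */
return co.ret;
#endif
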
1045166fe960SKevin Wolf static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1046166fe960SKevin Wolf                                            uint64_t offset, uint64_t bytes,
1047ac850bf0SVladimir Sementsov-Ogievskiy                                            QEMUIOVector *qiov,
1048ac850bf0SVladimir Sementsov-Ogievskiy                                            size_t qiov_offset, int flags)
1049166fe960SKevin Wolf {
1050166fe960SKevin Wolf     BlockDriver *drv = bs->drv;
10513fb06697SKevin Wolf     int64_t sector_num;
10523fb06697SKevin Wolf     unsigned int nb_sectors;
1053ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
1054ac850bf0SVladimir Sementsov-Ogievskiy     int ret;
10553fb06697SKevin Wolf 
1056fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
1057fe0480d6SKevin Wolf     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1058fa166538SEric Blake 
1059d470ad42SMax Reitz     if (!drv) {
1060d470ad42SMax Reitz         return -ENOMEDIUM;
1061d470ad42SMax Reitz     }
1062d470ad42SMax Reitz 
1063ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_preadv_part) {
1064ac850bf0SVladimir Sementsov-Ogievskiy         return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
1065ac850bf0SVladimir Sementsov-Ogievskiy                                         flags);
1066ac850bf0SVladimir Sementsov-Ogievskiy     }
1067ac850bf0SVladimir Sementsov-Ogievskiy 
1068ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset > 0 || bytes != qiov->size) {
1069ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1070ac850bf0SVladimir Sementsov-Ogievskiy         qiov = &local_qiov;
1071ac850bf0SVladimir Sementsov-Ogievskiy     }
1072ac850bf0SVladimir Sementsov-Ogievskiy 
10733fb06697SKevin Wolf     if (drv->bdrv_co_preadv) {
1074ac850bf0SVladimir Sementsov-Ogievskiy         ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1075ac850bf0SVladimir Sementsov-Ogievskiy         goto out;
10763fb06697SKevin Wolf     }
10773fb06697SKevin Wolf 
1078edfab6a0SEric Blake     if (drv->bdrv_aio_preadv) {
107908844473SKevin Wolf         BlockAIOCB *acb;
108008844473SKevin Wolf         CoroutineIOCompletion co = {
108108844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
108208844473SKevin Wolf         };
108308844473SKevin Wolf 
1084e31f6864SEric Blake         acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
108508844473SKevin Wolf                                    bdrv_co_io_em_complete, &co);
108608844473SKevin Wolf         if (acb == NULL) {
1087ac850bf0SVladimir Sementsov-Ogievskiy             ret = -EIO;
1088ac850bf0SVladimir Sementsov-Ogievskiy             goto out;
108908844473SKevin Wolf         } else {
109008844473SKevin Wolf             qemu_coroutine_yield();
1091ac850bf0SVladimir Sementsov-Ogievskiy             ret = co.ret;
1092ac850bf0SVladimir Sementsov-Ogievskiy             goto out;
109308844473SKevin Wolf         }
109408844473SKevin Wolf     }
1095edfab6a0SEric Blake 
1096edfab6a0SEric Blake     sector_num = offset >> BDRV_SECTOR_BITS;
1097edfab6a0SEric Blake     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1098edfab6a0SEric Blake 
10991bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
11001bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
110141ae31e3SAlberto Garcia     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1102edfab6a0SEric Blake     assert(drv->bdrv_co_readv);
1103edfab6a0SEric Blake 
1104ac850bf0SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1105ac850bf0SVladimir Sementsov-Ogievskiy 
1106ac850bf0SVladimir Sementsov-Ogievskiy out:
1107ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov == &local_qiov) {
1108ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&local_qiov);
1109ac850bf0SVladimir Sementsov-Ogievskiy     }
1110ac850bf0SVladimir Sementsov-Ogievskiy 
1111ac850bf0SVladimir Sementsov-Ogievskiy     return ret;
1112166fe960SKevin Wolf }
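/*
 * Editor's note: the dispatch above is a fixed fallback chain:
 * .bdrv_co_preadv_part (qiov_offset passed straight through), then
 * .bdrv_co_preadv (a local qiov slice is built first if needed), then
 * .bdrv_aio_preadv (bridged back into the coroutine via
 * CoroutineIOCompletion), and finally the legacy sector-based
 * .bdrv_co_readv, which requires sector-aligned offset and bytes.
 */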
1113166fe960SKevin Wolf 
111478a07294SKevin Wolf static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
111578a07294SKevin Wolf                                             uint64_t offset, uint64_t bytes,
1116ac850bf0SVladimir Sementsov-Ogievskiy                                             QEMUIOVector *qiov,
1117ac850bf0SVladimir Sementsov-Ogievskiy                                             size_t qiov_offset, int flags)
111878a07294SKevin Wolf {
111978a07294SKevin Wolf     BlockDriver *drv = bs->drv;
11203fb06697SKevin Wolf     int64_t sector_num;
11213fb06697SKevin Wolf     unsigned int nb_sectors;
1122ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
112378a07294SKevin Wolf     int ret;
112478a07294SKevin Wolf 
1125fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
1126fe0480d6SKevin Wolf     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1127fa166538SEric Blake 
1128d470ad42SMax Reitz     if (!drv) {
1129d470ad42SMax Reitz         return -ENOMEDIUM;
1130d470ad42SMax Reitz     }
1131d470ad42SMax Reitz 
1132ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_part) {
1133ac850bf0SVladimir Sementsov-Ogievskiy         ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1134ac850bf0SVladimir Sementsov-Ogievskiy                                         flags & bs->supported_write_flags);
1135ac850bf0SVladimir Sementsov-Ogievskiy         flags &= ~bs->supported_write_flags;
1136ac850bf0SVladimir Sementsov-Ogievskiy         goto emulate_flags;
1137ac850bf0SVladimir Sementsov-Ogievskiy     }
1138ac850bf0SVladimir Sementsov-Ogievskiy 
1139ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset > 0 || bytes != qiov->size) {
1140ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1141ac850bf0SVladimir Sementsov-Ogievskiy         qiov = &local_qiov;
1142ac850bf0SVladimir Sementsov-Ogievskiy     }
1143ac850bf0SVladimir Sementsov-Ogievskiy 
11443fb06697SKevin Wolf     if (drv->bdrv_co_pwritev) {
1145515c2f43SKevin Wolf         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1146515c2f43SKevin Wolf                                    flags & bs->supported_write_flags);
1147515c2f43SKevin Wolf         flags &= ~bs->supported_write_flags;
11483fb06697SKevin Wolf         goto emulate_flags;
11493fb06697SKevin Wolf     }
11503fb06697SKevin Wolf 
1151edfab6a0SEric Blake     if (drv->bdrv_aio_pwritev) {
115208844473SKevin Wolf         BlockAIOCB *acb;
115308844473SKevin Wolf         CoroutineIOCompletion co = {
115408844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
115508844473SKevin Wolf         };
115608844473SKevin Wolf 
1157e31f6864SEric Blake         acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1158e31f6864SEric Blake                                     flags & bs->supported_write_flags,
115908844473SKevin Wolf                                     bdrv_co_io_em_complete, &co);
1160e31f6864SEric Blake         flags &= ~bs->supported_write_flags;
116108844473SKevin Wolf         if (acb == NULL) {
11623fb06697SKevin Wolf             ret = -EIO;
116308844473SKevin Wolf         } else {
116408844473SKevin Wolf             qemu_coroutine_yield();
11653fb06697SKevin Wolf             ret = co.ret;
116608844473SKevin Wolf         }
1167edfab6a0SEric Blake         goto emulate_flags;
1168edfab6a0SEric Blake     }
1169edfab6a0SEric Blake 
1170edfab6a0SEric Blake     sector_num = offset >> BDRV_SECTOR_BITS;
1171edfab6a0SEric Blake     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1172edfab6a0SEric Blake 
11731bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
11741bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
117541ae31e3SAlberto Garcia     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1176edfab6a0SEric Blake 
1177e18a58b4SEric Blake     assert(drv->bdrv_co_writev);
1178e18a58b4SEric Blake     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1179edfab6a0SEric Blake                               flags & bs->supported_write_flags);
1180edfab6a0SEric Blake     flags &= ~bs->supported_write_flags;
118178a07294SKevin Wolf 
11823fb06697SKevin Wolf emulate_flags:
11834df863f3SEric Blake     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
118478a07294SKevin Wolf         ret = bdrv_co_flush(bs);
118578a07294SKevin Wolf     }
118678a07294SKevin Wolf 
1187ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov == &local_qiov) {
1188ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&local_qiov);
1189ac850bf0SVladimir Sementsov-Ogievskiy     }
1190ac850bf0SVladimir Sementsov-Ogievskiy 
119178a07294SKevin Wolf     return ret;
119278a07294SKevin Wolf }
119378a07294SKevin Wolf 
119429a298afSPavel Butsykin static int coroutine_fn
119529a298afSPavel Butsykin bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
1196ac850bf0SVladimir Sementsov-Ogievskiy                                uint64_t bytes, QEMUIOVector *qiov,
1197ac850bf0SVladimir Sementsov-Ogievskiy                                size_t qiov_offset)
119829a298afSPavel Butsykin {
119929a298afSPavel Butsykin     BlockDriver *drv = bs->drv;
1200ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
1201ac850bf0SVladimir Sementsov-Ogievskiy     int ret;
120229a298afSPavel Butsykin 
1203d470ad42SMax Reitz     if (!drv) {
1204d470ad42SMax Reitz         return -ENOMEDIUM;
1205d470ad42SMax Reitz     }
1206d470ad42SMax Reitz 
1207ac850bf0SVladimir Sementsov-Ogievskiy     if (!block_driver_can_compress(drv)) {
120829a298afSPavel Butsykin         return -ENOTSUP;
120929a298afSPavel Butsykin     }
121029a298afSPavel Butsykin 
1211ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_compressed_part) {
1212ac850bf0SVladimir Sementsov-Ogievskiy         return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1213ac850bf0SVladimir Sementsov-Ogievskiy                                                     qiov, qiov_offset);
1214ac850bf0SVladimir Sementsov-Ogievskiy     }
1215ac850bf0SVladimir Sementsov-Ogievskiy 
1216ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset == 0) {
121729a298afSPavel Butsykin         return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
121829a298afSPavel Butsykin     }
121929a298afSPavel Butsykin 
1220ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1221ac850bf0SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1222ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_destroy(&local_qiov);
1223ac850bf0SVladimir Sementsov-Ogievskiy 
1224ac850bf0SVladimir Sementsov-Ogievskiy     return ret;
1225ac850bf0SVladimir Sementsov-Ogievskiy }
1226ac850bf0SVladimir Sementsov-Ogievskiy 
122785c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
12283299e5ecSVladimir Sementsov-Ogievskiy         int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
12291143ec5eSVladimir Sementsov-Ogievskiy         size_t qiov_offset, int flags)
123061007b31SStefan Hajnoczi {
123185c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
123285c97ca7SKevin Wolf 
123361007b31SStefan Hajnoczi     /* Perform I/O through a temporary buffer so that users who scribble over
123461007b31SStefan Hajnoczi      * their read buffer while the operation is in progress do not end up
123561007b31SStefan Hajnoczi      * modifying the image file.  This is critical for zero-copy guest I/O
123661007b31SStefan Hajnoczi      * where anything might happen inside guest memory.
123761007b31SStefan Hajnoczi      */
12382275cc90SVladimir Sementsov-Ogievskiy     void *bounce_buffer = NULL;
123961007b31SStefan Hajnoczi 
124061007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
1241244483e6SKevin Wolf     int64_t cluster_offset;
12427cfd5275SEric Blake     int64_t cluster_bytes;
124361007b31SStefan Hajnoczi     size_t skip_bytes;
124461007b31SStefan Hajnoczi     int ret;
1245cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1246cb2e2878SEric Blake                                     BDRV_REQUEST_MAX_BYTES);
1247cb2e2878SEric Blake     unsigned int progress = 0;
12488644476eSMax Reitz     bool skip_write;
124961007b31SStefan Hajnoczi 
1250d470ad42SMax Reitz     if (!drv) {
1251d470ad42SMax Reitz         return -ENOMEDIUM;
1252d470ad42SMax Reitz     }
1253d470ad42SMax Reitz 
12548644476eSMax Reitz     /*
12558644476eSMax Reitz      * Do not write anything when the BDS is inactive.  That is not
12568644476eSMax Reitz      * allowed, and it would not help.
12578644476eSMax Reitz      */
12588644476eSMax Reitz     skip_write = (bs->open_flags & BDRV_O_INACTIVE);
12598644476eSMax Reitz 
12601bf03e66SKevin Wolf     /* FIXME We cannot require callers to have write permissions when all they
12611bf03e66SKevin Wolf      * are doing is a read request. If we did things right, write permissions
12621bf03e66SKevin Wolf      * would be obtained anyway, but internally by the copy-on-read code. However,
1263765d9df9SEric Blake      * as long as it is implemented here rather than in a separate filter driver,
12641bf03e66SKevin Wolf      * the copy-on-read code doesn't have its own BdrvChild for which it could
12651bf03e66SKevin Wolf      * request permissions. Therefore we have to bypass the permission
12661bf03e66SKevin Wolf      * system for the moment. */
12671bf03e66SKevin Wolf     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1268afa4b293SKevin Wolf 
126961007b31SStefan Hajnoczi     /* Cover the entire cluster so that no additional backing file I/O is
1270cb2e2878SEric Blake      * required when allocating a cluster in the image file.  Note that this
1271cb2e2878SEric Blake      * value may exceed BDRV_REQUEST_MAX_BYTES (even when the original read
1272cb2e2878SEric Blake      * did not), which is one reason we loop rather than doing it all at once.
127361007b31SStefan Hajnoczi      */
1274244483e6SKevin Wolf     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1275cb2e2878SEric Blake     skip_bytes = offset - cluster_offset;
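    /*
     * Editor's note, worked example assuming a 64 KiB cluster size: a read
     * with offset=70000, bytes=1000 rounds to cluster_offset=65536 and
     * cluster_bytes=65536 (one full cluster), with skip_bytes=4464 being the
     * distance from the cluster start to the requested offset.
     */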
127661007b31SStefan Hajnoczi 
1277244483e6SKevin Wolf     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1278244483e6SKevin Wolf                                    cluster_offset, cluster_bytes);
127961007b31SStefan Hajnoczi 
1280cb2e2878SEric Blake     while (cluster_bytes) {
1281cb2e2878SEric Blake         int64_t pnum;
128261007b31SStefan Hajnoczi 
12838644476eSMax Reitz         if (skip_write) {
12848644476eSMax Reitz             ret = 1; /* "already allocated", so nothing will be copied */
12858644476eSMax Reitz             pnum = MIN(cluster_bytes, max_transfer);
12868644476eSMax Reitz         } else {
1287cb2e2878SEric Blake             ret = bdrv_is_allocated(bs, cluster_offset,
1288cb2e2878SEric Blake                                     MIN(cluster_bytes, max_transfer), &pnum);
1289cb2e2878SEric Blake             if (ret < 0) {
12908644476eSMax Reitz                 /*
12918644476eSMax Reitz                  * Safe to treat errors in querying allocation as if
1292cb2e2878SEric Blake                  * unallocated; we'll probably fail again soon on the
1293cb2e2878SEric Blake                  * read, but at least that will set a decent errno.
1294cb2e2878SEric Blake                  */
1295cb2e2878SEric Blake                 pnum = MIN(cluster_bytes, max_transfer);
1296cb2e2878SEric Blake             }
1297cb2e2878SEric Blake 
1298b0ddcbbbSKevin Wolf             /* Stop at EOF if the image ends in the middle of the cluster */
1299b0ddcbbbSKevin Wolf             if (ret == 0 && pnum == 0) {
1300b0ddcbbbSKevin Wolf                 assert(progress >= bytes);
1301b0ddcbbbSKevin Wolf                 break;
1302b0ddcbbbSKevin Wolf             }
1303b0ddcbbbSKevin Wolf 
1304cb2e2878SEric Blake             assert(skip_bytes < pnum);
13058644476eSMax Reitz         }
1306cb2e2878SEric Blake 
1307cb2e2878SEric Blake         if (ret <= 0) {
13081143ec5eSVladimir Sementsov-Ogievskiy             QEMUIOVector local_qiov;
13091143ec5eSVladimir Sementsov-Ogievskiy 
1310cb2e2878SEric Blake             /* Must copy-on-read; use the bounce buffer */
13110d93ed08SVladimir Sementsov-Ogievskiy             pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
13122275cc90SVladimir Sementsov-Ogievskiy             if (!bounce_buffer) {
13132275cc90SVladimir Sementsov-Ogievskiy                 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
13142275cc90SVladimir Sementsov-Ogievskiy                 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
13152275cc90SVladimir Sementsov-Ogievskiy                 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
13162275cc90SVladimir Sementsov-Ogievskiy 
13172275cc90SVladimir Sementsov-Ogievskiy                 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
13182275cc90SVladimir Sementsov-Ogievskiy                 if (!bounce_buffer) {
13192275cc90SVladimir Sementsov-Ogievskiy                     ret = -ENOMEM;
13202275cc90SVladimir Sementsov-Ogievskiy                     goto err;
13212275cc90SVladimir Sementsov-Ogievskiy                 }
13222275cc90SVladimir Sementsov-Ogievskiy             }
13230d93ed08SVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1324cb2e2878SEric Blake 
1325cb2e2878SEric Blake             ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1326ac850bf0SVladimir Sementsov-Ogievskiy                                      &local_qiov, 0, 0);
132761007b31SStefan Hajnoczi             if (ret < 0) {
132861007b31SStefan Hajnoczi                 goto err;
132961007b31SStefan Hajnoczi             }
133061007b31SStefan Hajnoczi 
1331d855ebcdSEric Blake             bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1332c1499a5eSEric Blake             if (drv->bdrv_co_pwrite_zeroes &&
1333cb2e2878SEric Blake                 buffer_is_zero(bounce_buffer, pnum)) {
1334a604fa2bSEric Blake                 /* FIXME: Should we (perhaps conditionally) be setting
1335a604fa2bSEric Blake                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1336a604fa2bSEric Blake                  * that still correctly reads as zero? */
13377adcf59fSMax Reitz                 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
13387adcf59fSMax Reitz                                                BDRV_REQ_WRITE_UNCHANGED);
133961007b31SStefan Hajnoczi             } else {
1340cb2e2878SEric Blake                 /* This does not change the data on the disk, so it is not
1341cb2e2878SEric Blake                  * necessary to flush even in cache=writethrough mode.
134261007b31SStefan Hajnoczi                  */
1343cb2e2878SEric Blake                 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1344ac850bf0SVladimir Sementsov-Ogievskiy                                           &local_qiov, 0,
13457adcf59fSMax Reitz                                           BDRV_REQ_WRITE_UNCHANGED);
134661007b31SStefan Hajnoczi             }
134761007b31SStefan Hajnoczi 
134861007b31SStefan Hajnoczi             if (ret < 0) {
1349cb2e2878SEric Blake                 /* It might be okay to ignore write errors for guest
1350cb2e2878SEric Blake                  * requests, but if this is a deliberate copy-on-read
1351cb2e2878SEric Blake                  * then we don't want to ignore the error.  Simply
1352cb2e2878SEric Blake                  * report it in all cases.
135361007b31SStefan Hajnoczi                  */
135461007b31SStefan Hajnoczi                 goto err;
135561007b31SStefan Hajnoczi             }
135661007b31SStefan Hajnoczi 
13573299e5ecSVladimir Sementsov-Ogievskiy             if (!(flags & BDRV_REQ_PREFETCH)) {
13581143ec5eSVladimir Sementsov-Ogievskiy                 qemu_iovec_from_buf(qiov, qiov_offset + progress,
13591143ec5eSVladimir Sementsov-Ogievskiy                                     bounce_buffer + skip_bytes,
13604ab78b19SVladimir Sementsov-Ogievskiy                                     MIN(pnum - skip_bytes, bytes - progress));
13613299e5ecSVladimir Sementsov-Ogievskiy             }
13623299e5ecSVladimir Sementsov-Ogievskiy         } else if (!(flags & BDRV_REQ_PREFETCH)) {
1363cb2e2878SEric Blake             /* Read directly into the destination */
13641143ec5eSVladimir Sementsov-Ogievskiy             ret = bdrv_driver_preadv(bs, offset + progress,
13651143ec5eSVladimir Sementsov-Ogievskiy                                      MIN(pnum - skip_bytes, bytes - progress),
13661143ec5eSVladimir Sementsov-Ogievskiy                                      qiov, qiov_offset + progress, 0);
1367cb2e2878SEric Blake             if (ret < 0) {
1368cb2e2878SEric Blake                 goto err;
1369cb2e2878SEric Blake             }
1370cb2e2878SEric Blake         }
1371cb2e2878SEric Blake 
1372cb2e2878SEric Blake         cluster_offset += pnum;
1373cb2e2878SEric Blake         cluster_bytes -= pnum;
1374cb2e2878SEric Blake         progress += pnum - skip_bytes;
1375cb2e2878SEric Blake         skip_bytes = 0;
1376cb2e2878SEric Blake     }
1377cb2e2878SEric Blake     ret = 0;
137861007b31SStefan Hajnoczi 
137961007b31SStefan Hajnoczi err:
138061007b31SStefan Hajnoczi     qemu_vfree(bounce_buffer);
138161007b31SStefan Hajnoczi     return ret;
138261007b31SStefan Hajnoczi }
138361007b31SStefan Hajnoczi 
138461007b31SStefan Hajnoczi /*
138561007b31SStefan Hajnoczi  * Forwards an already correctly aligned request to the BlockDriver. This
13861a62d0acSEric Blake  * handles copy on read, zeroing after EOF, and fragmentation of large
13871a62d0acSEric Blake  * reads; any other features must be implemented by the caller.
138861007b31SStefan Hajnoczi  */
138985c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
139061007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
139165cd4424SVladimir Sementsov-Ogievskiy     int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
139261007b31SStefan Hajnoczi {
139385c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
1394c9d20029SKevin Wolf     int64_t total_bytes, max_bytes;
13951a62d0acSEric Blake     int ret = 0;
13961a62d0acSEric Blake     uint64_t bytes_remaining = bytes;
13971a62d0acSEric Blake     int max_transfer;
139861007b31SStefan Hajnoczi 
139949c07526SKevin Wolf     assert(is_power_of_2(align));
140049c07526SKevin Wolf     assert((offset & (align - 1)) == 0);
140149c07526SKevin Wolf     assert((bytes & (align - 1)) == 0);
1402abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
14031a62d0acSEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
14041a62d0acSEric Blake                                    align);
1405a604fa2bSEric Blake 
1406a604fa2bSEric Blake     /* TODO: We would need a per-BDS .supported_read_flags and
1407a604fa2bSEric Blake      * potential fallback support, if we ever implement any read flags
1408a604fa2bSEric Blake      * to pass through to drivers.  For now, there aren't any
1409a604fa2bSEric Blake      * passthrough flags.  */
1410c53cb427SPaolo Bonzini     assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));
141161007b31SStefan Hajnoczi 
141261007b31SStefan Hajnoczi     /* Handle Copy on Read and associated serialisation */
141361007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
141461007b31SStefan Hajnoczi         /* If we touch the same cluster it counts as an overlap.  This
141561007b31SStefan Hajnoczi          * guarantees that allocating writes will be serialized and not race
141661007b31SStefan Hajnoczi          * with each other for the same cluster.  For example, in copy-on-read
141761007b31SStefan Hajnoczi          * it ensures that the CoR read and write operations are atomic and
141861007b31SStefan Hajnoczi          * guest writes cannot interleave between them. */
1419304d9d7fSMax Reitz         bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
142018fbd0deSPaolo Bonzini     } else {
1421304d9d7fSMax Reitz         bdrv_wait_serialising_requests(req);
142218fbd0deSPaolo Bonzini     }
142361007b31SStefan Hajnoczi 
142461007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
1425d6a644bbSEric Blake         int64_t pnum;
142661007b31SStefan Hajnoczi 
142788e63df2SEric Blake         ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
142861007b31SStefan Hajnoczi         if (ret < 0) {
142961007b31SStefan Hajnoczi             goto out;
143061007b31SStefan Hajnoczi         }
143161007b31SStefan Hajnoczi 
143288e63df2SEric Blake         if (!ret || pnum != bytes) {
143365cd4424SVladimir Sementsov-Ogievskiy             ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
143465cd4424SVladimir Sementsov-Ogievskiy                                            qiov, qiov_offset, flags);
14353299e5ecSVladimir Sementsov-Ogievskiy             goto out;
14363299e5ecSVladimir Sementsov-Ogievskiy         } else if (flags & BDRV_REQ_PREFETCH) {
143761007b31SStefan Hajnoczi             goto out;
143861007b31SStefan Hajnoczi         }
143961007b31SStefan Hajnoczi     }
144061007b31SStefan Hajnoczi 
14411a62d0acSEric Blake     /* Forward the request to the BlockDriver, possibly fragmenting it */
144249c07526SKevin Wolf     total_bytes = bdrv_getlength(bs);
144349c07526SKevin Wolf     if (total_bytes < 0) {
144449c07526SKevin Wolf         ret = total_bytes;
144561007b31SStefan Hajnoczi         goto out;
144661007b31SStefan Hajnoczi     }
144761007b31SStefan Hajnoczi 
144849c07526SKevin Wolf     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
14491a62d0acSEric Blake     if (bytes <= max_bytes && bytes <= max_transfer) {
145065cd4424SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0);
14511a62d0acSEric Blake         goto out;
145261007b31SStefan Hajnoczi     }
145361007b31SStefan Hajnoczi 
14541a62d0acSEric Blake     while (bytes_remaining) {
14551a62d0acSEric Blake         int num;
14561a62d0acSEric Blake 
14571a62d0acSEric Blake         if (max_bytes) {
14581a62d0acSEric Blake             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
14591a62d0acSEric Blake             assert(num);
14601a62d0acSEric Blake 
14611a62d0acSEric Blake             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1462134b7decSMax Reitz                                      num, qiov,
1463134b7decSMax Reitz                                      qiov_offset + bytes - bytes_remaining, 0);
14641a62d0acSEric Blake             max_bytes -= num;
14651a62d0acSEric Blake         } else {
14661a62d0acSEric Blake             num = bytes_remaining;
1467134b7decSMax Reitz             ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1468134b7decSMax Reitz                                     0, bytes_remaining);
14691a62d0acSEric Blake         }
14701a62d0acSEric Blake         if (ret < 0) {
14711a62d0acSEric Blake             goto out;
14721a62d0acSEric Blake         }
14731a62d0acSEric Blake         bytes_remaining -= num;
147461007b31SStefan Hajnoczi     }
147561007b31SStefan Hajnoczi 
147661007b31SStefan Hajnoczi out:
14771a62d0acSEric Blake     return ret < 0 ? ret : 0;
147861007b31SStefan Hajnoczi }
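/*
 * Editor's note, worked example of the zero-fill-after-EOF path above,
 * assuming a 1000-byte image and align=512: a 2048-byte read at offset 512
 * gets max_bytes = ROUND_UP(1000 - 512, 512) = 512, so one 512-byte chunk is
 * read from the driver and the remaining 1536 bytes of the qiov are
 * zero-filled by qemu_iovec_memset().
 */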
147961007b31SStefan Hajnoczi 
148061007b31SStefan Hajnoczi /*
14817a3f542fSVladimir Sementsov-Ogievskiy  * Request padding
14827a3f542fSVladimir Sementsov-Ogievskiy  *
14837a3f542fSVladimir Sementsov-Ogievskiy  *  |<---- align ----->|                     |<----- align ---->|
14847a3f542fSVladimir Sementsov-Ogievskiy  *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
14857a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14867a3f542fSVladimir Sementsov-Ogievskiy  * -*----------$-------*-------- ... --------*-----$------------*---
14877a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14887a3f542fSVladimir Sementsov-Ogievskiy  *  |          offset  |                     |     end          |
14897a3f542fSVladimir Sementsov-Ogievskiy  *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
14907a3f542fSVladimir Sementsov-Ogievskiy  *  [buf   ... )                             [tail_buf          )
14917a3f542fSVladimir Sementsov-Ogievskiy  *
14927a3f542fSVladimir Sementsov-Ogievskiy  * @buf is an aligned allocation needed to store @head and @tail paddings. @head
14937a3f542fSVladimir Sementsov-Ogievskiy  * is placed at the beginning of @buf and @tail at the end.
14947a3f542fSVladimir Sementsov-Ogievskiy  *
14957a3f542fSVladimir Sementsov-Ogievskiy  * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
14967a3f542fSVladimir Sementsov-Ogievskiy  * chunk around the tail, if a tail exists.
14977a3f542fSVladimir Sementsov-Ogievskiy  *
14987a3f542fSVladimir Sementsov-Ogievskiy  * @merge_reads is true for small requests, i.e. when
14997a3f542fSVladimir Sementsov-Ogievskiy  * @buf_len == @head + bytes + @tail. In this case it is possible that both
15007a3f542fSVladimir Sementsov-Ogievskiy  * head and tail exist but @buf_len == align and @tail_buf == @buf.
150161007b31SStefan Hajnoczi  */
15027a3f542fSVladimir Sementsov-Ogievskiy typedef struct BdrvRequestPadding {
15037a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *buf;
15047a3f542fSVladimir Sementsov-Ogievskiy     size_t buf_len;
15057a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *tail_buf;
15067a3f542fSVladimir Sementsov-Ogievskiy     size_t head;
15077a3f542fSVladimir Sementsov-Ogievskiy     size_t tail;
15087a3f542fSVladimir Sementsov-Ogievskiy     bool merge_reads;
15097a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
15107a3f542fSVladimir Sementsov-Ogievskiy } BdrvRequestPadding;
15117a3f542fSVladimir Sementsov-Ogievskiy 
15127a3f542fSVladimir Sementsov-Ogievskiy static bool bdrv_init_padding(BlockDriverState *bs,
15137a3f542fSVladimir Sementsov-Ogievskiy                               int64_t offset, int64_t bytes,
15147a3f542fSVladimir Sementsov-Ogievskiy                               BdrvRequestPadding *pad)
15157a3f542fSVladimir Sementsov-Ogievskiy {
15167a3f542fSVladimir Sementsov-Ogievskiy     uint64_t align = bs->bl.request_alignment;
15177a3f542fSVladimir Sementsov-Ogievskiy     size_t sum;
15187a3f542fSVladimir Sementsov-Ogievskiy 
15197a3f542fSVladimir Sementsov-Ogievskiy     memset(pad, 0, sizeof(*pad));
15207a3f542fSVladimir Sementsov-Ogievskiy 
15217a3f542fSVladimir Sementsov-Ogievskiy     pad->head = offset & (align - 1);
15227a3f542fSVladimir Sementsov-Ogievskiy     pad->tail = ((offset + bytes) & (align - 1));
15237a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
15247a3f542fSVladimir Sementsov-Ogievskiy         pad->tail = align - pad->tail;
15257a3f542fSVladimir Sementsov-Ogievskiy     }
15267a3f542fSVladimir Sementsov-Ogievskiy 
1527ac9d00bfSVladimir Sementsov-Ogievskiy     if (!pad->head && !pad->tail) {
15287a3f542fSVladimir Sementsov-Ogievskiy         return false;
15297a3f542fSVladimir Sementsov-Ogievskiy     }
15307a3f542fSVladimir Sementsov-Ogievskiy 
1531ac9d00bfSVladimir Sementsov-Ogievskiy     assert(bytes); /* Nothing good in aligning zero-length requests */
1532ac9d00bfSVladimir Sementsov-Ogievskiy 
15337a3f542fSVladimir Sementsov-Ogievskiy     sum = pad->head + bytes + pad->tail;
15347a3f542fSVladimir Sementsov-Ogievskiy     pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
15357a3f542fSVladimir Sementsov-Ogievskiy     pad->buf = qemu_blockalign(bs, pad->buf_len);
15367a3f542fSVladimir Sementsov-Ogievskiy     pad->merge_reads = sum == pad->buf_len;
15377a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
15387a3f542fSVladimir Sementsov-Ogievskiy         pad->tail_buf = pad->buf + pad->buf_len - align;
15397a3f542fSVladimir Sementsov-Ogievskiy     }
15407a3f542fSVladimir Sementsov-Ogievskiy 
15417a3f542fSVladimir Sementsov-Ogievskiy     return true;
15427a3f542fSVladimir Sementsov-Ogievskiy }
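/*
 * Editor's note, worked example assuming align=512: offset=1000, bytes=3000
 * gives head = 1000 % 512 = 488 and tail = 512 - (4000 % 512) = 96. Since
 * sum = 488 + 3000 + 96 = 3584 exceeds align and both paddings exist,
 * buf_len = 2 * align = 1024, merge_reads is false, and
 * tail_buf = buf + 512.
 */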
15437a3f542fSVladimir Sementsov-Ogievskiy 
15447a3f542fSVladimir Sementsov-Ogievskiy static int bdrv_padding_rmw_read(BdrvChild *child,
15457a3f542fSVladimir Sementsov-Ogievskiy                                  BdrvTrackedRequest *req,
15467a3f542fSVladimir Sementsov-Ogievskiy                                  BdrvRequestPadding *pad,
15477a3f542fSVladimir Sementsov-Ogievskiy                                  bool zero_middle)
15487a3f542fSVladimir Sementsov-Ogievskiy {
15497a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
15507a3f542fSVladimir Sementsov-Ogievskiy     BlockDriverState *bs = child->bs;
15517a3f542fSVladimir Sementsov-Ogievskiy     uint64_t align = bs->bl.request_alignment;
15527a3f542fSVladimir Sementsov-Ogievskiy     int ret;
15537a3f542fSVladimir Sementsov-Ogievskiy 
15547a3f542fSVladimir Sementsov-Ogievskiy     assert(req->serialising && pad->buf);
15557a3f542fSVladimir Sementsov-Ogievskiy 
15567a3f542fSVladimir Sementsov-Ogievskiy     if (pad->head || pad->merge_reads) {
15577a3f542fSVladimir Sementsov-Ogievskiy         uint64_t bytes = pad->merge_reads ? pad->buf_len : align;
15587a3f542fSVladimir Sementsov-Ogievskiy 
15597a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
15607a3f542fSVladimir Sementsov-Ogievskiy 
15617a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
15627a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
15637a3f542fSVladimir Sementsov-Ogievskiy         }
15647a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
15657a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15667a3f542fSVladimir Sementsov-Ogievskiy         }
15677a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
156865cd4424SVladimir Sementsov-Ogievskiy                                   align, &local_qiov, 0, 0);
15697a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15707a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15717a3f542fSVladimir Sementsov-Ogievskiy         }
15727a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
15737a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
15747a3f542fSVladimir Sementsov-Ogievskiy         }
15757a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
15767a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15777a3f542fSVladimir Sementsov-Ogievskiy         }
15787a3f542fSVladimir Sementsov-Ogievskiy 
15797a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads) {
15807a3f542fSVladimir Sementsov-Ogievskiy             goto zero_mem;
15817a3f542fSVladimir Sementsov-Ogievskiy         }
15827a3f542fSVladimir Sementsov-Ogievskiy     }
15837a3f542fSVladimir Sementsov-Ogievskiy 
15847a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
15857a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
15867a3f542fSVladimir Sementsov-Ogievskiy 
15877a3f542fSVladimir Sementsov-Ogievskiy         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15887a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(
15897a3f542fSVladimir Sementsov-Ogievskiy                 child, req,
15907a3f542fSVladimir Sementsov-Ogievskiy                 req->overlap_offset + req->overlap_bytes - align,
159165cd4424SVladimir Sementsov-Ogievskiy                 align, align, &local_qiov, 0, 0);
15927a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15937a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15947a3f542fSVladimir Sementsov-Ogievskiy         }
15957a3f542fSVladimir Sementsov-Ogievskiy         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15967a3f542fSVladimir Sementsov-Ogievskiy     }
15977a3f542fSVladimir Sementsov-Ogievskiy 
15987a3f542fSVladimir Sementsov-Ogievskiy zero_mem:
15997a3f542fSVladimir Sementsov-Ogievskiy     if (zero_middle) {
16007a3f542fSVladimir Sementsov-Ogievskiy         memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
16017a3f542fSVladimir Sementsov-Ogievskiy     }
16027a3f542fSVladimir Sementsov-Ogievskiy 
16037a3f542fSVladimir Sementsov-Ogievskiy     return 0;
16047a3f542fSVladimir Sementsov-Ogievskiy }
16057a3f542fSVladimir Sementsov-Ogievskiy 
16067a3f542fSVladimir Sementsov-Ogievskiy static void bdrv_padding_destroy(BdrvRequestPadding *pad)
16077a3f542fSVladimir Sementsov-Ogievskiy {
16087a3f542fSVladimir Sementsov-Ogievskiy     if (pad->buf) {
16097a3f542fSVladimir Sementsov-Ogievskiy         qemu_vfree(pad->buf);
16107a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&pad->local_qiov);
16117a3f542fSVladimir Sementsov-Ogievskiy     }
16127a3f542fSVladimir Sementsov-Ogievskiy }
16137a3f542fSVladimir Sementsov-Ogievskiy 
16147a3f542fSVladimir Sementsov-Ogievskiy /*
16157a3f542fSVladimir Sementsov-Ogievskiy  * bdrv_pad_request
16167a3f542fSVladimir Sementsov-Ogievskiy  *
16177a3f542fSVladimir Sementsov-Ogievskiy  * Exchange request parameters with a padded request if needed. The RMW read
16187a3f542fSVladimir Sementsov-Ogievskiy  * of the padding is not included; bdrv_padding_rmw_read() should be called
16197a3f542fSVladimir Sementsov-Ogievskiy  * separately if needed.
16207a3f542fSVladimir Sementsov-Ogievskiy  *
16217a3f542fSVladimir Sementsov-Ogievskiy  * All parameters except @bs are in-out: they represent the original request
16227a3f542fSVladimir Sementsov-Ogievskiy  * on entry and the padded request (if padding was needed) on return.
16237a3f542fSVladimir Sementsov-Ogievskiy  *
16247a3f542fSVladimir Sementsov-Ogievskiy  * This function always succeeds.
16257a3f542fSVladimir Sementsov-Ogievskiy  */
16261acc3466SVladimir Sementsov-Ogievskiy static bool bdrv_pad_request(BlockDriverState *bs,
16271acc3466SVladimir Sementsov-Ogievskiy                              QEMUIOVector **qiov, size_t *qiov_offset,
16287a3f542fSVladimir Sementsov-Ogievskiy                              int64_t *offset, unsigned int *bytes,
16297a3f542fSVladimir Sementsov-Ogievskiy                              BdrvRequestPadding *pad)
16307a3f542fSVladimir Sementsov-Ogievskiy {
16317a3f542fSVladimir Sementsov-Ogievskiy     if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
16327a3f542fSVladimir Sementsov-Ogievskiy         return false;
16337a3f542fSVladimir Sementsov-Ogievskiy     }
16347a3f542fSVladimir Sementsov-Ogievskiy 
16357a3f542fSVladimir Sementsov-Ogievskiy     qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
16361acc3466SVladimir Sementsov-Ogievskiy                              *qiov, *qiov_offset, *bytes,
16377a3f542fSVladimir Sementsov-Ogievskiy                              pad->buf + pad->buf_len - pad->tail, pad->tail);
16387a3f542fSVladimir Sementsov-Ogievskiy     *bytes += pad->head + pad->tail;
16397a3f542fSVladimir Sementsov-Ogievskiy     *offset -= pad->head;
16407a3f542fSVladimir Sementsov-Ogievskiy     *qiov = &pad->local_qiov;
16411acc3466SVladimir Sementsov-Ogievskiy     *qiov_offset = 0;
16427a3f542fSVladimir Sementsov-Ogievskiy 
16437a3f542fSVladimir Sementsov-Ogievskiy     return true;
16447a3f542fSVladimir Sementsov-Ogievskiy }
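/*
 * Editor's note: a hedged sketch of the intended caller pattern for a padded
 * write, under the assumption that req, align, pad and flags are already set
 * up as in the real callers later in this file.
 */
#if 0 /* illustrative only */
if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
    bdrv_mark_request_serialising(req, align);
    bdrv_padding_rmw_read(child, req, &pad, false); /* fill head/tail */
}
ret = bdrv_aligned_pwritev(child, req, offset, bytes, align,
                           qiov, qiov_offset, flags);
bdrv_padding_destroy(&pad);
#endif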
16457a3f542fSVladimir Sementsov-Ogievskiy 
1646a03ef88fSKevin Wolf int coroutine_fn bdrv_co_preadv(BdrvChild *child,
164761007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
164861007b31SStefan Hajnoczi     BdrvRequestFlags flags)
164961007b31SStefan Hajnoczi {
16501acc3466SVladimir Sementsov-Ogievskiy     return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
16511acc3466SVladimir Sementsov-Ogievskiy }
16521acc3466SVladimir Sementsov-Ogievskiy 
16531acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
16541acc3466SVladimir Sementsov-Ogievskiy     int64_t offset, unsigned int bytes,
16551acc3466SVladimir Sementsov-Ogievskiy     QEMUIOVector *qiov, size_t qiov_offset,
16561acc3466SVladimir Sementsov-Ogievskiy     BdrvRequestFlags flags)
16571acc3466SVladimir Sementsov-Ogievskiy {
1658a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
165961007b31SStefan Hajnoczi     BdrvTrackedRequest req;
16607a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
166161007b31SStefan Hajnoczi     int ret;
166261007b31SStefan Hajnoczi 
16637a3f542fSVladimir Sementsov-Ogievskiy     trace_bdrv_co_preadv(bs, offset, bytes, flags);
166461007b31SStefan Hajnoczi 
1665f4dad307SVladimir Sementsov-Ogievskiy     if (!bdrv_is_inserted(bs)) {
1666f4dad307SVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
1667f4dad307SVladimir Sementsov-Ogievskiy     }
1668f4dad307SVladimir Sementsov-Ogievskiy 
1669*8b117001SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(offset, bytes);
167061007b31SStefan Hajnoczi     if (ret < 0) {
167161007b31SStefan Hajnoczi         return ret;
167261007b31SStefan Hajnoczi     }
167361007b31SStefan Hajnoczi 
1674ac9d00bfSVladimir Sementsov-Ogievskiy     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1675ac9d00bfSVladimir Sementsov-Ogievskiy         /*
1676ac9d00bfSVladimir Sementsov-Ogievskiy          * Aligning a zero-length request is nonsense. Even if the driver gives
1677ac9d00bfSVladimir Sementsov-Ogievskiy          * special meaning to zero-length (like qcow2_co_pwritev_compressed_part),
1678ac9d00bfSVladimir Sementsov-Ogievskiy          * we can't pass such a request to the driver due to request_alignment.
1679ac9d00bfSVladimir Sementsov-Ogievskiy          *
1680ac9d00bfSVladimir Sementsov-Ogievskiy          * Still, there is no reason to return an error if someone does an
1681ac9d00bfSVladimir Sementsov-Ogievskiy          * unaligned zero-length read occasionally.
1682ac9d00bfSVladimir Sementsov-Ogievskiy          */
1683ac9d00bfSVladimir Sementsov-Ogievskiy         return 0;
1684ac9d00bfSVladimir Sementsov-Ogievskiy     }
1685ac9d00bfSVladimir Sementsov-Ogievskiy 
168699723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
168799723548SPaolo Bonzini 
16889568b511SWen Congyang     /* Don't do copy-on-read if we read data before write operation */
1689d73415a3SStefan Hajnoczi     if (qatomic_read(&bs->copy_on_read)) {
169061007b31SStefan Hajnoczi         flags |= BDRV_REQ_COPY_ON_READ;
169161007b31SStefan Hajnoczi     }
169261007b31SStefan Hajnoczi 
16931acc3466SVladimir Sementsov-Ogievskiy     bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad);
169461007b31SStefan Hajnoczi 
1695ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
16967a3f542fSVladimir Sementsov-Ogievskiy     ret = bdrv_aligned_preadv(child, &req, offset, bytes,
16977a3f542fSVladimir Sementsov-Ogievskiy                               bs->bl.request_alignment,
16981acc3466SVladimir Sementsov-Ogievskiy                               qiov, qiov_offset, flags);
169961007b31SStefan Hajnoczi     tracked_request_end(&req);
170099723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
170161007b31SStefan Hajnoczi 
17027a3f542fSVladimir Sementsov-Ogievskiy     bdrv_padding_destroy(&pad);
170361007b31SStefan Hajnoczi 
170461007b31SStefan Hajnoczi     return ret;
170561007b31SStefan Hajnoczi }
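/*
 * Editor's note: minimal hypothetical sketch (not part of io.c) of reading
 * into a plain buffer from coroutine context with the helpers above.
 */
#if 0 /* illustrative only */
static int coroutine_fn example_co_read(BdrvChild *child, int64_t offset,
                                        void *buf, unsigned int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    /* Returns 0 on success, negative errno on failure. */
    return bdrv_co_preadv(child, offset, bytes, &qiov, 0);
}
#endif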
170661007b31SStefan Hajnoczi 
1707d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1708f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags)
170961007b31SStefan Hajnoczi {
171061007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
171161007b31SStefan Hajnoczi     QEMUIOVector qiov;
17120d93ed08SVladimir Sementsov-Ogievskiy     void *buf = NULL;
171361007b31SStefan Hajnoczi     int ret = 0;
1714465fe887SEric Blake     bool need_flush = false;
1715443668caSDenis V. Lunev     int head = 0;
1716443668caSDenis V. Lunev     int tail = 0;
171761007b31SStefan Hajnoczi 
1718cf081fcaSEric Blake     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1719a5b8dd2cSEric Blake     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1720a5b8dd2cSEric Blake                         bs->bl.request_alignment);
1721cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1722cf081fcaSEric Blake 
1723d470ad42SMax Reitz     if (!drv) {
1724d470ad42SMax Reitz         return -ENOMEDIUM;
1725d470ad42SMax Reitz     }
1726d470ad42SMax Reitz 
1727fe0480d6SKevin Wolf     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1728fe0480d6SKevin Wolf         return -ENOTSUP;
1729fe0480d6SKevin Wolf     }
1730fe0480d6SKevin Wolf 
1731b8d0a980SEric Blake     assert(alignment % bs->bl.request_alignment == 0);
1732b8d0a980SEric Blake     head = offset % alignment;
1733f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % alignment;
1734b8d0a980SEric Blake     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1735b8d0a980SEric Blake     assert(max_write_zeroes >= bs->bl.request_alignment);
173661007b31SStefan Hajnoczi 
1737f5a5ca79SManos Pitsidianakis     while (bytes > 0 && !ret) {
1738f5a5ca79SManos Pitsidianakis         int num = bytes;
173961007b31SStefan Hajnoczi 
174061007b31SStefan Hajnoczi         /* Align request.  Block drivers can expect the "bulk" of the request
1741443668caSDenis V. Lunev          * to be aligned, and that unaligned requests do not cross cluster
1742443668caSDenis V. Lunev          * boundaries.
174361007b31SStefan Hajnoczi          */
1744443668caSDenis V. Lunev         if (head) {
1745b2f95feeSEric Blake             /* Make a small request up to the first aligned sector. For
1746b2f95feeSEric Blake              * convenience, limit this request to max_transfer even if
1747b2f95feeSEric Blake              * we don't need to fall back to writes.  */
1748f5a5ca79SManos Pitsidianakis             num = MIN(MIN(bytes, max_transfer), alignment - head);
1749b2f95feeSEric Blake             head = (head + num) % alignment;
1750b2f95feeSEric Blake             assert(num < max_write_zeroes);
1751d05aa8bbSEric Blake         } else if (tail && num > alignment) {
1752443668caSDenis V. Lunev             /* Shorten the request to the last aligned sector.  */
1753443668caSDenis V. Lunev             num -= tail;
175461007b31SStefan Hajnoczi         }
175561007b31SStefan Hajnoczi 
175661007b31SStefan Hajnoczi         /* limit request size */
175761007b31SStefan Hajnoczi         if (num > max_write_zeroes) {
175861007b31SStefan Hajnoczi             num = max_write_zeroes;
175961007b31SStefan Hajnoczi         }
176061007b31SStefan Hajnoczi 
176161007b31SStefan Hajnoczi         ret = -ENOTSUP;
176261007b31SStefan Hajnoczi         /* First try the efficient write zeroes operation */
1763d05aa8bbSEric Blake         if (drv->bdrv_co_pwrite_zeroes) {
1764d05aa8bbSEric Blake             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1765d05aa8bbSEric Blake                                              flags & bs->supported_zero_flags);
1766d05aa8bbSEric Blake             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1767d05aa8bbSEric Blake                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1768d05aa8bbSEric Blake                 need_flush = true;
1769d05aa8bbSEric Blake             }
1770465fe887SEric Blake         } else {
1771465fe887SEric Blake             assert(!bs->supported_zero_flags);
177261007b31SStefan Hajnoczi         }
177361007b31SStefan Hajnoczi 
1774294682ccSAndrey Shinkevich         if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
177561007b31SStefan Hajnoczi             /* Fall back to bounce buffer if write zeroes is unsupported */
1776465fe887SEric Blake             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1777465fe887SEric Blake 
1778465fe887SEric Blake             if ((flags & BDRV_REQ_FUA) &&
1779465fe887SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1780465fe887SEric Blake                 /* No need for bdrv_driver_pwrite() to do a fallback
1781465fe887SEric Blake                  * flush on each chunk; use just one at the end */
1782465fe887SEric Blake                 write_flags &= ~BDRV_REQ_FUA;
1783465fe887SEric Blake                 need_flush = true;
1784465fe887SEric Blake             }
17855def6b80SEric Blake             num = MIN(num, max_transfer);
17860d93ed08SVladimir Sementsov-Ogievskiy             if (buf == NULL) {
17870d93ed08SVladimir Sementsov-Ogievskiy                 buf = qemu_try_blockalign0(bs, num);
17880d93ed08SVladimir Sementsov-Ogievskiy                 if (buf == NULL) {
178961007b31SStefan Hajnoczi                     ret = -ENOMEM;
179061007b31SStefan Hajnoczi                     goto fail;
179161007b31SStefan Hajnoczi                 }
179261007b31SStefan Hajnoczi             }
17930d93ed08SVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&qiov, buf, num);
179461007b31SStefan Hajnoczi 
1795ac850bf0SVladimir Sementsov-Ogievskiy             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
179661007b31SStefan Hajnoczi 
179761007b31SStefan Hajnoczi             /* Keep the bounce buffer around if it is big enough for
179861007b31SStefan Hajnoczi              * all future requests.
179961007b31SStefan Hajnoczi              */
18005def6b80SEric Blake             if (num < max_transfer) {
18010d93ed08SVladimir Sementsov-Ogievskiy                 qemu_vfree(buf);
18020d93ed08SVladimir Sementsov-Ogievskiy                 buf = NULL;
180361007b31SStefan Hajnoczi             }
180461007b31SStefan Hajnoczi         }
180561007b31SStefan Hajnoczi 
1806d05aa8bbSEric Blake         offset += num;
1807f5a5ca79SManos Pitsidianakis         bytes -= num;
180861007b31SStefan Hajnoczi     }
180961007b31SStefan Hajnoczi 
181061007b31SStefan Hajnoczi fail:
1811465fe887SEric Blake     if (ret == 0 && need_flush) {
1812465fe887SEric Blake         ret = bdrv_co_flush(bs);
1813465fe887SEric Blake     }
18140d93ed08SVladimir Sementsov-Ogievskiy     qemu_vfree(buf);
181561007b31SStefan Hajnoczi     return ret;
181661007b31SStefan Hajnoczi }
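/*
 * Editor's note, worked example of the alignment loop above, assuming
 * alignment=4096 and large max_transfer/max_pwrite_zeroes limits:
 * offset=5000, bytes=100000 gives head = 5000 % 4096 = 904 and
 * tail = 105000 % 4096 = 2600, so the loop issues a 3192-byte request up to
 * the first aligned boundary, then one aligned 94208-byte chunk, then the
 * 2600-byte unaligned tail.
 */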
181761007b31SStefan Hajnoczi 
181885fe2479SFam Zheng static inline int coroutine_fn
181985fe2479SFam Zheng bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
182085fe2479SFam Zheng                           BdrvTrackedRequest *req, int flags)
182185fe2479SFam Zheng {
182285fe2479SFam Zheng     BlockDriverState *bs = child->bs;
182385fe2479SFam Zheng     bool waited;
182485fe2479SFam Zheng     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
182585fe2479SFam Zheng 
182685fe2479SFam Zheng     if (bs->read_only) {
182785fe2479SFam Zheng         return -EPERM;
182885fe2479SFam Zheng     }
182985fe2479SFam Zheng 
183085fe2479SFam Zheng     assert(!(bs->open_flags & BDRV_O_INACTIVE));
183185fe2479SFam Zheng     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
183285fe2479SFam Zheng     assert(!(flags & ~BDRV_REQ_MASK));
183385fe2479SFam Zheng 
183485fe2479SFam Zheng     if (flags & BDRV_REQ_SERIALISING) {
183518fbd0deSPaolo Bonzini         waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
183618fbd0deSPaolo Bonzini         /*
183718fbd0deSPaolo Bonzini          * For a misaligned request we should have already waited earlier,
183818fbd0deSPaolo Bonzini          * because we come after bdrv_padding_rmw_read which must be called
183918fbd0deSPaolo Bonzini          * with the request already marked as serialising.
184018fbd0deSPaolo Bonzini          */
184118fbd0deSPaolo Bonzini         assert(!waited ||
184218fbd0deSPaolo Bonzini                (req->offset == req->overlap_offset &&
184318fbd0deSPaolo Bonzini                 req->bytes == req->overlap_bytes));
184418fbd0deSPaolo Bonzini     } else {
184518fbd0deSPaolo Bonzini         bdrv_wait_serialising_requests(req);
184685fe2479SFam Zheng     }
184785fe2479SFam Zheng 
184885fe2479SFam Zheng     assert(req->overlap_offset <= offset);
184985fe2479SFam Zheng     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1850cd47d792SFam Zheng     assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
185185fe2479SFam Zheng 
1852cd47d792SFam Zheng     switch (req->type) {
1853cd47d792SFam Zheng     case BDRV_TRACKED_WRITE:
1854cd47d792SFam Zheng     case BDRV_TRACKED_DISCARD:
185585fe2479SFam Zheng         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
185685fe2479SFam Zheng             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
185785fe2479SFam Zheng         } else {
185885fe2479SFam Zheng             assert(child->perm & BLK_PERM_WRITE);
185985fe2479SFam Zheng         }
1860cd47d792SFam Zheng         return notifier_with_return_list_notify(&bs->before_write_notifiers,
1861cd47d792SFam Zheng                                                 req);
1862cd47d792SFam Zheng     case BDRV_TRACKED_TRUNCATE:
1863cd47d792SFam Zheng         assert(child->perm & BLK_PERM_RESIZE);
1864cd47d792SFam Zheng         return 0;
1865cd47d792SFam Zheng     default:
1866cd47d792SFam Zheng         abort();
1867cd47d792SFam Zheng     }
186885fe2479SFam Zheng }
186985fe2479SFam Zheng 
187085fe2479SFam Zheng static inline void coroutine_fn
187185fe2479SFam Zheng bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
187285fe2479SFam Zheng                          BdrvTrackedRequest *req, int ret)
187385fe2479SFam Zheng {
187485fe2479SFam Zheng     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
187585fe2479SFam Zheng     BlockDriverState *bs = child->bs;
187685fe2479SFam Zheng 
1877d73415a3SStefan Hajnoczi     qatomic_inc(&bs->write_gen);
187885fe2479SFam Zheng 
187900695c27SFam Zheng     /*
188000695c27SFam Zheng      * Discard cannot extend the image, but in error handling cases, such as
188100695c27SFam Zheng      * when reverting a qcow2 cluster allocation, the discarded range can extend
188200695c27SFam Zheng      * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
188300695c27SFam Zheng      * here. Instead, just skip it, since semantically a discard request
188400695c27SFam Zheng      * beyond EOF cannot expand the image anyway.
188500695c27SFam Zheng      */
18867f8f03efSFam Zheng     if (ret == 0 &&
1887cd47d792SFam Zheng         (req->type == BDRV_TRACKED_TRUNCATE ||
1888cd47d792SFam Zheng          end_sector > bs->total_sectors) &&
188900695c27SFam Zheng         req->type != BDRV_TRACKED_DISCARD) {
18907f8f03efSFam Zheng         bs->total_sectors = end_sector;
18917f8f03efSFam Zheng         bdrv_parent_cb_resize(bs);
18927f8f03efSFam Zheng         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
189385fe2479SFam Zheng     }
189400695c27SFam Zheng     if (req->bytes) {
189500695c27SFam Zheng         switch (req->type) {
189600695c27SFam Zheng         case BDRV_TRACKED_WRITE:
189700695c27SFam Zheng             stat64_max(&bs->wr_highest_offset, offset + bytes);
189800695c27SFam Zheng             /* fall through, to set dirty bits */
189900695c27SFam Zheng         case BDRV_TRACKED_DISCARD:
19007f8f03efSFam Zheng             bdrv_set_dirty(bs, offset, bytes);
190100695c27SFam Zheng             break;
190200695c27SFam Zheng         default:
190300695c27SFam Zheng             break;
190400695c27SFam Zheng         }
190500695c27SFam Zheng     }
190685fe2479SFam Zheng }
190785fe2479SFam Zheng 
190861007b31SStefan Hajnoczi /*
190904ed95f4SEric Blake  * Forwards an already correctly aligned write request to the BlockDriver,
191004ed95f4SEric Blake  * after possibly fragmenting it.
191161007b31SStefan Hajnoczi  */
191285c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
191361007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
191428c4da28SVladimir Sementsov-Ogievskiy     int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
191561007b31SStefan Hajnoczi {
191685c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
191761007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
191861007b31SStefan Hajnoczi     int ret;
191961007b31SStefan Hajnoczi 
192004ed95f4SEric Blake     uint64_t bytes_remaining = bytes;
192104ed95f4SEric Blake     int max_transfer;
192261007b31SStefan Hajnoczi 
1923d470ad42SMax Reitz     if (!drv) {
1924d470ad42SMax Reitz         return -ENOMEDIUM;
1925d470ad42SMax Reitz     }
1926d470ad42SMax Reitz 
1927d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
1928d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
1929d6883bc9SVladimir Sementsov-Ogievskiy     }
1930d6883bc9SVladimir Sementsov-Ogievskiy 
1931cff86b38SEric Blake     assert(is_power_of_2(align));
1932cff86b38SEric Blake     assert((offset & (align - 1)) == 0);
1933cff86b38SEric Blake     assert((bytes & (align - 1)) == 0);
193428c4da28SVladimir Sementsov-Ogievskiy     assert(!qiov || qiov_offset + bytes <= qiov->size);
193504ed95f4SEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
193604ed95f4SEric Blake                                    align);
193761007b31SStefan Hajnoczi 
193885fe2479SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
193961007b31SStefan Hajnoczi 
194061007b31SStefan Hajnoczi     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1941c1499a5eSEric Blake         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
194228c4da28SVladimir Sementsov-Ogievskiy         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
194361007b31SStefan Hajnoczi         flags |= BDRV_REQ_ZERO_WRITE;
194461007b31SStefan Hajnoczi         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
194561007b31SStefan Hajnoczi             flags |= BDRV_REQ_MAY_UNMAP;
194661007b31SStefan Hajnoczi         }
194761007b31SStefan Hajnoczi     }
194861007b31SStefan Hajnoczi 
194961007b31SStefan Hajnoczi     if (ret < 0) {
195061007b31SStefan Hajnoczi         /* Do nothing, write notifier decided to fail this request */
195161007b31SStefan Hajnoczi     } else if (flags & BDRV_REQ_ZERO_WRITE) {
19529a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
19539896c876SKevin Wolf         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
19543ea1a091SPavel Butsykin     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
195528c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
195628c4da28SVladimir Sementsov-Ogievskiy                                              qiov, qiov_offset);
195704ed95f4SEric Blake     } else if (bytes <= max_transfer) {
19589a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV);
195928c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
196004ed95f4SEric Blake     } else {
196104ed95f4SEric Blake         bdrv_debug_event(bs, BLKDBG_PWRITEV);
196204ed95f4SEric Blake         while (bytes_remaining) {
196304ed95f4SEric Blake             int num = MIN(bytes_remaining, max_transfer);
196404ed95f4SEric Blake             int local_flags = flags;
196504ed95f4SEric Blake 
196604ed95f4SEric Blake             assert(num);
196704ed95f4SEric Blake             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
196804ed95f4SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
196904ed95f4SEric Blake                 /* If FUA is going to be emulated by flush, we only
197004ed95f4SEric Blake                  * need to flush on the last iteration */
197104ed95f4SEric Blake                 local_flags &= ~BDRV_REQ_FUA;
197204ed95f4SEric Blake             }
197304ed95f4SEric Blake 
197404ed95f4SEric Blake             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1975134b7decSMax Reitz                                       num, qiov,
1976134b7decSMax Reitz                                       qiov_offset + bytes - bytes_remaining,
197728c4da28SVladimir Sementsov-Ogievskiy                                       local_flags);
197804ed95f4SEric Blake             if (ret < 0) {
197904ed95f4SEric Blake                 break;
198004ed95f4SEric Blake             }
198104ed95f4SEric Blake             bytes_remaining -= num;
198204ed95f4SEric Blake         }
198361007b31SStefan Hajnoczi     }
19849a4f4c31SKevin Wolf     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
198561007b31SStefan Hajnoczi 
198661007b31SStefan Hajnoczi     if (ret >= 0) {
198704ed95f4SEric Blake         ret = 0;
198861007b31SStefan Hajnoczi     }
198985fe2479SFam Zheng     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
199061007b31SStefan Hajnoczi 
199161007b31SStefan Hajnoczi     return ret;
199261007b31SStefan Hajnoczi }
199361007b31SStefan Hajnoczi 
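/*
 * Worked example for the fragmentation loop above (illustrative; not part
 * of the original file): with max_transfer = 64 KiB, an aligned 160 KiB
 * write is issued as three driver calls of 64 KiB, 64 KiB and 32 KiB.  If
 * the caller passed BDRV_REQ_FUA but the driver does not advertise it in
 * supported_write_flags, the flag is dropped for the first two fragments
 * and kept only on the last, so the emulating flush happens exactly once.
 */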
199485c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
19959eeb6dd1SFam Zheng                                                 int64_t offset,
19969eeb6dd1SFam Zheng                                                 unsigned int bytes,
19979eeb6dd1SFam Zheng                                                 BdrvRequestFlags flags,
19989eeb6dd1SFam Zheng                                                 BdrvTrackedRequest *req)
19999eeb6dd1SFam Zheng {
200085c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
20019eeb6dd1SFam Zheng     QEMUIOVector local_qiov;
2002a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
20039eeb6dd1SFam Zheng     int ret = 0;
20047a3f542fSVladimir Sementsov-Ogievskiy     bool padding;
20057a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
20069eeb6dd1SFam Zheng 
20077a3f542fSVladimir Sementsov-Ogievskiy     padding = bdrv_init_padding(bs, offset, bytes, &pad);
20087a3f542fSVladimir Sementsov-Ogievskiy     if (padding) {
2009304d9d7fSMax Reitz         bdrv_mark_request_serialising(req, align);
20109eeb6dd1SFam Zheng 
20117a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, req, &pad, true);
20127a3f542fSVladimir Sementsov-Ogievskiy 
20137a3f542fSVladimir Sementsov-Ogievskiy         if (pad.head || pad.merge_reads) {
20147a3f542fSVladimir Sementsov-Ogievskiy             int64_t aligned_offset = offset & ~(align - 1);
20157a3f542fSVladimir Sementsov-Ogievskiy             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
20167a3f542fSVladimir Sementsov-Ogievskiy 
20177a3f542fSVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
20187a3f542fSVladimir Sementsov-Ogievskiy             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
201928c4da28SVladimir Sementsov-Ogievskiy                                        align, &local_qiov, 0,
20209eeb6dd1SFam Zheng                                        flags & ~BDRV_REQ_ZERO_WRITE);
20217a3f542fSVladimir Sementsov-Ogievskiy             if (ret < 0 || pad.merge_reads) {
20227a3f542fSVladimir Sementsov-Ogievskiy                 /* Error or all work is done */
20237a3f542fSVladimir Sementsov-Ogievskiy                 goto out;
20249eeb6dd1SFam Zheng             }
20257a3f542fSVladimir Sementsov-Ogievskiy             offset += write_bytes - pad.head;
20267a3f542fSVladimir Sementsov-Ogievskiy             bytes -= write_bytes - pad.head;
20277a3f542fSVladimir Sementsov-Ogievskiy         }
20289eeb6dd1SFam Zheng     }
20299eeb6dd1SFam Zheng 
20309eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
20319eeb6dd1SFam Zheng     if (bytes >= align) {
20329eeb6dd1SFam Zheng         /* Write the aligned part in the middle. */
20339eeb6dd1SFam Zheng         uint64_t aligned_bytes = bytes & ~(align - 1);
203485c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
203528c4da28SVladimir Sementsov-Ogievskiy                                    NULL, 0, flags);
20369eeb6dd1SFam Zheng         if (ret < 0) {
20377a3f542fSVladimir Sementsov-Ogievskiy             goto out;
20389eeb6dd1SFam Zheng         }
20399eeb6dd1SFam Zheng         bytes -= aligned_bytes;
20409eeb6dd1SFam Zheng         offset += aligned_bytes;
20419eeb6dd1SFam Zheng     }
20429eeb6dd1SFam Zheng 
20439eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
20449eeb6dd1SFam Zheng     if (bytes) {
20457a3f542fSVladimir Sementsov-Ogievskiy         assert(align == pad.tail + bytes);
20469eeb6dd1SFam Zheng 
20477a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
204885c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
204928c4da28SVladimir Sementsov-Ogievskiy                                    &local_qiov, 0,
205028c4da28SVladimir Sementsov-Ogievskiy                                    flags & ~BDRV_REQ_ZERO_WRITE);
20519eeb6dd1SFam Zheng     }
20529eeb6dd1SFam Zheng 
20537a3f542fSVladimir Sementsov-Ogievskiy out:
20547a3f542fSVladimir Sementsov-Ogievskiy     bdrv_padding_destroy(&pad);
20557a3f542fSVladimir Sementsov-Ogievskiy 
20567a3f542fSVladimir Sementsov-Ogievskiy     return ret;
20579eeb6dd1SFam Zheng }
20589eeb6dd1SFam Zheng 
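/*
 * Worked example for the padding logic above (illustrative; not part of
 * the original file): with align = 512, a zero write covering [700, 3700)
 * is split into a head RMW write of sector [512, 1024) (pad.head = 188),
 * an aligned zero write of [1024, 3584), and a tail RMW write of sector
 * [3584, 4096) (pad.tail = 396).  Only the two padded sectors require
 * reading existing data back from the image.
 */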
205961007b31SStefan Hajnoczi /*
206061007b31SStefan Hajnoczi  * Handle a write request in coroutine context
206161007b31SStefan Hajnoczi  */
2062a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
206361007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
206461007b31SStefan Hajnoczi     BdrvRequestFlags flags)
206561007b31SStefan Hajnoczi {
20661acc3466SVladimir Sementsov-Ogievskiy     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
20671acc3466SVladimir Sementsov-Ogievskiy }
20681acc3466SVladimir Sementsov-Ogievskiy 
20691acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
20701acc3466SVladimir Sementsov-Ogievskiy     int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset,
20711acc3466SVladimir Sementsov-Ogievskiy     BdrvRequestFlags flags)
20721acc3466SVladimir Sementsov-Ogievskiy {
2073a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
207461007b31SStefan Hajnoczi     BdrvTrackedRequest req;
2075a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
20767a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
207761007b31SStefan Hajnoczi     int ret;
207861007b31SStefan Hajnoczi 
2079f42cf447SDaniel P. Berrange     trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
2080f42cf447SDaniel P. Berrange 
2081f4dad307SVladimir Sementsov-Ogievskiy     if (!bdrv_is_inserted(bs)) {
208261007b31SStefan Hajnoczi         return -ENOMEDIUM;
208361007b31SStefan Hajnoczi     }
208461007b31SStefan Hajnoczi 
2085*8b117001SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(offset, bytes);
208661007b31SStefan Hajnoczi     if (ret < 0) {
208761007b31SStefan Hajnoczi         return ret;
208861007b31SStefan Hajnoczi     }
208961007b31SStefan Hajnoczi 
2090f2208fdcSAlberto Garcia     /* If the request is misaligned then we can't make it efficient */
2091f2208fdcSAlberto Garcia     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2092f2208fdcSAlberto Garcia         !QEMU_IS_ALIGNED(offset | bytes, align))
2093f2208fdcSAlberto Garcia     {
2094f2208fdcSAlberto Garcia         return -ENOTSUP;
2095f2208fdcSAlberto Garcia     }
2096f2208fdcSAlberto Garcia 
2097ac9d00bfSVladimir Sementsov-Ogievskiy     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2098ac9d00bfSVladimir Sementsov-Ogievskiy         /*
2099ac9d00bfSVladimir Sementsov-Ogievskiy          * Aligning a zero-length request is nonsense. Even if the driver assigns
2100ac9d00bfSVladimir Sementsov-Ogievskiy          * special meaning to zero length (like qcow2_co_pwritev_compressed_part),
2101ac9d00bfSVladimir Sementsov-Ogievskiy          * we can't pass the request to the driver because of request_alignment.
2102ac9d00bfSVladimir Sementsov-Ogievskiy          *
2103ac9d00bfSVladimir Sementsov-Ogievskiy          * Still, there is no reason to return an error if someone does an
2104ac9d00bfSVladimir Sementsov-Ogievskiy          * unaligned zero-length write occasionally.
2105ac9d00bfSVladimir Sementsov-Ogievskiy          */
2106ac9d00bfSVladimir Sementsov-Ogievskiy         return 0;
2107ac9d00bfSVladimir Sementsov-Ogievskiy     }
2108ac9d00bfSVladimir Sementsov-Ogievskiy 
210999723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
211061007b31SStefan Hajnoczi     /*
211161007b31SStefan Hajnoczi      * Align write if necessary by performing a read-modify-write cycle.
211261007b31SStefan Hajnoczi      * Pad qiov with the read parts and be sure to have a tracked request not
211361007b31SStefan Hajnoczi      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
211461007b31SStefan Hajnoczi      */
2115ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
211661007b31SStefan Hajnoczi 
211718a59f03SAnton Nefedov     if (flags & BDRV_REQ_ZERO_WRITE) {
211885c97ca7SKevin Wolf         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
21199eeb6dd1SFam Zheng         goto out;
21209eeb6dd1SFam Zheng     }
21219eeb6dd1SFam Zheng 
21221acc3466SVladimir Sementsov-Ogievskiy     if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
2123304d9d7fSMax Reitz         bdrv_mark_request_serialising(&req, align);
21247a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, &req, &pad, false);
212561007b31SStefan Hajnoczi     }
212661007b31SStefan Hajnoczi 
212785c97ca7SKevin Wolf     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
21281acc3466SVladimir Sementsov-Ogievskiy                                qiov, qiov_offset, flags);
212961007b31SStefan Hajnoczi 
21307a3f542fSVladimir Sementsov-Ogievskiy     bdrv_padding_destroy(&pad);
213161007b31SStefan Hajnoczi 
21329eeb6dd1SFam Zheng out:
21339eeb6dd1SFam Zheng     tracked_request_end(&req);
213499723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
21357a3f542fSVladimir Sementsov-Ogievskiy 
213661007b31SStefan Hajnoczi     return ret;
213761007b31SStefan Hajnoczi }
213861007b31SStefan Hajnoczi 
2139a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2140f5a5ca79SManos Pitsidianakis                                        int bytes, BdrvRequestFlags flags)
214161007b31SStefan Hajnoczi {
2142f5a5ca79SManos Pitsidianakis     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
214361007b31SStefan Hajnoczi 
2144a03ef88fSKevin Wolf     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
214561007b31SStefan Hajnoczi         flags &= ~BDRV_REQ_MAY_UNMAP;
214661007b31SStefan Hajnoczi     }
214761007b31SStefan Hajnoczi 
2148f5a5ca79SManos Pitsidianakis     return bdrv_co_pwritev(child, offset, bytes, NULL,
214961007b31SStefan Hajnoczi                            BDRV_REQ_ZERO_WRITE | flags);
215061007b31SStefan Hajnoczi }
215161007b31SStefan Hajnoczi 
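/*
 * Minimal usage sketch (illustrative; not part of the original file):
 * zero a range while allowing the driver to unmap it.  The wrapper above
 * strips BDRV_REQ_MAY_UNMAP automatically when the image was opened
 * without BDRV_O_UNMAP.
 */
static int coroutine_fn __attribute__((unused))
bdrv_example_zeroize(BdrvChild *child, int64_t offset, int bytes)
{
    return bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
}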
21524085f5c7SJohn Snow /*
21534085f5c7SJohn Snow  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
21544085f5c7SJohn Snow  */
21554085f5c7SJohn Snow int bdrv_flush_all(void)
21564085f5c7SJohn Snow {
21574085f5c7SJohn Snow     BdrvNextIterator it;
21584085f5c7SJohn Snow     BlockDriverState *bs = NULL;
21594085f5c7SJohn Snow     int result = 0;
21604085f5c7SJohn Snow 
2161c8aa7895SPavel Dovgalyuk     /*
2162c8aa7895SPavel Dovgalyuk      * The bdrv queue is managed by record/replay; creating a
2163c8aa7895SPavel Dovgalyuk      * new flush request when stopping the VM may break
2164c8aa7895SPavel Dovgalyuk      * determinism.
2165c8aa7895SPavel Dovgalyuk      */
2166c8aa7895SPavel Dovgalyuk     if (replay_events_enabled()) {
2167c8aa7895SPavel Dovgalyuk         return result;
2168c8aa7895SPavel Dovgalyuk     }
2169c8aa7895SPavel Dovgalyuk 
21704085f5c7SJohn Snow     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
21714085f5c7SJohn Snow         AioContext *aio_context = bdrv_get_aio_context(bs);
21724085f5c7SJohn Snow         int ret;
21734085f5c7SJohn Snow 
21744085f5c7SJohn Snow         aio_context_acquire(aio_context);
21754085f5c7SJohn Snow         ret = bdrv_flush(bs);
21764085f5c7SJohn Snow         if (ret < 0 && !result) {
21774085f5c7SJohn Snow             result = ret;
21784085f5c7SJohn Snow         }
21794085f5c7SJohn Snow         aio_context_release(aio_context);
21804085f5c7SJohn Snow     }
21814085f5c7SJohn Snow 
21824085f5c7SJohn Snow     return result;
21834085f5c7SJohn Snow }
21844085f5c7SJohn Snow 
218561007b31SStefan Hajnoczi /*
218661007b31SStefan Hajnoczi  * Returns the allocation status of the specified byte range.
218761007b31SStefan Hajnoczi  * Drivers not implementing the functionality are assumed to not support
218861007b31SStefan Hajnoczi  * backing files, hence the whole range is reported as allocated.
218961007b31SStefan Hajnoczi  *
219086a3d5c6SEric Blake  * If 'want_zero' is true, the caller is querying for mapping
219186a3d5c6SEric Blake  * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
219286a3d5c6SEric Blake  * _ZERO where possible; otherwise, the result favors larger 'pnum',
219386a3d5c6SEric Blake  * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2194c9ce8c4dSEric Blake  *
21952e8bc787SEric Blake  * If 'offset' is beyond the end of the disk image the return value is
2196fb0d8654SEric Blake  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
219761007b31SStefan Hajnoczi  *
21982e8bc787SEric Blake  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2199fb0d8654SEric Blake  * beyond the end of the disk image it will be clamped; if 'pnum' reaches
2200fb0d8654SEric Blake  * the end of the image, the returned value will include BDRV_BLOCK_EOF.
220167a0fd2aSFam Zheng  *
22022e8bc787SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
22032e8bc787SEric Blake  * following the specified offset) that are easily known to be in the
22042e8bc787SEric Blake  * same allocated/unallocated state.  Note that a second call starting
22052e8bc787SEric Blake  * at the original offset plus returned pnum may have the same status.
22062e8bc787SEric Blake  * The returned value is non-zero on success except at end-of-file.
22072e8bc787SEric Blake  *
22082e8bc787SEric Blake  * Returns negative errno on failure.  Otherwise, if the
22092e8bc787SEric Blake  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
22102e8bc787SEric Blake  * set to the host mapping and BDS corresponding to the guest offset.
221161007b31SStefan Hajnoczi  */
22122e8bc787SEric Blake static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2213c9ce8c4dSEric Blake                                              bool want_zero,
22142e8bc787SEric Blake                                              int64_t offset, int64_t bytes,
22152e8bc787SEric Blake                                              int64_t *pnum, int64_t *map,
221667a0fd2aSFam Zheng                                              BlockDriverState **file)
221761007b31SStefan Hajnoczi {
22182e8bc787SEric Blake     int64_t total_size;
22192e8bc787SEric Blake     int64_t n; /* bytes */
2220efa6e2edSEric Blake     int ret;
22212e8bc787SEric Blake     int64_t local_map = 0;
2222298a1665SEric Blake     BlockDriverState *local_file = NULL;
2223efa6e2edSEric Blake     int64_t aligned_offset, aligned_bytes;
2224efa6e2edSEric Blake     uint32_t align;
2225549ec0d9SMax Reitz     bool has_filtered_child;
222661007b31SStefan Hajnoczi 
2227298a1665SEric Blake     assert(pnum);
2228298a1665SEric Blake     *pnum = 0;
22292e8bc787SEric Blake     total_size = bdrv_getlength(bs);
22302e8bc787SEric Blake     if (total_size < 0) {
22312e8bc787SEric Blake         ret = total_size;
2232298a1665SEric Blake         goto early_out;
223361007b31SStefan Hajnoczi     }
223461007b31SStefan Hajnoczi 
22352e8bc787SEric Blake     if (offset >= total_size) {
2236298a1665SEric Blake         ret = BDRV_BLOCK_EOF;
2237298a1665SEric Blake         goto early_out;
223861007b31SStefan Hajnoczi     }
22392e8bc787SEric Blake     if (!bytes) {
2240298a1665SEric Blake         ret = 0;
2241298a1665SEric Blake         goto early_out;
22429cdcfd9fSEric Blake     }
224361007b31SStefan Hajnoczi 
22442e8bc787SEric Blake     n = total_size - offset;
22452e8bc787SEric Blake     if (n < bytes) {
22462e8bc787SEric Blake         bytes = n;
224761007b31SStefan Hajnoczi     }
224861007b31SStefan Hajnoczi 
2249d470ad42SMax Reitz     /* Must be non-NULL or bdrv_getlength() would have failed */
2250d470ad42SMax Reitz     assert(bs->drv);
2251549ec0d9SMax Reitz     has_filtered_child = bdrv_filter_child(bs);
2252549ec0d9SMax Reitz     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
22532e8bc787SEric Blake         *pnum = bytes;
225461007b31SStefan Hajnoczi         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
22552e8bc787SEric Blake         if (offset + bytes == total_size) {
2256fb0d8654SEric Blake             ret |= BDRV_BLOCK_EOF;
2257fb0d8654SEric Blake         }
225861007b31SStefan Hajnoczi         if (bs->drv->protocol_name) {
22592e8bc787SEric Blake             ret |= BDRV_BLOCK_OFFSET_VALID;
22602e8bc787SEric Blake             local_map = offset;
2261298a1665SEric Blake             local_file = bs;
226261007b31SStefan Hajnoczi         }
2263298a1665SEric Blake         goto early_out;
226461007b31SStefan Hajnoczi     }
226561007b31SStefan Hajnoczi 
226699723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2267efa6e2edSEric Blake 
2268efa6e2edSEric Blake     /* Round out to request_alignment boundaries */
226986a3d5c6SEric Blake     align = bs->bl.request_alignment;
2270efa6e2edSEric Blake     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2271efa6e2edSEric Blake     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2272efa6e2edSEric Blake 
2273549ec0d9SMax Reitz     if (bs->drv->bdrv_co_block_status) {
227486a3d5c6SEric Blake         ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
227586a3d5c6SEric Blake                                             aligned_bytes, pnum, &local_map,
227686a3d5c6SEric Blake                                             &local_file);
2277549ec0d9SMax Reitz     } else {
2278549ec0d9SMax Reitz         /* Default code for filters */
2279549ec0d9SMax Reitz 
2280549ec0d9SMax Reitz         local_file = bdrv_filter_bs(bs);
2281549ec0d9SMax Reitz         assert(local_file);
2282549ec0d9SMax Reitz 
2283549ec0d9SMax Reitz         *pnum = aligned_bytes;
2284549ec0d9SMax Reitz         local_map = aligned_offset;
2285549ec0d9SMax Reitz         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2286549ec0d9SMax Reitz     }
228786a3d5c6SEric Blake     if (ret < 0) {
228886a3d5c6SEric Blake         *pnum = 0;
228986a3d5c6SEric Blake         goto out;
229086a3d5c6SEric Blake     }
2291efa6e2edSEric Blake 
2292efa6e2edSEric Blake     /*
2293636cb512SEric Blake      * The driver's result must be a non-zero multiple of request_alignment.
2294efa6e2edSEric Blake      * Clamp pnum and adjust map to original request.
2295efa6e2edSEric Blake      */
2296636cb512SEric Blake     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2297636cb512SEric Blake            align > offset - aligned_offset);
229869f47505SVladimir Sementsov-Ogievskiy     if (ret & BDRV_BLOCK_RECURSE) {
229969f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_DATA);
230069f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_OFFSET_VALID);
230169f47505SVladimir Sementsov-Ogievskiy         assert(!(ret & BDRV_BLOCK_ZERO));
230269f47505SVladimir Sementsov-Ogievskiy     }
230369f47505SVladimir Sementsov-Ogievskiy 
2304efa6e2edSEric Blake     *pnum -= offset - aligned_offset;
2305efa6e2edSEric Blake     if (*pnum > bytes) {
2306efa6e2edSEric Blake         *pnum = bytes;
2307efa6e2edSEric Blake     }
2308efa6e2edSEric Blake     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2309efa6e2edSEric Blake         local_map += offset - aligned_offset;
2310efa6e2edSEric Blake     }
231161007b31SStefan Hajnoczi 
231261007b31SStefan Hajnoczi     if (ret & BDRV_BLOCK_RAW) {
2313298a1665SEric Blake         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
23142e8bc787SEric Blake         ret = bdrv_co_block_status(local_file, want_zero, local_map,
23152e8bc787SEric Blake                                    *pnum, pnum, &local_map, &local_file);
231699723548SPaolo Bonzini         goto out;
231761007b31SStefan Hajnoczi     }
231861007b31SStefan Hajnoczi 
231961007b31SStefan Hajnoczi     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
232061007b31SStefan Hajnoczi         ret |= BDRV_BLOCK_ALLOCATED;
2321d40f4a56SAlberto Garcia     } else if (bs->drv->supports_backing) {
2322cb850315SMax Reitz         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2323cb850315SMax Reitz 
2324d40f4a56SAlberto Garcia         if (!cow_bs) {
2325d40f4a56SAlberto Garcia             ret |= BDRV_BLOCK_ZERO;
2326d40f4a56SAlberto Garcia         } else if (want_zero) {
2327cb850315SMax Reitz             int64_t size2 = bdrv_getlength(cow_bs);
2328c9ce8c4dSEric Blake 
23292e8bc787SEric Blake             if (size2 >= 0 && offset >= size2) {
233061007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
233161007b31SStefan Hajnoczi             }
23327b1efe99SVladimir Sementsov-Ogievskiy         }
233361007b31SStefan Hajnoczi     }
233461007b31SStefan Hajnoczi 
233569f47505SVladimir Sementsov-Ogievskiy     if (want_zero && ret & BDRV_BLOCK_RECURSE &&
233669f47505SVladimir Sementsov-Ogievskiy         local_file && local_file != bs &&
233761007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
233861007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_OFFSET_VALID)) {
23392e8bc787SEric Blake         int64_t file_pnum;
23402e8bc787SEric Blake         int ret2;
234161007b31SStefan Hajnoczi 
23422e8bc787SEric Blake         ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
23432e8bc787SEric Blake                                     *pnum, &file_pnum, NULL, NULL);
234461007b31SStefan Hajnoczi         if (ret2 >= 0) {
234561007b31SStefan Hajnoczi             /* Ignore errors.  This is just providing extra information; it
234661007b31SStefan Hajnoczi              * is useful but not necessary.
234761007b31SStefan Hajnoczi              */
2348c61e684eSEric Blake             if (ret2 & BDRV_BLOCK_EOF &&
2349c61e684eSEric Blake                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2350c61e684eSEric Blake                 /*
2351c61e684eSEric Blake                  * It is valid for the format block driver to read
2352c61e684eSEric Blake                  * beyond the end of the underlying file's current
2353c61e684eSEric Blake                  * size; such areas read as zero.
2354c61e684eSEric Blake                  */
235561007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
235661007b31SStefan Hajnoczi             } else {
235761007b31SStefan Hajnoczi                 /* Limit request to the range reported by the protocol driver */
235861007b31SStefan Hajnoczi                 *pnum = file_pnum;
235961007b31SStefan Hajnoczi                 ret |= (ret2 & BDRV_BLOCK_ZERO);
236061007b31SStefan Hajnoczi             }
236161007b31SStefan Hajnoczi         }
236261007b31SStefan Hajnoczi     }
236361007b31SStefan Hajnoczi 
236499723548SPaolo Bonzini out:
236599723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
23662e8bc787SEric Blake     if (ret >= 0 && offset + *pnum == total_size) {
2367fb0d8654SEric Blake         ret |= BDRV_BLOCK_EOF;
2368fb0d8654SEric Blake     }
2369298a1665SEric Blake early_out:
2370298a1665SEric Blake     if (file) {
2371298a1665SEric Blake         *file = local_file;
2372298a1665SEric Blake     }
23732e8bc787SEric Blake     if (map) {
23742e8bc787SEric Blake         *map = local_map;
23752e8bc787SEric Blake     }
237661007b31SStefan Hajnoczi     return ret;
237761007b31SStefan Hajnoczi }
237861007b31SStefan Hajnoczi 
237921c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
2380f9e694cbSVladimir Sementsov-Ogievskiy bdrv_co_common_block_status_above(BlockDriverState *bs,
2381ba3f0e25SFam Zheng                                   BlockDriverState *base,
23823555a432SVladimir Sementsov-Ogievskiy                                   bool include_base,
2383c9ce8c4dSEric Blake                                   bool want_zero,
23845b648c67SEric Blake                                   int64_t offset,
23855b648c67SEric Blake                                   int64_t bytes,
23865b648c67SEric Blake                                   int64_t *pnum,
23875b648c67SEric Blake                                   int64_t *map,
2388a92b1b06SEric Blake                                   BlockDriverState **file,
2389a92b1b06SEric Blake                                   int *depth)
2390ba3f0e25SFam Zheng {
239167c095c8SVladimir Sementsov-Ogievskiy     int ret;
2392ba3f0e25SFam Zheng     BlockDriverState *p;
239367c095c8SVladimir Sementsov-Ogievskiy     int64_t eof = 0;
2394a92b1b06SEric Blake     int dummy;
2395ba3f0e25SFam Zheng 
23963555a432SVladimir Sementsov-Ogievskiy     assert(!include_base || base); /* Can't include NULL base */
239767c095c8SVladimir Sementsov-Ogievskiy 
2398a92b1b06SEric Blake     if (!depth) {
2399a92b1b06SEric Blake         depth = &dummy;
2400a92b1b06SEric Blake     }
2401a92b1b06SEric Blake     *depth = 0;
2402a92b1b06SEric Blake 
2403624f27bbSVladimir Sementsov-Ogievskiy     if (!include_base && bs == base) {
2404624f27bbSVladimir Sementsov-Ogievskiy         *pnum = bytes;
2405624f27bbSVladimir Sementsov-Ogievskiy         return 0;
2406624f27bbSVladimir Sementsov-Ogievskiy     }
2407624f27bbSVladimir Sementsov-Ogievskiy 
240867c095c8SVladimir Sementsov-Ogievskiy     ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
2409a92b1b06SEric Blake     ++*depth;
24103555a432SVladimir Sementsov-Ogievskiy     if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
241167c095c8SVladimir Sementsov-Ogievskiy         return ret;
241267c095c8SVladimir Sementsov-Ogievskiy     }
241367c095c8SVladimir Sementsov-Ogievskiy 
241467c095c8SVladimir Sementsov-Ogievskiy     if (ret & BDRV_BLOCK_EOF) {
241567c095c8SVladimir Sementsov-Ogievskiy         eof = offset + *pnum;
241667c095c8SVladimir Sementsov-Ogievskiy     }
241767c095c8SVladimir Sementsov-Ogievskiy 
241867c095c8SVladimir Sementsov-Ogievskiy     assert(*pnum <= bytes);
241967c095c8SVladimir Sementsov-Ogievskiy     bytes = *pnum;
242067c095c8SVladimir Sementsov-Ogievskiy 
24213555a432SVladimir Sementsov-Ogievskiy     for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
242267c095c8SVladimir Sementsov-Ogievskiy          p = bdrv_filter_or_cow_bs(p))
242367c095c8SVladimir Sementsov-Ogievskiy     {
24245b648c67SEric Blake         ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
24255b648c67SEric Blake                                    file);
2426a92b1b06SEric Blake         ++*depth;
2427c61e684eSEric Blake         if (ret < 0) {
242867c095c8SVladimir Sementsov-Ogievskiy             return ret;
2429c61e684eSEric Blake         }
243067c095c8SVladimir Sementsov-Ogievskiy         if (*pnum == 0) {
2431c61e684eSEric Blake             /*
243267c095c8SVladimir Sementsov-Ogievskiy              * The top layer deferred to this layer, and because this layer is
243367c095c8SVladimir Sementsov-Ogievskiy              * short, any zeroes that we synthesize beyond EOF behave as if they
243467c095c8SVladimir Sementsov-Ogievskiy              * were allocated at this layer.
243567c095c8SVladimir Sementsov-Ogievskiy              *
243667c095c8SVladimir Sementsov-Ogievskiy              * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
243767c095c8SVladimir Sementsov-Ogievskiy              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
243867c095c8SVladimir Sementsov-Ogievskiy              * below.
2439c61e684eSEric Blake              */
244067c095c8SVladimir Sementsov-Ogievskiy             assert(ret & BDRV_BLOCK_EOF);
24415b648c67SEric Blake             *pnum = bytes;
244267c095c8SVladimir Sementsov-Ogievskiy             if (file) {
244367c095c8SVladimir Sementsov-Ogievskiy                 *file = p;
2444c61e684eSEric Blake             }
244567c095c8SVladimir Sementsov-Ogievskiy             ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2446ba3f0e25SFam Zheng             break;
2447ba3f0e25SFam Zheng         }
244867c095c8SVladimir Sementsov-Ogievskiy         if (ret & BDRV_BLOCK_ALLOCATED) {
244967c095c8SVladimir Sementsov-Ogievskiy             /*
245067c095c8SVladimir Sementsov-Ogievskiy              * We've found the node and the status; we must break.
245167c095c8SVladimir Sementsov-Ogievskiy              *
245267c095c8SVladimir Sementsov-Ogievskiy              * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
245367c095c8SVladimir Sementsov-Ogievskiy              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
245467c095c8SVladimir Sementsov-Ogievskiy              * below.
245567c095c8SVladimir Sementsov-Ogievskiy              */
245667c095c8SVladimir Sementsov-Ogievskiy             ret &= ~BDRV_BLOCK_EOF;
245767c095c8SVladimir Sementsov-Ogievskiy             break;
2458ba3f0e25SFam Zheng         }
245967c095c8SVladimir Sementsov-Ogievskiy 
24603555a432SVladimir Sementsov-Ogievskiy         if (p == base) {
24613555a432SVladimir Sementsov-Ogievskiy             assert(include_base);
24623555a432SVladimir Sementsov-Ogievskiy             break;
24633555a432SVladimir Sementsov-Ogievskiy         }
24643555a432SVladimir Sementsov-Ogievskiy 
246567c095c8SVladimir Sementsov-Ogievskiy         /*
246667c095c8SVladimir Sementsov-Ogievskiy          * OK, the [offset, offset + *pnum) region is unallocated on this
246767c095c8SVladimir Sementsov-Ogievskiy          * layer; continue diving down the chain.
246867c095c8SVladimir Sementsov-Ogievskiy          */
246967c095c8SVladimir Sementsov-Ogievskiy         assert(*pnum <= bytes);
247067c095c8SVladimir Sementsov-Ogievskiy         bytes = *pnum;
247167c095c8SVladimir Sementsov-Ogievskiy     }
247267c095c8SVladimir Sementsov-Ogievskiy 
247367c095c8SVladimir Sementsov-Ogievskiy     if (offset + *pnum == eof) {
247467c095c8SVladimir Sementsov-Ogievskiy         ret |= BDRV_BLOCK_EOF;
247567c095c8SVladimir Sementsov-Ogievskiy     }
247667c095c8SVladimir Sementsov-Ogievskiy 
2477ba3f0e25SFam Zheng     return ret;
2478ba3f0e25SFam Zheng }
2479ba3f0e25SFam Zheng 
248031826642SEric Blake int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
248131826642SEric Blake                             int64_t offset, int64_t bytes, int64_t *pnum,
248231826642SEric Blake                             int64_t *map, BlockDriverState **file)
2483c9ce8c4dSEric Blake {
24843555a432SVladimir Sementsov-Ogievskiy     return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
2485a92b1b06SEric Blake                                           pnum, map, file, NULL);
2486c9ce8c4dSEric Blake }
2487c9ce8c4dSEric Blake 
2488237d78f8SEric Blake int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2489237d78f8SEric Blake                       int64_t *pnum, int64_t *map, BlockDriverState **file)
2490ba3f0e25SFam Zheng {
2491cb850315SMax Reitz     return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
249231826642SEric Blake                                    offset, bytes, pnum, map, file);
2493ba3f0e25SFam Zheng }
2494ba3f0e25SFam Zheng 
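/*
 * Usage sketch (illustrative; not part of the original file): walk an
 * image extent by extent with the synchronous wrapper above.  Each call
 * describes one [offset, offset + pnum) range via BDRV_BLOCK_* flags.
 */
static int __attribute__((unused))
bdrv_example_walk_extents(BlockDriverState *bs, int64_t total_size)
{
    int64_t offset = 0;

    while (offset < total_size) {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_block_status(bs, offset, total_size - offset,
                                    &pnum, &map, &file);
        if (ret < 0) {
            return ret;
        }
        /* here, ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO) classifies the extent */
        if ((ret & BDRV_BLOCK_EOF) || pnum == 0) {
            break;
        }
        offset += pnum;
    }
    return 0;
}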
249546cd1e8aSAlberto Garcia /*
249646cd1e8aSAlberto Garcia  * Check @bs (and its backing chain) to see if the range defined
249746cd1e8aSAlberto Garcia  * by @offset and @bytes is known to read as zeroes.
249846cd1e8aSAlberto Garcia  * Return 1 if that is the case, 0 otherwise, and -errno on error.
249946cd1e8aSAlberto Garcia  * This test is meant to be fast rather than accurate so returning 0
250046cd1e8aSAlberto Garcia  * does not guarantee non-zero data.
250146cd1e8aSAlberto Garcia  */
250246cd1e8aSAlberto Garcia int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
250346cd1e8aSAlberto Garcia                                       int64_t bytes)
250446cd1e8aSAlberto Garcia {
250546cd1e8aSAlberto Garcia     int ret;
250646cd1e8aSAlberto Garcia     int64_t pnum = bytes;
250746cd1e8aSAlberto Garcia 
250846cd1e8aSAlberto Garcia     if (!bytes) {
250946cd1e8aSAlberto Garcia         return 1;
251046cd1e8aSAlberto Garcia     }
251146cd1e8aSAlberto Garcia 
251246cd1e8aSAlberto Garcia     ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
2513a92b1b06SEric Blake                                          bytes, &pnum, NULL, NULL, NULL);
251446cd1e8aSAlberto Garcia 
251546cd1e8aSAlberto Garcia     if (ret < 0) {
251646cd1e8aSAlberto Garcia         return ret;
251746cd1e8aSAlberto Garcia     }
251846cd1e8aSAlberto Garcia 
251946cd1e8aSAlberto Garcia     return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
252046cd1e8aSAlberto Garcia }
252146cd1e8aSAlberto Garcia 
2522d6a644bbSEric Blake int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2523d6a644bbSEric Blake                                    int64_t bytes, int64_t *pnum)
252461007b31SStefan Hajnoczi {
25257ddb99b9SEric Blake     int ret;
25267ddb99b9SEric Blake     int64_t dummy;
2527d6a644bbSEric Blake 
25283555a432SVladimir Sementsov-Ogievskiy     ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
25293555a432SVladimir Sementsov-Ogievskiy                                          bytes, pnum ? pnum : &dummy, NULL,
2530a92b1b06SEric Blake                                          NULL, NULL);
253161007b31SStefan Hajnoczi     if (ret < 0) {
253261007b31SStefan Hajnoczi         return ret;
253361007b31SStefan Hajnoczi     }
253461007b31SStefan Hajnoczi     return !!(ret & BDRV_BLOCK_ALLOCATED);
253561007b31SStefan Hajnoczi }
253661007b31SStefan Hajnoczi 
253761007b31SStefan Hajnoczi /*
253861007b31SStefan Hajnoczi  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
253961007b31SStefan Hajnoczi  *
2540a92b1b06SEric Blake  * Return a positive depth if (a prefix of) the given range is allocated
2541a92b1b06SEric Blake  * in any image between BASE and TOP (BASE is only included if include_base
2542a92b1b06SEric Blake  * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
2543170d3bd3SAndrey Shinkevich  * BASE can be NULL to check if the given offset is allocated in any
2544170d3bd3SAndrey Shinkevich  * image of the chain.  Return 0 otherwise, or negative errno on
2545170d3bd3SAndrey Shinkevich  * failure.
254661007b31SStefan Hajnoczi  *
254751b0a488SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
254851b0a488SEric Blake  * following the specified offset) that are known to be in the same
254951b0a488SEric Blake  * allocated/unallocated state.  Note that a subsequent call starting
255051b0a488SEric Blake  * at 'offset + *pnum' may return the same allocation status (in other
255151b0a488SEric Blake  * words, the result is not necessarily the maximum possible range);
255251b0a488SEric Blake  * but 'pnum' will only be 0 when end of file is reached.
255361007b31SStefan Hajnoczi  */
255461007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top,
255561007b31SStefan Hajnoczi                             BlockDriverState *base,
2556170d3bd3SAndrey Shinkevich                             bool include_base, int64_t offset,
2557170d3bd3SAndrey Shinkevich                             int64_t bytes, int64_t *pnum)
255861007b31SStefan Hajnoczi {
2559a92b1b06SEric Blake     int depth;
25607e7e5100SVladimir Sementsov-Ogievskiy     int ret = bdrv_common_block_status_above(top, base, include_base, false,
2561a92b1b06SEric Blake                                              offset, bytes, pnum, NULL, NULL,
2562a92b1b06SEric Blake                                              &depth);
256361007b31SStefan Hajnoczi     if (ret < 0) {
256461007b31SStefan Hajnoczi         return ret;
2565d6a644bbSEric Blake     }
256661007b31SStefan Hajnoczi 
2567a92b1b06SEric Blake     if (ret & BDRV_BLOCK_ALLOCATED) {
2568a92b1b06SEric Blake         return depth;
2569a92b1b06SEric Blake     }
2570a92b1b06SEric Blake     return 0;
257161007b31SStefan Hajnoczi }
257261007b31SStefan Hajnoczi 
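/*
 * Interpretation sketch (illustrative; not part of the original file):
 * depth 1 means the start of the range is allocated in TOP itself, while
 * depth >= 2 means it comes from a backing layer between TOP and BASE.
 */
static bool __attribute__((unused))
bdrv_example_in_backing(BlockDriverState *top, BlockDriverState *base,
                        int64_t offset, int64_t bytes)
{
    int64_t pnum;
    int depth = bdrv_is_allocated_above(top, base, false, offset, bytes, &pnum);

    /* Errors (negative) and "nowhere allocated" (0) both return false here */
    return depth > 1;
}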
257321c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
2574b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
25751a8ae822SKevin Wolf {
25761a8ae822SKevin Wolf     BlockDriver *drv = bs->drv;
2577c4db2e25SMax Reitz     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2578dc88a467SStefan Hajnoczi     int ret = -ENOTSUP;
2579dc88a467SStefan Hajnoczi 
2580b33b354fSVladimir Sementsov-Ogievskiy     if (!drv) {
2581b33b354fSVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
2582b33b354fSVladimir Sementsov-Ogievskiy     }
2583b33b354fSVladimir Sementsov-Ogievskiy 
2584dc88a467SStefan Hajnoczi     bdrv_inc_in_flight(bs);
25851a8ae822SKevin Wolf 
2586b33b354fSVladimir Sementsov-Ogievskiy     if (drv->bdrv_load_vmstate) {
2587dc88a467SStefan Hajnoczi         ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2588c4db2e25SMax Reitz     } else if (child_bs) {
2589b33b354fSVladimir Sementsov-Ogievskiy         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
25901a8ae822SKevin Wolf     }
25911a8ae822SKevin Wolf 
2592dc88a467SStefan Hajnoczi     bdrv_dec_in_flight(bs);
2593b33b354fSVladimir Sementsov-Ogievskiy 
2594b33b354fSVladimir Sementsov-Ogievskiy     return ret;
2595b33b354fSVladimir Sementsov-Ogievskiy }
2596b33b354fSVladimir Sementsov-Ogievskiy 
2597b33b354fSVladimir Sementsov-Ogievskiy int coroutine_fn
2598b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2599b33b354fSVladimir Sementsov-Ogievskiy {
2600b33b354fSVladimir Sementsov-Ogievskiy     BlockDriver *drv = bs->drv;
2601b33b354fSVladimir Sementsov-Ogievskiy     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2602b33b354fSVladimir Sementsov-Ogievskiy     int ret = -ENOTSUP;
2603b33b354fSVladimir Sementsov-Ogievskiy 
2604b33b354fSVladimir Sementsov-Ogievskiy     if (!drv) {
2605b33b354fSVladimir Sementsov-Ogievskiy         return -ENOMEDIUM;
2606b33b354fSVladimir Sementsov-Ogievskiy     }
2607b33b354fSVladimir Sementsov-Ogievskiy 
2608b33b354fSVladimir Sementsov-Ogievskiy     bdrv_inc_in_flight(bs);
2609b33b354fSVladimir Sementsov-Ogievskiy 
2610b33b354fSVladimir Sementsov-Ogievskiy     if (drv->bdrv_save_vmstate) {
2611b33b354fSVladimir Sementsov-Ogievskiy         ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2612b33b354fSVladimir Sementsov-Ogievskiy     } else if (child_bs) {
2613b33b354fSVladimir Sementsov-Ogievskiy         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2614b33b354fSVladimir Sementsov-Ogievskiy     }
2615b33b354fSVladimir Sementsov-Ogievskiy 
2616b33b354fSVladimir Sementsov-Ogievskiy     bdrv_dec_in_flight(bs);
2617b33b354fSVladimir Sementsov-Ogievskiy 
2618dc88a467SStefan Hajnoczi     return ret;
26191a8ae822SKevin Wolf }
26201a8ae822SKevin Wolf 
262161007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
262261007b31SStefan Hajnoczi                       int64_t pos, int size)
262361007b31SStefan Hajnoczi {
26240d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2625b33b354fSVladimir Sementsov-Ogievskiy     int ret = bdrv_writev_vmstate(bs, &qiov, pos);
262661007b31SStefan Hajnoczi 
2627b33b354fSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : size;
262861007b31SStefan Hajnoczi }
262961007b31SStefan Hajnoczi 
263061007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
263161007b31SStefan Hajnoczi                       int64_t pos, int size)
263261007b31SStefan Hajnoczi {
26330d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2634b33b354fSVladimir Sementsov-Ogievskiy     int ret = bdrv_readv_vmstate(bs, &qiov, pos);
26355ddda0b8SKevin Wolf 
2636b33b354fSVladimir Sementsov-Ogievskiy     return ret < 0 ? ret : size;
263761007b31SStefan Hajnoczi }
263861007b31SStefan Hajnoczi 
263961007b31SStefan Hajnoczi /**************************************************************/
264061007b31SStefan Hajnoczi /* async I/Os */
264161007b31SStefan Hajnoczi 
264261007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb)
264361007b31SStefan Hajnoczi {
264461007b31SStefan Hajnoczi     qemu_aio_ref(acb);
264561007b31SStefan Hajnoczi     bdrv_aio_cancel_async(acb);
264661007b31SStefan Hajnoczi     while (acb->refcnt > 1) {
264761007b31SStefan Hajnoczi         if (acb->aiocb_info->get_aio_context) {
264861007b31SStefan Hajnoczi             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
264961007b31SStefan Hajnoczi         } else if (acb->bs) {
26502f47da5fSPaolo Bonzini             /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
26512f47da5fSPaolo Bonzini              * assert that we're not using an I/O thread.  Thread-safe
26522f47da5fSPaolo Bonzini              * code should use bdrv_aio_cancel_async exclusively.
26532f47da5fSPaolo Bonzini              */
26542f47da5fSPaolo Bonzini             assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
265561007b31SStefan Hajnoczi             aio_poll(bdrv_get_aio_context(acb->bs), true);
265661007b31SStefan Hajnoczi         } else {
265761007b31SStefan Hajnoczi             abort();
265861007b31SStefan Hajnoczi         }
265961007b31SStefan Hajnoczi     }
266061007b31SStefan Hajnoczi     qemu_aio_unref(acb);
266161007b31SStefan Hajnoczi }
266261007b31SStefan Hajnoczi 
266361007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
266461007b31SStefan Hajnoczi  * cancel_async; otherwise we do nothing and let the request complete normally.
266561007b31SStefan Hajnoczi  * In either case the completion callback must be called. */
266661007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
266761007b31SStefan Hajnoczi {
266861007b31SStefan Hajnoczi     if (acb->aiocb_info->cancel_async) {
266961007b31SStefan Hajnoczi         acb->aiocb_info->cancel_async(acb);
267061007b31SStefan Hajnoczi     }
267161007b31SStefan Hajnoczi }
267261007b31SStefan Hajnoczi 
267361007b31SStefan Hajnoczi /**************************************************************/
267461007b31SStefan Hajnoczi /* Coroutine block device emulation */
267561007b31SStefan Hajnoczi 
267661007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
267761007b31SStefan Hajnoczi {
2678883833e2SMax Reitz     BdrvChild *primary_child = bdrv_primary_child(bs);
2679883833e2SMax Reitz     BdrvChild *child;
268049ca6259SFam Zheng     int current_gen;
268149ca6259SFam Zheng     int ret = 0;
268261007b31SStefan Hajnoczi 
268399723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2684c32b82afSPavel Dovgalyuk 
2685e914404eSFam Zheng     if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
268649ca6259SFam Zheng         bdrv_is_sg(bs)) {
268749ca6259SFam Zheng         goto early_exit;
268849ca6259SFam Zheng     }
268949ca6259SFam Zheng 
26903783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
2691d73415a3SStefan Hajnoczi     current_gen = qatomic_read(&bs->write_gen);
26923ff2f67aSEvgeny Yakovlev 
26933ff2f67aSEvgeny Yakovlev     /* Wait until any previous flushes are completed */
269499723548SPaolo Bonzini     while (bs->active_flush_req) {
26953783fa3dSPaolo Bonzini         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
26963ff2f67aSEvgeny Yakovlev     }
26973ff2f67aSEvgeny Yakovlev 
26983783fa3dSPaolo Bonzini     /* Flushes reach this point in nondecreasing current_gen order.  */
269999723548SPaolo Bonzini     bs->active_flush_req = true;
27003783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
27013ff2f67aSEvgeny Yakovlev 
2702c32b82afSPavel Dovgalyuk     /* Write back all layers by calling one driver function */
2703c32b82afSPavel Dovgalyuk     if (bs->drv->bdrv_co_flush) {
2704c32b82afSPavel Dovgalyuk         ret = bs->drv->bdrv_co_flush(bs);
2705c32b82afSPavel Dovgalyuk         goto out;
2706c32b82afSPavel Dovgalyuk     }
2707c32b82afSPavel Dovgalyuk 
270861007b31SStefan Hajnoczi     /* Write back cached data to the OS even with cache=unsafe */
2709883833e2SMax Reitz     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
271061007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_os) {
271161007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_os(bs);
271261007b31SStefan Hajnoczi         if (ret < 0) {
2713cdb5e315SFam Zheng             goto out;
271461007b31SStefan Hajnoczi         }
271561007b31SStefan Hajnoczi     }
271661007b31SStefan Hajnoczi 
271761007b31SStefan Hajnoczi     /* But don't actually force it to the disk with cache=unsafe */
271861007b31SStefan Hajnoczi     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2719883833e2SMax Reitz         goto flush_children;
272061007b31SStefan Hajnoczi     }
272161007b31SStefan Hajnoczi 
27223ff2f67aSEvgeny Yakovlev     /* Check if we really need to flush anything */
27233ff2f67aSEvgeny Yakovlev     if (bs->flushed_gen == current_gen) {
2724883833e2SMax Reitz         goto flush_children;
27253ff2f67aSEvgeny Yakovlev     }
27263ff2f67aSEvgeny Yakovlev 
2727883833e2SMax Reitz     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2728d470ad42SMax Reitz     if (!bs->drv) {
2729d470ad42SMax Reitz         /* bs->drv->bdrv_co_flush() might have ejected the BDS
2730d470ad42SMax Reitz          * (even in case of apparent success) */
2731d470ad42SMax Reitz         ret = -ENOMEDIUM;
2732d470ad42SMax Reitz         goto out;
2733d470ad42SMax Reitz     }
273461007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_disk) {
273561007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_disk(bs);
273661007b31SStefan Hajnoczi     } else if (bs->drv->bdrv_aio_flush) {
273761007b31SStefan Hajnoczi         BlockAIOCB *acb;
273861007b31SStefan Hajnoczi         CoroutineIOCompletion co = {
273961007b31SStefan Hajnoczi             .coroutine = qemu_coroutine_self(),
274061007b31SStefan Hajnoczi         };
274161007b31SStefan Hajnoczi 
274261007b31SStefan Hajnoczi         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
274361007b31SStefan Hajnoczi         if (acb == NULL) {
274461007b31SStefan Hajnoczi             ret = -EIO;
274561007b31SStefan Hajnoczi         } else {
274661007b31SStefan Hajnoczi             qemu_coroutine_yield();
274761007b31SStefan Hajnoczi             ret = co.ret;
274861007b31SStefan Hajnoczi         }
274961007b31SStefan Hajnoczi     } else {
275061007b31SStefan Hajnoczi         /*
275161007b31SStefan Hajnoczi          * Some block drivers always operate in either writethrough or unsafe
275261007b31SStefan Hajnoczi          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
275361007b31SStefan Hajnoczi          * know how the server works (because the behaviour is hardcoded or
275461007b31SStefan Hajnoczi          * depends on server-side configuration), so we can't ensure that
275561007b31SStefan Hajnoczi          * everything is safe on disk. Returning an error doesn't work because
275661007b31SStefan Hajnoczi          * that would break guests even if the server operates in writethrough
275761007b31SStefan Hajnoczi          * mode.
275861007b31SStefan Hajnoczi          *
275961007b31SStefan Hajnoczi          * Let's hope the user knows what he's doing.
276061007b31SStefan Hajnoczi          */
276161007b31SStefan Hajnoczi         ret = 0;
276261007b31SStefan Hajnoczi     }
27633ff2f67aSEvgeny Yakovlev 
276461007b31SStefan Hajnoczi     if (ret < 0) {
2765cdb5e315SFam Zheng         goto out;
276661007b31SStefan Hajnoczi     }
276761007b31SStefan Hajnoczi 
276861007b31SStefan Hajnoczi     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
276961007b31SStefan Hajnoczi      * set in the case of cache=unsafe, so there are no useless flushes.
277061007b31SStefan Hajnoczi      */
2771883833e2SMax Reitz flush_children:
2772883833e2SMax Reitz     ret = 0;
2773883833e2SMax Reitz     QLIST_FOREACH(child, &bs->children, next) {
2774883833e2SMax Reitz         if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
2775883833e2SMax Reitz             int this_child_ret = bdrv_co_flush(child->bs);
2776883833e2SMax Reitz             if (!ret) {
2777883833e2SMax Reitz                 ret = this_child_ret;
2778883833e2SMax Reitz             }
2779883833e2SMax Reitz         }
2780883833e2SMax Reitz     }
2781883833e2SMax Reitz 
2782cdb5e315SFam Zheng out:
27833ff2f67aSEvgeny Yakovlev     /* Notify any pending flushes that we have completed */
2784e6af1e08SKevin Wolf     if (ret == 0) {
27853ff2f67aSEvgeny Yakovlev         bs->flushed_gen = current_gen;
2786e6af1e08SKevin Wolf     }
27873783fa3dSPaolo Bonzini 
27883783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
278999723548SPaolo Bonzini     bs->active_flush_req = false;
2790156af3acSDenis V. Lunev     /* Return value is ignored - it's ok if wait queue is empty */
2791156af3acSDenis V. Lunev     qemu_co_queue_next(&bs->flush_queue);
27923783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
27933ff2f67aSEvgeny Yakovlev 
279449ca6259SFam Zheng early_exit:
279599723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2796cdb5e315SFam Zheng     return ret;
279761007b31SStefan Hajnoczi }
279861007b31SStefan Hajnoczi 
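/*
 * Concurrency note for the function above (illustrative; not part of the
 * original file): if two coroutines flush the same BDS back to back, the
 * second samples current_gen and then waits on flush_queue while the
 * first holds active_flush_req.  Once it wakes up, if no write has bumped
 * write_gen in the meantime and the driver has no all-in-one
 * bdrv_co_flush callback, it sees flushed_gen == current_gen and skips
 * the flush-to-disk step, proceeding straight to the children.
 */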
2799d93e5726SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2800d93e5726SVladimir Sementsov-Ogievskiy                                   int64_t bytes)
280161007b31SStefan Hajnoczi {
2802b1066c87SFam Zheng     BdrvTrackedRequest req;
28039f1963b3SEric Blake     int max_pdiscard, ret;
28043482b9bcSEric Blake     int head, tail, align;
28050b9fd3f4SFam Zheng     BlockDriverState *bs = child->bs;
280661007b31SStefan Hajnoczi 
2807d93e5726SVladimir Sementsov-Ogievskiy     if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
280861007b31SStefan Hajnoczi         return -ENOMEDIUM;
280961007b31SStefan Hajnoczi     }
281061007b31SStefan Hajnoczi 
2811d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
2812d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
2813d6883bc9SVladimir Sementsov-Ogievskiy     }
2814d6883bc9SVladimir Sementsov-Ogievskiy 
2815*8b117001SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request(offset, bytes);
2816*8b117001SVladimir Sementsov-Ogievskiy     if (ret < 0) {
2817*8b117001SVladimir Sementsov-Ogievskiy         return ret;
281861007b31SStefan Hajnoczi     }
281961007b31SStefan Hajnoczi 
282061007b31SStefan Hajnoczi     /* Do nothing if disabled.  */
282161007b31SStefan Hajnoczi     if (!(bs->open_flags & BDRV_O_UNMAP)) {
282261007b31SStefan Hajnoczi         return 0;
282361007b31SStefan Hajnoczi     }
282461007b31SStefan Hajnoczi 
282502aefe43SEric Blake     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
282661007b31SStefan Hajnoczi         return 0;
282761007b31SStefan Hajnoczi     }
282861007b31SStefan Hajnoczi 
28293482b9bcSEric Blake     /* Discard is advisory, but some devices track and coalesce
28303482b9bcSEric Blake      * unaligned requests, so we must pass everything down rather than
28313482b9bcSEric Blake      * round here.  Still, most devices will just silently ignore
28323482b9bcSEric Blake      * unaligned requests (by returning -ENOTSUP), so we must fragment
28333482b9bcSEric Blake      * the request accordingly.  */
283402aefe43SEric Blake     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2835b8d0a980SEric Blake     assert(align % bs->bl.request_alignment == 0);
2836b8d0a980SEric Blake     head = offset % align;
2837f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % align;
28389f1963b3SEric Blake 
283999723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2840f5a5ca79SManos Pitsidianakis     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
284150824995SFam Zheng 
284200695c27SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
2843ec050f77SDenis V. Lunev     if (ret < 0) {
2844ec050f77SDenis V. Lunev         goto out;
2845ec050f77SDenis V. Lunev     }
2846ec050f77SDenis V. Lunev 
28479f1963b3SEric Blake     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
28489f1963b3SEric Blake                                    align);
28493482b9bcSEric Blake     assert(max_pdiscard >= bs->bl.request_alignment);
28509f1963b3SEric Blake 
2851f5a5ca79SManos Pitsidianakis     while (bytes > 0) {
2852d93e5726SVladimir Sementsov-Ogievskiy         int64_t num = bytes;
28533482b9bcSEric Blake 
28543482b9bcSEric Blake         if (head) {
28553482b9bcSEric Blake             /* Make small requests to get to alignment boundaries. */
2856f5a5ca79SManos Pitsidianakis             num = MIN(bytes, align - head);
28573482b9bcSEric Blake             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
28583482b9bcSEric Blake                 num %= bs->bl.request_alignment;
28593482b9bcSEric Blake             }
28603482b9bcSEric Blake             head = (head + num) % align;
28613482b9bcSEric Blake             assert(num < max_pdiscard);
28623482b9bcSEric Blake         } else if (tail) {
28633482b9bcSEric Blake             if (num > align) {
28643482b9bcSEric Blake                 /* Shorten the request to the last aligned cluster.  */
28653482b9bcSEric Blake                 num -= tail;
28663482b9bcSEric Blake             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
28673482b9bcSEric Blake                        tail > bs->bl.request_alignment) {
28683482b9bcSEric Blake                 tail %= bs->bl.request_alignment;
28693482b9bcSEric Blake                 num -= tail;
28703482b9bcSEric Blake             }
28713482b9bcSEric Blake         }
28723482b9bcSEric Blake         /* limit request size */
28733482b9bcSEric Blake         if (num > max_pdiscard) {
28743482b9bcSEric Blake             num = max_pdiscard;
28753482b9bcSEric Blake         }
287661007b31SStefan Hajnoczi 
2877d470ad42SMax Reitz         if (!bs->drv) {
2878d470ad42SMax Reitz             ret = -ENOMEDIUM;
2879d470ad42SMax Reitz             goto out;
2880d470ad42SMax Reitz         }
288147a5486dSEric Blake         if (bs->drv->bdrv_co_pdiscard) {
288247a5486dSEric Blake             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
288361007b31SStefan Hajnoczi         } else {
288461007b31SStefan Hajnoczi             BlockAIOCB *acb;
288561007b31SStefan Hajnoczi             CoroutineIOCompletion co = {
288661007b31SStefan Hajnoczi                 .coroutine = qemu_coroutine_self(),
288761007b31SStefan Hajnoczi             };
288861007b31SStefan Hajnoczi 
28894da444a0SEric Blake             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
289061007b31SStefan Hajnoczi                                              bdrv_co_io_em_complete, &co);
289161007b31SStefan Hajnoczi             if (acb == NULL) {
2892b1066c87SFam Zheng                 ret = -EIO;
2893b1066c87SFam Zheng                 goto out;
289461007b31SStefan Hajnoczi             } else {
289561007b31SStefan Hajnoczi                 qemu_coroutine_yield();
289661007b31SStefan Hajnoczi                 ret = co.ret;
289761007b31SStefan Hajnoczi             }
289861007b31SStefan Hajnoczi         }
289961007b31SStefan Hajnoczi         if (ret && ret != -ENOTSUP) {
2900b1066c87SFam Zheng             goto out;
290161007b31SStefan Hajnoczi         }
290261007b31SStefan Hajnoczi 
29039f1963b3SEric Blake         offset += num;
2904f5a5ca79SManos Pitsidianakis         bytes -= num;
290561007b31SStefan Hajnoczi     }
2906b1066c87SFam Zheng     ret = 0;
2907b1066c87SFam Zheng out:
290800695c27SFam Zheng     bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
2909b1066c87SFam Zheng     tracked_request_end(&req);
291099723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2911b1066c87SFam Zheng     return ret;
291261007b31SStefan Hajnoczi }
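
/*
 * Worked example (illustrative, not part of the original file): assume a
 * device with bl.pdiscard_alignment = 64 KiB, bl.request_alignment = 4 KiB
 * and a large max_pdiscard, so align = 64 KiB.  A discard of
 * [60 KiB, 200 KiB) (head = 60 KiB, tail = 8 KiB) is fragmented by the
 * loop above into three driver calls:
 *
 *   1. [60 KiB, 64 KiB)    num = 4 KiB    (head pass up to the boundary)
 *   2. [64 KiB, 192 KiB)   num = 128 KiB  (aligned bulk, tail sliced off)
 *   3. [192 KiB, 200 KiB)  num = 8 KiB    (unaligned remainder)
 *
 * -ENOTSUP from the driver on the unaligned passes is tolerated; any other
 * error aborts the loop.
 */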
291361007b31SStefan Hajnoczi 
291448af776aSKevin Wolf int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
291561007b31SStefan Hajnoczi {
291661007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
29175c5ae76aSFam Zheng     CoroutineIOCompletion co = {
29185c5ae76aSFam Zheng         .coroutine = qemu_coroutine_self(),
29195c5ae76aSFam Zheng     };
29205c5ae76aSFam Zheng     BlockAIOCB *acb;
292161007b31SStefan Hajnoczi 
292299723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
292316a389dcSKevin Wolf     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
29245c5ae76aSFam Zheng         co.ret = -ENOTSUP;
29255c5ae76aSFam Zheng         goto out;
29265c5ae76aSFam Zheng     }
29275c5ae76aSFam Zheng 
292816a389dcSKevin Wolf     if (drv->bdrv_co_ioctl) {
292916a389dcSKevin Wolf         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
293016a389dcSKevin Wolf     } else {
29315c5ae76aSFam Zheng         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
29325c5ae76aSFam Zheng         if (!acb) {
2933c8a9fd80SFam Zheng             co.ret = -ENOTSUP;
2934c8a9fd80SFam Zheng             goto out;
29355c5ae76aSFam Zheng         }
29365c5ae76aSFam Zheng         qemu_coroutine_yield();
293716a389dcSKevin Wolf     }
29385c5ae76aSFam Zheng out:
293999723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
29405c5ae76aSFam Zheng     return co.ret;
29415c5ae76aSFam Zheng }
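
/*
 * For reference (illustrative, not part of the original file): the
 * AIO-to-coroutine bridge used above.  bdrv_co_io_em_complete(), defined
 * earlier in this file, is roughly:
 *
 *   static void bdrv_co_io_em_complete(void *opaque, int ret)
 *   {
 *       CoroutineIOCompletion *co = opaque;
 *
 *       co->ret = ret;
 *       aio_co_wake(co->coroutine);
 *   }
 *
 * so bdrv_co_ioctl() can submit the AIO request, yield, and read the result
 * from co.ret once the driver's completion callback has run.
 */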
29425c5ae76aSFam Zheng 
294361007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size)
294461007b31SStefan Hajnoczi {
294561007b31SStefan Hajnoczi     return qemu_memalign(bdrv_opt_mem_align(bs), size);
294661007b31SStefan Hajnoczi }
294761007b31SStefan Hajnoczi 
294861007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size)
294961007b31SStefan Hajnoczi {
295061007b31SStefan Hajnoczi     return memset(qemu_blockalign(bs, size), 0, size);
295161007b31SStefan Hajnoczi }
295261007b31SStefan Hajnoczi 
295361007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
295461007b31SStefan Hajnoczi {
295561007b31SStefan Hajnoczi     size_t align = bdrv_opt_mem_align(bs);
295661007b31SStefan Hajnoczi 
295761007b31SStefan Hajnoczi     /* Ensure that NULL is never returned on success */
295861007b31SStefan Hajnoczi     assert(align > 0);
295961007b31SStefan Hajnoczi     if (size == 0) {
296061007b31SStefan Hajnoczi         size = align;
296161007b31SStefan Hajnoczi     }
296261007b31SStefan Hajnoczi 
296361007b31SStefan Hajnoczi     return qemu_try_memalign(align, size);
296461007b31SStefan Hajnoczi }
296561007b31SStefan Hajnoczi 
296661007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
296761007b31SStefan Hajnoczi {
296861007b31SStefan Hajnoczi     void *mem = qemu_try_blockalign(bs, size);
296961007b31SStefan Hajnoczi 
297061007b31SStefan Hajnoczi     if (mem) {
297161007b31SStefan Hajnoczi         memset(mem, 0, size);
297261007b31SStefan Hajnoczi     }
297361007b31SStefan Hajnoczi 
297461007b31SStefan Hajnoczi     return mem;
297561007b31SStefan Hajnoczi }
297661007b31SStefan Hajnoczi 
297761007b31SStefan Hajnoczi /*
297861007b31SStefan Hajnoczi  * Check if all memory in this vector is aligned to bdrv_min_mem_align(bs).
297961007b31SStefan Hajnoczi  */
298061007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
298161007b31SStefan Hajnoczi {
298261007b31SStefan Hajnoczi     int i;
29834196d2f0SDenis V. Lunev     size_t alignment = bdrv_min_mem_align(bs);
298461007b31SStefan Hajnoczi 
298561007b31SStefan Hajnoczi     for (i = 0; i < qiov->niov; i++) {
298661007b31SStefan Hajnoczi         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
298761007b31SStefan Hajnoczi             return false;
298861007b31SStefan Hajnoczi         }
298961007b31SStefan Hajnoczi         if (qiov->iov[i].iov_len % alignment) {
299061007b31SStefan Hajnoczi             return false;
299161007b31SStefan Hajnoczi         }
299261007b31SStefan Hajnoczi     }
299361007b31SStefan Hajnoczi 
299461007b31SStefan Hajnoczi     return true;
299561007b31SStefan Hajnoczi }
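
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * deciding whether a guest-supplied vector can be submitted directly or
 * must be bounced through an aligned buffer.  maybe_alloc_bounce() is a
 * hypothetical helper.
 *
 *   static void *maybe_alloc_bounce(BlockDriverState *bs, QEMUIOVector *qiov)
 *   {
 *       if (bdrv_qiov_is_aligned(bs, qiov)) {
 *           return NULL;  // direct submission is safe
 *       }
 *       // unlike qemu_blockalign(), this can return NULL on failure
 *       return qemu_try_blockalign(bs, qiov->size);
 *   }
 */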
299661007b31SStefan Hajnoczi 
299761007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs,
299861007b31SStefan Hajnoczi                                     NotifierWithReturn *notifier)
299961007b31SStefan Hajnoczi {
300061007b31SStefan Hajnoczi     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
300161007b31SStefan Hajnoczi }
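
/*
 * Usage sketch (illustrative, not part of the original file), in the style
 * of the backup job: the notifier fires with the BdrvTrackedRequest of the
 * write about to happen, and a negative return value fails that write.
 *
 *   static int coroutine_fn my_before_write(NotifierWithReturn *notifier,
 *                                           void *opaque)
 *   {
 *       BdrvTrackedRequest *req = opaque;
 *
 *       // e.g. copy out [req->offset, req->offset + req->bytes) before it
 *       // is overwritten
 *       return 0;
 *   }
 *
 *   before_write.notify = my_before_write;
 *   bdrv_add_before_write_notifier(bs, &before_write);
 */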
300261007b31SStefan Hajnoczi 
300361007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs)
300461007b31SStefan Hajnoczi {
30056b98bd64SPaolo Bonzini     BdrvChild *child;
30066b98bd64SPaolo Bonzini 
30076b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
30086b98bd64SPaolo Bonzini         bdrv_io_plug(child->bs);
30096b98bd64SPaolo Bonzini     }
30106b98bd64SPaolo Bonzini 
3011d73415a3SStefan Hajnoczi     if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
301261007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
301361007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_plug) {
301461007b31SStefan Hajnoczi             drv->bdrv_io_plug(bs);
30156b98bd64SPaolo Bonzini         }
301661007b31SStefan Hajnoczi     }
301761007b31SStefan Hajnoczi }
301861007b31SStefan Hajnoczi 
301961007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs)
302061007b31SStefan Hajnoczi {
30216b98bd64SPaolo Bonzini     BdrvChild *child;
30226b98bd64SPaolo Bonzini 
30236b98bd64SPaolo Bonzini     assert(bs->io_plugged);
3024d73415a3SStefan Hajnoczi     if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
302561007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
302661007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_unplug) {
302761007b31SStefan Hajnoczi             drv->bdrv_io_unplug(bs);
302861007b31SStefan Hajnoczi         }
302961007b31SStefan Hajnoczi     }
303061007b31SStefan Hajnoczi 
30316b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
30326b98bd64SPaolo Bonzini         bdrv_io_unplug(child->bs);
30336b98bd64SPaolo Bonzini     }
30346b98bd64SPaolo Bonzini }
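
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *   bdrv_io_plug(bs);
 *   for (i = 0; i < n; i++) {
 *       submit_one_request(bs, &reqs[i]);   // hypothetical helper
 *   }
 *   bdrv_io_unplug(bs);
 *
 * The io_plugged counter makes nesting safe: only the 0->1 transition
 * reaches drv->bdrv_io_plug and only the 1->0 transition reaches
 * drv->bdrv_io_unplug, so a driver backend such as linux-aio can submit
 * everything queued in between as a single batch.
 */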
303523d0ba93SFam Zheng 
303623d0ba93SFam Zheng void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
303723d0ba93SFam Zheng {
303823d0ba93SFam Zheng     BdrvChild *child;
303923d0ba93SFam Zheng 
304023d0ba93SFam Zheng     if (bs->drv && bs->drv->bdrv_register_buf) {
304123d0ba93SFam Zheng         bs->drv->bdrv_register_buf(bs, host, size);
304223d0ba93SFam Zheng     }
304323d0ba93SFam Zheng     QLIST_FOREACH(child, &bs->children, next) {
304423d0ba93SFam Zheng         bdrv_register_buf(child->bs, host, size);
304523d0ba93SFam Zheng     }
304623d0ba93SFam Zheng }
304723d0ba93SFam Zheng 
304823d0ba93SFam Zheng void bdrv_unregister_buf(BlockDriverState *bs, void *host)
304923d0ba93SFam Zheng {
305023d0ba93SFam Zheng     BdrvChild *child;
305123d0ba93SFam Zheng 
305223d0ba93SFam Zheng     if (bs->drv && bs->drv->bdrv_unregister_buf) {
305323d0ba93SFam Zheng         bs->drv->bdrv_unregister_buf(bs, host);
305423d0ba93SFam Zheng     }
305523d0ba93SFam Zheng     QLIST_FOREACH(child, &bs->children, next) {
305623d0ba93SFam Zheng         bdrv_unregister_buf(child->bs, host);
305723d0ba93SFam Zheng     }
305823d0ba93SFam Zheng }
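
/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * reuses one buffer for many requests can register it once, letting drivers
 * with expensive per-mapping setup (e.g. block/nvme.c's VFIO DMA mapping)
 * do the work up front:
 *
 *   bdrv_register_buf(bs, bounce_buf, bounce_len);
 *   ... many reads/writes using bounce_buf ...
 *   bdrv_unregister_buf(bs, bounce_buf);
 */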
3059fcc67678SFam Zheng 
306067b51fb9SVladimir Sementsov-Ogievskiy static int coroutine_fn bdrv_co_copy_range_internal(
306167b51fb9SVladimir Sementsov-Ogievskiy         BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
306267b51fb9SVladimir Sementsov-Ogievskiy         uint64_t dst_offset, uint64_t bytes,
306367b51fb9SVladimir Sementsov-Ogievskiy         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3064fcc67678SFam Zheng         bool recurse_src)
3065fcc67678SFam Zheng {
3066999658a0SVladimir Sementsov-Ogievskiy     BdrvTrackedRequest req;
3067fcc67678SFam Zheng     int ret;
3068fcc67678SFam Zheng 
3069fe0480d6SKevin Wolf     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3070fe0480d6SKevin Wolf     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3071fe0480d6SKevin Wolf     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3072fe0480d6SKevin Wolf 
3073f4dad307SVladimir Sementsov-Ogievskiy     if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
3074fcc67678SFam Zheng         return -ENOMEDIUM;
3075fcc67678SFam Zheng     }
3076*8b117001SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(dst_offset, bytes);
3077fcc67678SFam Zheng     if (ret) {
3078fcc67678SFam Zheng         return ret;
3079fcc67678SFam Zheng     }
308067b51fb9SVladimir Sementsov-Ogievskiy     if (write_flags & BDRV_REQ_ZERO_WRITE) {
308167b51fb9SVladimir Sementsov-Ogievskiy         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3082fcc67678SFam Zheng     }
3083fcc67678SFam Zheng 
3084f4dad307SVladimir Sementsov-Ogievskiy     if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
3085d4d3e5a0SFam Zheng         return -ENOMEDIUM;
3086d4d3e5a0SFam Zheng     }
3087*8b117001SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request32(src_offset, bytes);
3088d4d3e5a0SFam Zheng     if (ret) {
3089d4d3e5a0SFam Zheng         return ret;
3090d4d3e5a0SFam Zheng     }
3091d4d3e5a0SFam Zheng 
3092fcc67678SFam Zheng     if (!src->bs->drv->bdrv_co_copy_range_from
3093fcc67678SFam Zheng         || !dst->bs->drv->bdrv_co_copy_range_to
3094fcc67678SFam Zheng         || src->bs->encrypted || dst->bs->encrypted) {
3095fcc67678SFam Zheng         return -ENOTSUP;
3096fcc67678SFam Zheng     }
3097999658a0SVladimir Sementsov-Ogievskiy 
3098999658a0SVladimir Sementsov-Ogievskiy     if (recurse_src) {
3099d4d3e5a0SFam Zheng         bdrv_inc_in_flight(src->bs);
3100999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, src->bs, src_offset, bytes,
3101999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_READ);
310237aec7d7SFam Zheng 
310309d2f948SVladimir Sementsov-Ogievskiy         /* BDRV_REQ_SERIALISING is only for write operations */
310409d2f948SVladimir Sementsov-Ogievskiy         assert(!(read_flags & BDRV_REQ_SERIALISING));
3105304d9d7fSMax Reitz         bdrv_wait_serialising_requests(&req);
3106999658a0SVladimir Sementsov-Ogievskiy 
310737aec7d7SFam Zheng         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3108fcc67678SFam Zheng                                                     src, src_offset,
3109fcc67678SFam Zheng                                                     dst, dst_offset,
311067b51fb9SVladimir Sementsov-Ogievskiy                                                     bytes,
311167b51fb9SVladimir Sementsov-Ogievskiy                                                     read_flags, write_flags);
3112999658a0SVladimir Sementsov-Ogievskiy 
3113999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3114999658a0SVladimir Sementsov-Ogievskiy         bdrv_dec_in_flight(src->bs);
3115fcc67678SFam Zheng     } else {
3116999658a0SVladimir Sementsov-Ogievskiy         bdrv_inc_in_flight(dst->bs);
3117999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3118999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_WRITE);
31190eb1e891SFam Zheng         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
31200eb1e891SFam Zheng                                         write_flags);
31210eb1e891SFam Zheng         if (!ret) {
312237aec7d7SFam Zheng             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3123fcc67678SFam Zheng                                                       src, src_offset,
3124fcc67678SFam Zheng                                                       dst, dst_offset,
312567b51fb9SVladimir Sementsov-Ogievskiy                                                       bytes,
312667b51fb9SVladimir Sementsov-Ogievskiy                                                       read_flags, write_flags);
31270eb1e891SFam Zheng         }
31280eb1e891SFam Zheng         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3129999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3130d4d3e5a0SFam Zheng         bdrv_dec_in_flight(dst->bs);
3131999658a0SVladimir Sementsov-Ogievskiy     }
3132999658a0SVladimir Sementsov-Ogievskiy 
313337aec7d7SFam Zheng     return ret;
3134fcc67678SFam Zheng }
3135fcc67678SFam Zheng 
3136fcc67678SFam Zheng /* Copy range from @src to @dst.
3137fcc67678SFam Zheng  *
3138fcc67678SFam Zheng  * See the comment on bdrv_co_copy_range for the parameter and return value
3139fcc67678SFam Zheng  * semantics. */
3140fcc67678SFam Zheng int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3141fcc67678SFam Zheng                                          BdrvChild *dst, uint64_t dst_offset,
314267b51fb9SVladimir Sementsov-Ogievskiy                                          uint64_t bytes,
314367b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags read_flags,
314467b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags write_flags)
3145fcc67678SFam Zheng {
3146ecc983a5SFam Zheng     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3147ecc983a5SFam Zheng                                   read_flags, write_flags);
3148fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
314967b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, true);
3150fcc67678SFam Zheng }
3151fcc67678SFam Zheng 
3152fcc67678SFam Zheng /* Copy range from @src to @dst.
3153fcc67678SFam Zheng  *
3154fcc67678SFam Zheng  * See the comment on bdrv_co_copy_range for the parameter and return value
3155fcc67678SFam Zheng  * semantics. */
3156fcc67678SFam Zheng int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3157fcc67678SFam Zheng                                        BdrvChild *dst, uint64_t dst_offset,
315867b51fb9SVladimir Sementsov-Ogievskiy                                        uint64_t bytes,
315967b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags read_flags,
316067b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags write_flags)
3161fcc67678SFam Zheng {
3162ecc983a5SFam Zheng     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3163ecc983a5SFam Zheng                                 read_flags, write_flags);
3164fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
316567b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, false);
3166fcc67678SFam Zheng }
3167fcc67678SFam Zheng 
3168fcc67678SFam Zheng int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3169fcc67678SFam Zheng                                     BdrvChild *dst, uint64_t dst_offset,
317067b51fb9SVladimir Sementsov-Ogievskiy                                     uint64_t bytes, BdrvRequestFlags read_flags,
317167b51fb9SVladimir Sementsov-Ogievskiy                                     BdrvRequestFlags write_flags)
3172fcc67678SFam Zheng {
317337aec7d7SFam Zheng     return bdrv_co_copy_range_from(src, src_offset,
3174fcc67678SFam Zheng                                    dst, dst_offset,
317567b51fb9SVladimir Sementsov-Ogievskiy                                    bytes, read_flags, write_flags);
3176fcc67678SFam Zheng }
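
/*
 * Usage sketch (illustrative, not part of the original file), from
 * coroutine context:
 *
 *   ret = bdrv_co_copy_range(src_child, src_off, dst_child, dst_off,
 *                            len, 0, 0);
 *   if (ret == -ENOTSUP) {
 *       // offload unavailable: fall back to an ordinary read/write loop
 *   }
 *
 * Note that bdrv_co_copy_range_internal() recurses into only one side per
 * call: the source side is tracked as a READ request, the destination side
 * as a WRITE request, and BDRV_REQ_ZERO_WRITE in write_flags short-circuits
 * to bdrv_co_pwrite_zeroes() without touching the source at all.
 */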
31773d9f2d2aSKevin Wolf 
31783d9f2d2aSKevin Wolf static void bdrv_parent_cb_resize(BlockDriverState *bs)
31793d9f2d2aSKevin Wolf {
31803d9f2d2aSKevin Wolf     BdrvChild *c;
31813d9f2d2aSKevin Wolf     QLIST_FOREACH(c, &bs->parents, next_parent) {
3182bd86fb99SMax Reitz         if (c->klass->resize) {
3183bd86fb99SMax Reitz             c->klass->resize(c);
31843d9f2d2aSKevin Wolf         }
31853d9f2d2aSKevin Wolf     }
31863d9f2d2aSKevin Wolf }
31873d9f2d2aSKevin Wolf 
31883d9f2d2aSKevin Wolf /**
31893d9f2d2aSKevin Wolf  * Truncate the image to 'offset' bytes (handled by the driver or forwarded to a filter child)
3190c80d8b06SMax Reitz  *
3191c80d8b06SMax Reitz  * If 'exact' is true, the file must be resized to exactly the given
3192c80d8b06SMax Reitz  * 'offset'.  Otherwise, it is sufficient for the node to be at least
3193c80d8b06SMax Reitz  * 'offset' bytes in length.
31943d9f2d2aSKevin Wolf  */
3195c80d8b06SMax Reitz int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
31967b8e4857SKevin Wolf                                   PreallocMode prealloc, BdrvRequestFlags flags,
31977b8e4857SKevin Wolf                                   Error **errp)
31983d9f2d2aSKevin Wolf {
31993d9f2d2aSKevin Wolf     BlockDriverState *bs = child->bs;
320023b93525SMax Reitz     BdrvChild *filtered, *backing;
32013d9f2d2aSKevin Wolf     BlockDriver *drv = bs->drv;
32021bc5f09fSKevin Wolf     BdrvTrackedRequest req;
32031bc5f09fSKevin Wolf     int64_t old_size, new_bytes;
32043d9f2d2aSKevin Wolf     int ret;
32053d9f2d2aSKevin Wolf 
32063d9f2d2aSKevin Wolf 
32073d9f2d2aSKevin Wolf     /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
32083d9f2d2aSKevin Wolf     if (!drv) {
32093d9f2d2aSKevin Wolf         error_setg(errp, "No medium inserted");
32103d9f2d2aSKevin Wolf         return -ENOMEDIUM;
32113d9f2d2aSKevin Wolf     }
32123d9f2d2aSKevin Wolf     if (offset < 0) {
32133d9f2d2aSKevin Wolf         error_setg(errp, "Image size cannot be negative");
32143d9f2d2aSKevin Wolf         return -EINVAL;
32153d9f2d2aSKevin Wolf     }
32163d9f2d2aSKevin Wolf 
3217*8b117001SVladimir Sementsov-Ogievskiy     ret = bdrv_check_request(offset, 0);
3218*8b117001SVladimir Sementsov-Ogievskiy     if (ret < 0) {
3219*8b117001SVladimir Sementsov-Ogievskiy         error_setg(errp, "Image size is too large; it must not exceed "
3220*8b117001SVladimir Sementsov-Ogievskiy                    "%" PRId64 " bytes", BDRV_MAX_LENGTH);
3221*8b117001SVladimir Sementsov-Ogievskiy         return ret;
3222*8b117001SVladimir Sementsov-Ogievskiy     }
3223*8b117001SVladimir Sementsov-Ogievskiy 
32241bc5f09fSKevin Wolf     old_size = bdrv_getlength(bs);
32251bc5f09fSKevin Wolf     if (old_size < 0) {
32261bc5f09fSKevin Wolf         error_setg_errno(errp, -old_size, "Failed to get old image size");
32271bc5f09fSKevin Wolf         return old_size;
32281bc5f09fSKevin Wolf     }
32291bc5f09fSKevin Wolf 
32301bc5f09fSKevin Wolf     if (offset > old_size) {
32311bc5f09fSKevin Wolf         new_bytes = offset - old_size;
32321bc5f09fSKevin Wolf     } else {
32331bc5f09fSKevin Wolf         new_bytes = 0;
32341bc5f09fSKevin Wolf     }
32351bc5f09fSKevin Wolf 
32363d9f2d2aSKevin Wolf     bdrv_inc_in_flight(bs);
32375416a11eSFam Zheng     tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
32385416a11eSFam Zheng                           BDRV_TRACKED_TRUNCATE);
32391bc5f09fSKevin Wolf 
32401bc5f09fSKevin Wolf     /* If we are growing the image and potentially using preallocation for the
32411bc5f09fSKevin Wolf      * new area, we need to make sure that no write requests are made to it
32421bc5f09fSKevin Wolf      * concurrently or they might be overwritten by preallocation. */
32431bc5f09fSKevin Wolf     if (new_bytes) {
3244304d9d7fSMax Reitz         bdrv_mark_request_serialising(&req, 1);
3245cd47d792SFam Zheng     }
3246cd47d792SFam Zheng     if (bs->read_only) {
3247cd47d792SFam Zheng         error_setg(errp, "Image is read-only");
3248cd47d792SFam Zheng         ret = -EACCES;
3249cd47d792SFam Zheng         goto out;
3250cd47d792SFam Zheng     }
3251cd47d792SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3252cd47d792SFam Zheng                                     0);
3253cd47d792SFam Zheng     if (ret < 0) {
3254cd47d792SFam Zheng         error_setg_errno(errp, -ret,
3255cd47d792SFam Zheng                          "Failed to prepare request for truncation");
3256cd47d792SFam Zheng         goto out;
32571bc5f09fSKevin Wolf     }
32583d9f2d2aSKevin Wolf 
325993393e69SMax Reitz     filtered = bdrv_filter_child(bs);
326023b93525SMax Reitz     backing = bdrv_cow_child(bs);
326193393e69SMax Reitz 
3262955c7d66SKevin Wolf     /*
3263955c7d66SKevin Wolf      * If the image has a backing file that is large enough that it would
3264955c7d66SKevin Wolf      * provide data for the new area, we cannot leave it unallocated because
3265955c7d66SKevin Wolf      * then the backing file content would become visible. Instead, zero-fill
3266955c7d66SKevin Wolf      * the new area.
3267955c7d66SKevin Wolf      *
3268955c7d66SKevin Wolf      * Note that if the image has a backing file, but was opened without the
3269955c7d66SKevin Wolf      * backing file, taking care of keeping things consistent with that backing
3270955c7d66SKevin Wolf      * file is the user's responsibility.
3271955c7d66SKevin Wolf      */
327223b93525SMax Reitz     if (new_bytes && backing) {
3273955c7d66SKevin Wolf         int64_t backing_len;
3274955c7d66SKevin Wolf 
327523b93525SMax Reitz         backing_len = bdrv_getlength(backing->bs);
3276955c7d66SKevin Wolf         if (backing_len < 0) {
3277955c7d66SKevin Wolf             ret = backing_len;
3278955c7d66SKevin Wolf             error_setg_errno(errp, -ret, "Could not get backing file size");
3279955c7d66SKevin Wolf             goto out;
3280955c7d66SKevin Wolf         }
3281955c7d66SKevin Wolf 
3282955c7d66SKevin Wolf         if (backing_len > old_size) {
3283955c7d66SKevin Wolf             flags |= BDRV_REQ_ZERO_WRITE;
3284955c7d66SKevin Wolf         }
3285955c7d66SKevin Wolf     }
3286955c7d66SKevin Wolf 
32876b7e8f8bSMax Reitz     if (drv->bdrv_co_truncate) {
328892b92799SKevin Wolf         if (flags & ~bs->supported_truncate_flags) {
328992b92799SKevin Wolf             error_setg(errp, "Block driver does not support requested flags");
329092b92799SKevin Wolf             ret = -ENOTSUP;
329192b92799SKevin Wolf             goto out;
329292b92799SKevin Wolf         }
329392b92799SKevin Wolf         ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
329493393e69SMax Reitz     } else if (filtered) {
329593393e69SMax Reitz         ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
32966b7e8f8bSMax Reitz     } else {
32973d9f2d2aSKevin Wolf         error_setg(errp, "Image format driver does not support resize");
32983d9f2d2aSKevin Wolf         ret = -ENOTSUP;
32993d9f2d2aSKevin Wolf         goto out;
33003d9f2d2aSKevin Wolf     }
33013d9f2d2aSKevin Wolf     if (ret < 0) {
33023d9f2d2aSKevin Wolf         goto out;
33033d9f2d2aSKevin Wolf     }
33046b7e8f8bSMax Reitz 
33053d9f2d2aSKevin Wolf     ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
33063d9f2d2aSKevin Wolf     if (ret < 0) {
33073d9f2d2aSKevin Wolf         error_setg_errno(errp, -ret, "Could not refresh total sector count");
33083d9f2d2aSKevin Wolf     } else {
33093d9f2d2aSKevin Wolf         offset = bs->total_sectors * BDRV_SECTOR_SIZE;
33103d9f2d2aSKevin Wolf     }
3311cd47d792SFam Zheng     /* It's possible that truncation succeeded but refresh_total_sectors
3312cd47d792SFam Zheng      * failed; the latter doesn't affect how we should finish the request.
3313cd47d792SFam Zheng      * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3314cd47d792SFam Zheng     bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
33153d9f2d2aSKevin Wolf 
33163d9f2d2aSKevin Wolf out:
33171bc5f09fSKevin Wolf     tracked_request_end(&req);
33183d9f2d2aSKevin Wolf     bdrv_dec_in_flight(bs);
33191bc5f09fSKevin Wolf 
33203d9f2d2aSKevin Wolf     return ret;
33213d9f2d2aSKevin Wolf }
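
/*
 * Usage sketch (illustrative, not part of the original file), from
 * coroutine context; GiB comes from "qemu/units.h":
 *
 *   ret = bdrv_co_truncate(child, 1 * GiB, false, PREALLOC_MODE_OFF, 0,
 *                          &local_err);
 *
 * With exact=false a protocol driver may round the file size up rather
 * than matching it exactly.  The function adds BDRV_REQ_ZERO_WRITE on its
 * own when a backing file longer than the old size would otherwise become
 * visible in the newly grown area.
 */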
3322