/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

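/*
 * Illustrative sketch (hypothetical values, not part of the original
 * file): MIN_NON_ZERO() treats 0 as "no limit", so merging a child that
 * advertises a 64 KiB max_transfer into a parent with no limit keeps
 * the child's limit:
 *
 *     BlockLimits a = { .max_transfer = 0 };          // 0 = unlimited
 *     BlockLimits b = { .max_transfer = 64 * 1024 };
 *     bdrv_merge_limits(&a, &b);
 *     assert(a.max_transfer == 64 * 1024);
 */
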
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

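/*
 * Usage sketch (hypothetical callers): because the flag is a reference
 * count, two independent users can overlap without clobbering each other:
 *
 *     bdrv_enable_copy_on_read(bs);    // user A: count 0 -> 1
 *     bdrv_enable_copy_on_read(bs);    // user B: count 1 -> 2
 *     bdrv_disable_copy_on_read(bs);   // user A done: 2 -> 1, still enabled
 *     bdrv_disable_copy_on_read(bs);   // user B done: 1 -> 0, disabled
 */
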
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

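/*
 * Typical drained-section pattern (sketch of a hypothetical caller):
 * quiesce all I/O on a node while manipulating it, then resume:
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests start and none are in flight here ...
 *     bdrv_drained_end(bs);
 */
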
void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.  The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles.  Therefore, the pointer must remain valid
 * until the pointee reaches 0.  That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

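/*
 * Caller-managed counter pattern (sketch), mirroring what
 * bdrv_drained_end() does internally: whoever owns the counter must
 * keep it valid and poll until it drops back to 0:
 *
 *     int counter = 0;
 *     bdrv_drained_end_no_poll(bs, &counter);
 *     BDRV_POLL_WHILE(bs, qatomic_read(&counter) > 0);
 */
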
void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay, so waiting for the
     * I/O requests to finish may take infinitely long.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay, so waiting for the
     * I/O requests to finish may take infinitely long.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

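/*
 * Usage sketch (hypothetical caller): a global quiesce around an
 * operation that must not race with any guest I/O on any node:
 *
 *     bdrv_drain_all_begin();
 *     ... no request is in flight on any BlockDriverState here ...
 *     bdrv_drain_all_end();
 */
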
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

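/*
 * Worked example (hypothetical values): if req covers [4096, 8192), a
 * query at offset 8192 fails the first check (it starts at or past the
 * request's end), a 4096-byte query at offset 0 fails the second (it
 * ends at or before the request's start), while 1024 bytes at offset
 * 6144 passes both checks and therefore overlaps.
 */
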
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
                                      BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);
    return waited;
}

bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    BlockDriverState *bs = req->bs;
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;
    bool waited;

    qemu_co_mutex_lock(&bs->reqs_lock);
    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
    waited = bdrv_wait_serialising_requests_locked(bs, req);
    qemu_co_mutex_unlock(&bs->reqs_lock);
    return waited;
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

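/*
 * Worked example (hypothetical values): with a 64 KiB cluster size, a
 * 1000-byte region at offset 70000 rounds to *cluster_offset == 65536
 * (QEMU_ALIGN_DOWN(70000, 65536)) and *cluster_bytes == 65536
 * (QEMU_ALIGN_UP(4464 + 1000, 65536)), i.e. exactly the one cluster
 * that the region touches.
 */
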
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(bs, self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

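/*
 * Usage sketch (hypothetical caller): zero a whole image while letting
 * the block-status check skip ranges that already read as zeroes:
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("failed to zero device: %s", strerror(-ret));
 *     }
 */
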
/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

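/*
 * Usage sketch (hypothetical buffer): a simple read-modify-write cycle
 * through these convenience wrappers, which build the single-element
 * QEMUIOVector internally:
 *
 *     uint8_t buf[512];
 *     if (bdrv_pread(child, 0, buf, sizeof(buf)) == sizeof(buf)) {
 *         buf[0] ^= 0xff;
 *         bdrv_pwrite(child, 0, buf, sizeof(buf));
 *     }
 */
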
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

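/*
 * Usage sketch (hypothetical header update): the internal flush makes
 * the write durable before the call returns, so no later write can be
 * reordered ahead of it:
 *
 *     ret = bdrv_pwrite_sync(child, 0, &header, sizeof(header));
 */
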
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

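/*
 * Bridge pattern (sketch, mirroring its use in bdrv_driver_preadv()
 * below): a coroutine submits an AIO request with this completion as
 * the callback, yields, and is woken up with the result:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *     acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
 *                                bdrv_co_io_em_complete, &co);
 *     qemu_coroutine_yield();
 *     ret = co.ret;
 */
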
1013166fe960SKevin Wolf static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1014166fe960SKevin Wolf                                            uint64_t offset, uint64_t bytes,
1015ac850bf0SVladimir Sementsov-Ogievskiy                                            QEMUIOVector *qiov,
1016ac850bf0SVladimir Sementsov-Ogievskiy                                            size_t qiov_offset, int flags)
1017166fe960SKevin Wolf {
1018166fe960SKevin Wolf     BlockDriver *drv = bs->drv;
10193fb06697SKevin Wolf     int64_t sector_num;
10203fb06697SKevin Wolf     unsigned int nb_sectors;
1021ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
1022ac850bf0SVladimir Sementsov-Ogievskiy     int ret;
10233fb06697SKevin Wolf 
1024fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
1025fe0480d6SKevin Wolf     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1026fa166538SEric Blake 
1027d470ad42SMax Reitz     if (!drv) {
1028d470ad42SMax Reitz         return -ENOMEDIUM;
1029d470ad42SMax Reitz     }
1030d470ad42SMax Reitz 
1031ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_preadv_part) {
1032ac850bf0SVladimir Sementsov-Ogievskiy         return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
1033ac850bf0SVladimir Sementsov-Ogievskiy                                         flags);
1034ac850bf0SVladimir Sementsov-Ogievskiy     }
1035ac850bf0SVladimir Sementsov-Ogievskiy 
1036ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset > 0 || bytes != qiov->size) {
1037ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1038ac850bf0SVladimir Sementsov-Ogievskiy         qiov = &local_qiov;
1039ac850bf0SVladimir Sementsov-Ogievskiy     }
1040ac850bf0SVladimir Sementsov-Ogievskiy 
10413fb06697SKevin Wolf     if (drv->bdrv_co_preadv) {
1042ac850bf0SVladimir Sementsov-Ogievskiy         ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1043ac850bf0SVladimir Sementsov-Ogievskiy         goto out;
10443fb06697SKevin Wolf     }
10453fb06697SKevin Wolf 
1046edfab6a0SEric Blake     if (drv->bdrv_aio_preadv) {
104708844473SKevin Wolf         BlockAIOCB *acb;
104808844473SKevin Wolf         CoroutineIOCompletion co = {
104908844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
105008844473SKevin Wolf         };
105108844473SKevin Wolf 
1052e31f6864SEric Blake         acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
105308844473SKevin Wolf                                    bdrv_co_io_em_complete, &co);
105408844473SKevin Wolf         if (acb == NULL) {
1055ac850bf0SVladimir Sementsov-Ogievskiy             ret = -EIO;
1056ac850bf0SVladimir Sementsov-Ogievskiy             goto out;
105708844473SKevin Wolf         } else {
105808844473SKevin Wolf             qemu_coroutine_yield();
1059ac850bf0SVladimir Sementsov-Ogievskiy             ret = co.ret;
1060ac850bf0SVladimir Sementsov-Ogievskiy             goto out;
106108844473SKevin Wolf         }
106208844473SKevin Wolf     }
1063edfab6a0SEric Blake 
1064edfab6a0SEric Blake     sector_num = offset >> BDRV_SECTOR_BITS;
1065edfab6a0SEric Blake     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1066edfab6a0SEric Blake 
10671bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
10681bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
106941ae31e3SAlberto Garcia     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1070edfab6a0SEric Blake     assert(drv->bdrv_co_readv);
1071edfab6a0SEric Blake 
1072ac850bf0SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1073ac850bf0SVladimir Sementsov-Ogievskiy 
1074ac850bf0SVladimir Sementsov-Ogievskiy out:
1075ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov == &local_qiov) {
1076ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&local_qiov);
1077ac850bf0SVladimir Sementsov-Ogievskiy     }
1078ac850bf0SVladimir Sementsov-Ogievskiy 
1079ac850bf0SVladimir Sementsov-Ogievskiy     return ret;
1080166fe960SKevin Wolf }
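
/*
 * Note: bdrv_driver_preadv() above dispatches to the first read interface the
 * driver implements, in decreasing order of preference:
 *   1. bdrv_co_preadv_part (byte-based, takes qiov_offset natively)
 *   2. bdrv_co_preadv      (byte-based)
 *   3. bdrv_aio_preadv     (callback-based, bridged via CoroutineIOCompletion)
 *   4. bdrv_co_readv       (legacy sector-based; offset and bytes must be
 *                           sector-aligned, as the asserts above check)
 * For cases 2-4, a qiov slice is materialized first whenever qiov_offset is
 * non-zero or the qiov is larger than the request.
 */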
1081166fe960SKevin Wolf 
108278a07294SKevin Wolf static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
108378a07294SKevin Wolf                                             uint64_t offset, uint64_t bytes,
1084ac850bf0SVladimir Sementsov-Ogievskiy                                             QEMUIOVector *qiov,
1085ac850bf0SVladimir Sementsov-Ogievskiy                                             size_t qiov_offset, int flags)
108678a07294SKevin Wolf {
108778a07294SKevin Wolf     BlockDriver *drv = bs->drv;
10883fb06697SKevin Wolf     int64_t sector_num;
10893fb06697SKevin Wolf     unsigned int nb_sectors;
1090ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
109178a07294SKevin Wolf     int ret;
109278a07294SKevin Wolf 
1093fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
1094fe0480d6SKevin Wolf     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1095fa166538SEric Blake 
1096d470ad42SMax Reitz     if (!drv) {
1097d470ad42SMax Reitz         return -ENOMEDIUM;
1098d470ad42SMax Reitz     }
1099d470ad42SMax Reitz 
1100ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_part) {
1101ac850bf0SVladimir Sementsov-Ogievskiy         ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1102ac850bf0SVladimir Sementsov-Ogievskiy                                         flags & bs->supported_write_flags);
1103ac850bf0SVladimir Sementsov-Ogievskiy         flags &= ~bs->supported_write_flags;
1104ac850bf0SVladimir Sementsov-Ogievskiy         goto emulate_flags;
1105ac850bf0SVladimir Sementsov-Ogievskiy     }
1106ac850bf0SVladimir Sementsov-Ogievskiy 
1107ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset > 0 || bytes != qiov->size) {
1108ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1109ac850bf0SVladimir Sementsov-Ogievskiy         qiov = &local_qiov;
1110ac850bf0SVladimir Sementsov-Ogievskiy     }
1111ac850bf0SVladimir Sementsov-Ogievskiy 
11123fb06697SKevin Wolf     if (drv->bdrv_co_pwritev) {
1113515c2f43SKevin Wolf         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1114515c2f43SKevin Wolf                                    flags & bs->supported_write_flags);
1115515c2f43SKevin Wolf         flags &= ~bs->supported_write_flags;
11163fb06697SKevin Wolf         goto emulate_flags;
11173fb06697SKevin Wolf     }
11183fb06697SKevin Wolf 
1119edfab6a0SEric Blake     if (drv->bdrv_aio_pwritev) {
112008844473SKevin Wolf         BlockAIOCB *acb;
112108844473SKevin Wolf         CoroutineIOCompletion co = {
112208844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
112308844473SKevin Wolf         };
112408844473SKevin Wolf 
1125e31f6864SEric Blake         acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1126e31f6864SEric Blake                                     flags & bs->supported_write_flags,
112708844473SKevin Wolf                                     bdrv_co_io_em_complete, &co);
1128e31f6864SEric Blake         flags &= ~bs->supported_write_flags;
112908844473SKevin Wolf         if (acb == NULL) {
11303fb06697SKevin Wolf             ret = -EIO;
113108844473SKevin Wolf         } else {
113208844473SKevin Wolf             qemu_coroutine_yield();
11333fb06697SKevin Wolf             ret = co.ret;
113408844473SKevin Wolf         }
1135edfab6a0SEric Blake         goto emulate_flags;
1136edfab6a0SEric Blake     }
1137edfab6a0SEric Blake 
1138edfab6a0SEric Blake     sector_num = offset >> BDRV_SECTOR_BITS;
1139edfab6a0SEric Blake     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1140edfab6a0SEric Blake 
11411bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
11421bbbf32dSNir Soffer     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
114341ae31e3SAlberto Garcia     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1144edfab6a0SEric Blake 
1145e18a58b4SEric Blake     assert(drv->bdrv_co_writev);
1146e18a58b4SEric Blake     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1147edfab6a0SEric Blake                               flags & bs->supported_write_flags);
1148edfab6a0SEric Blake     flags &= ~bs->supported_write_flags;
114978a07294SKevin Wolf 
11503fb06697SKevin Wolf emulate_flags:
11514df863f3SEric Blake     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
115278a07294SKevin Wolf         ret = bdrv_co_flush(bs);
115378a07294SKevin Wolf     }
115478a07294SKevin Wolf 
1155ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov == &local_qiov) {
1156ac850bf0SVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&local_qiov);
1157ac850bf0SVladimir Sementsov-Ogievskiy     }
1158ac850bf0SVladimir Sementsov-Ogievskiy 
115978a07294SKevin Wolf     return ret;
116078a07294SKevin Wolf }
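
/*
 * Note: in bdrv_driver_pwritev() above, flags the driver supports are passed
 * through to it and then cleared from @flags; whatever remains has to be
 * emulated. In practice only BDRV_REQ_FUA can remain, and it is emulated by
 * the bdrv_co_flush() call at the emulate_flags label: with
 * flags == BDRV_REQ_FUA and supported_write_flags == 0, the driver sees a
 * plain write and the FUA semantics come from the trailing flush.
 */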
116178a07294SKevin Wolf 
116229a298afSPavel Butsykin static int coroutine_fn
116329a298afSPavel Butsykin bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
1164ac850bf0SVladimir Sementsov-Ogievskiy                                uint64_t bytes, QEMUIOVector *qiov,
1165ac850bf0SVladimir Sementsov-Ogievskiy                                size_t qiov_offset)
116629a298afSPavel Butsykin {
116729a298afSPavel Butsykin     BlockDriver *drv = bs->drv;
1168ac850bf0SVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
1169ac850bf0SVladimir Sementsov-Ogievskiy     int ret;
117029a298afSPavel Butsykin 
1171d470ad42SMax Reitz     if (!drv) {
1172d470ad42SMax Reitz         return -ENOMEDIUM;
1173d470ad42SMax Reitz     }
1174d470ad42SMax Reitz 
1175ac850bf0SVladimir Sementsov-Ogievskiy     if (!block_driver_can_compress(drv)) {
117629a298afSPavel Butsykin         return -ENOTSUP;
117729a298afSPavel Butsykin     }
117829a298afSPavel Butsykin 
1179ac850bf0SVladimir Sementsov-Ogievskiy     if (drv->bdrv_co_pwritev_compressed_part) {
1180ac850bf0SVladimir Sementsov-Ogievskiy         return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1181ac850bf0SVladimir Sementsov-Ogievskiy                                                     qiov, qiov_offset);
1182ac850bf0SVladimir Sementsov-Ogievskiy     }
1183ac850bf0SVladimir Sementsov-Ogievskiy 
1184ac850bf0SVladimir Sementsov-Ogievskiy     if (qiov_offset == 0) {
118529a298afSPavel Butsykin         return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
118629a298afSPavel Butsykin     }
118729a298afSPavel Butsykin 
1188ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1189ac850bf0SVladimir Sementsov-Ogievskiy     ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1190ac850bf0SVladimir Sementsov-Ogievskiy     qemu_iovec_destroy(&local_qiov);
1191ac850bf0SVladimir Sementsov-Ogievskiy 
1192ac850bf0SVladimir Sementsov-Ogievskiy     return ret;
1193ac850bf0SVladimir Sementsov-Ogievskiy }
1194ac850bf0SVladimir Sementsov-Ogievskiy 
119585c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
11963299e5ecSVladimir Sementsov-Ogievskiy         int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
11971143ec5eSVladimir Sementsov-Ogievskiy         size_t qiov_offset, int flags)
119861007b31SStefan Hajnoczi {
119985c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
120085c97ca7SKevin Wolf 
120161007b31SStefan Hajnoczi     /* Perform I/O through a temporary buffer so that users who scribble over
120261007b31SStefan Hajnoczi      * their read buffer while the operation is in progress do not end up
120361007b31SStefan Hajnoczi      * modifying the image file.  This is critical for zero-copy guest I/O
120461007b31SStefan Hajnoczi      * where anything might happen inside guest memory.
120561007b31SStefan Hajnoczi      */
12062275cc90SVladimir Sementsov-Ogievskiy     void *bounce_buffer = NULL;
120761007b31SStefan Hajnoczi 
120861007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
1209244483e6SKevin Wolf     int64_t cluster_offset;
12107cfd5275SEric Blake     int64_t cluster_bytes;
121161007b31SStefan Hajnoczi     size_t skip_bytes;
121261007b31SStefan Hajnoczi     int ret;
1213cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1214cb2e2878SEric Blake                                     BDRV_REQUEST_MAX_BYTES);
1215cb2e2878SEric Blake     unsigned int progress = 0;
12168644476eSMax Reitz     bool skip_write;
121761007b31SStefan Hajnoczi 
1218d470ad42SMax Reitz     if (!drv) {
1219d470ad42SMax Reitz         return -ENOMEDIUM;
1220d470ad42SMax Reitz     }
1221d470ad42SMax Reitz 
12228644476eSMax Reitz     /*
12238644476eSMax Reitz      * Do not write anything when the BDS is inactive.  That is not
12248644476eSMax Reitz      * allowed, and it would not help.
12258644476eSMax Reitz      */
12268644476eSMax Reitz     skip_write = (bs->open_flags & BDRV_O_INACTIVE);
12278644476eSMax Reitz 
12281bf03e66SKevin Wolf     /* FIXME We cannot require callers to have write permissions when all they
12291bf03e66SKevin Wolf      * are doing is a read request. If we did things right, write permissions
12301bf03e66SKevin Wolf      * would be obtained anyway, but internally by the copy-on-read code.
1231765d9df9SEric Blake      * However, as long as it is implemented here rather than in a separate
12321bf03e66SKevin Wolf      * filter driver, the copy-on-read code doesn't have its own BdrvChild for
12331bf03e66SKevin Wolf      * which it could request permissions. Therefore we have to bypass the
12341bf03e66SKevin Wolf      * permission system for the moment. */
12351bf03e66SKevin Wolf     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1236afa4b293SKevin Wolf 
123761007b31SStefan Hajnoczi     /* Cover the entire cluster so that no additional backing file I/O is
1238cb2e2878SEric Blake      * required when allocating the cluster in the image file.  Note that this
1239cb2e2878SEric Blake      * value may exceed BDRV_REQUEST_MAX_BYTES (even when the original read did
1240cb2e2878SEric Blake      * not), which is one reason we loop rather than doing it all at once.
124161007b31SStefan Hajnoczi      */
1242244483e6SKevin Wolf     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1243cb2e2878SEric Blake     skip_bytes = offset - cluster_offset;
124461007b31SStefan Hajnoczi 
1245244483e6SKevin Wolf     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1246244483e6SKevin Wolf                                    cluster_offset, cluster_bytes);
124761007b31SStefan Hajnoczi 
1248cb2e2878SEric Blake     while (cluster_bytes) {
1249cb2e2878SEric Blake         int64_t pnum;
125061007b31SStefan Hajnoczi 
12518644476eSMax Reitz         if (skip_write) {
12528644476eSMax Reitz             ret = 1; /* "already allocated", so nothing will be copied */
12538644476eSMax Reitz             pnum = MIN(cluster_bytes, max_transfer);
12548644476eSMax Reitz         } else {
1255cb2e2878SEric Blake             ret = bdrv_is_allocated(bs, cluster_offset,
1256cb2e2878SEric Blake                                     MIN(cluster_bytes, max_transfer), &pnum);
1257cb2e2878SEric Blake             if (ret < 0) {
12588644476eSMax Reitz                 /*
12598644476eSMax Reitz                  * Safe to treat errors in querying allocation as if
1260cb2e2878SEric Blake                  * unallocated; we'll probably fail again soon on the
1261cb2e2878SEric Blake                  * read, but at least that will set a decent errno.
1262cb2e2878SEric Blake                  */
1263cb2e2878SEric Blake                 pnum = MIN(cluster_bytes, max_transfer);
1264cb2e2878SEric Blake             }
1265cb2e2878SEric Blake 
1266b0ddcbbbSKevin Wolf             /* Stop at EOF if the image ends in the middle of the cluster */
1267b0ddcbbbSKevin Wolf             if (ret == 0 && pnum == 0) {
1268b0ddcbbbSKevin Wolf                 assert(progress >= bytes);
1269b0ddcbbbSKevin Wolf                 break;
1270b0ddcbbbSKevin Wolf             }
1271b0ddcbbbSKevin Wolf 
1272cb2e2878SEric Blake             assert(skip_bytes < pnum);
12738644476eSMax Reitz         }
1274cb2e2878SEric Blake 
1275cb2e2878SEric Blake         if (ret <= 0) {
12761143ec5eSVladimir Sementsov-Ogievskiy             QEMUIOVector local_qiov;
12771143ec5eSVladimir Sementsov-Ogievskiy 
1278cb2e2878SEric Blake             /* Must copy-on-read; use the bounce buffer */
12790d93ed08SVladimir Sementsov-Ogievskiy             pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
12802275cc90SVladimir Sementsov-Ogievskiy             if (!bounce_buffer) {
12812275cc90SVladimir Sementsov-Ogievskiy                 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
12822275cc90SVladimir Sementsov-Ogievskiy                 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
12832275cc90SVladimir Sementsov-Ogievskiy                 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
12842275cc90SVladimir Sementsov-Ogievskiy 
12852275cc90SVladimir Sementsov-Ogievskiy                 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
12862275cc90SVladimir Sementsov-Ogievskiy                 if (!bounce_buffer) {
12872275cc90SVladimir Sementsov-Ogievskiy                     ret = -ENOMEM;
12882275cc90SVladimir Sementsov-Ogievskiy                     goto err;
12892275cc90SVladimir Sementsov-Ogievskiy                 }
12902275cc90SVladimir Sementsov-Ogievskiy             }
12910d93ed08SVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1292cb2e2878SEric Blake 
1293cb2e2878SEric Blake             ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1294ac850bf0SVladimir Sementsov-Ogievskiy                                      &local_qiov, 0, 0);
129561007b31SStefan Hajnoczi             if (ret < 0) {
129661007b31SStefan Hajnoczi                 goto err;
129761007b31SStefan Hajnoczi             }
129861007b31SStefan Hajnoczi 
1299d855ebcdSEric Blake             bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1300c1499a5eSEric Blake             if (drv->bdrv_co_pwrite_zeroes &&
1301cb2e2878SEric Blake                 buffer_is_zero(bounce_buffer, pnum)) {
1302a604fa2bSEric Blake                 /* FIXME: Should we (perhaps conditionally) be setting
1303a604fa2bSEric Blake                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1304a604fa2bSEric Blake                  * that still correctly reads as zero? */
13057adcf59fSMax Reitz                 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
13067adcf59fSMax Reitz                                                BDRV_REQ_WRITE_UNCHANGED);
130761007b31SStefan Hajnoczi             } else {
1308cb2e2878SEric Blake                 /* This does not change the data on the disk, so it is not
1309cb2e2878SEric Blake                  * necessary to flush even in cache=writethrough mode.
131061007b31SStefan Hajnoczi                  */
1311cb2e2878SEric Blake                 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1312ac850bf0SVladimir Sementsov-Ogievskiy                                           &local_qiov, 0,
13137adcf59fSMax Reitz                                           BDRV_REQ_WRITE_UNCHANGED);
131461007b31SStefan Hajnoczi             }
131561007b31SStefan Hajnoczi 
131661007b31SStefan Hajnoczi             if (ret < 0) {
1317cb2e2878SEric Blake                 /* It might be okay to ignore write errors for guest
1318cb2e2878SEric Blake                  * requests.  If this is a deliberate copy-on-read
1319cb2e2878SEric Blake                  * then we don't want to ignore the error.  Simply
1320cb2e2878SEric Blake                  * report it in all cases.
132161007b31SStefan Hajnoczi                  */
132261007b31SStefan Hajnoczi                 goto err;
132361007b31SStefan Hajnoczi             }
132461007b31SStefan Hajnoczi 
13253299e5ecSVladimir Sementsov-Ogievskiy             if (!(flags & BDRV_REQ_PREFETCH)) {
13261143ec5eSVladimir Sementsov-Ogievskiy                 qemu_iovec_from_buf(qiov, qiov_offset + progress,
13271143ec5eSVladimir Sementsov-Ogievskiy                                     bounce_buffer + skip_bytes,
13284ab78b19SVladimir Sementsov-Ogievskiy                                     MIN(pnum - skip_bytes, bytes - progress));
13293299e5ecSVladimir Sementsov-Ogievskiy             }
13303299e5ecSVladimir Sementsov-Ogievskiy         } else if (!(flags & BDRV_REQ_PREFETCH)) {
1331cb2e2878SEric Blake             /* Read directly into the destination */
13321143ec5eSVladimir Sementsov-Ogievskiy             ret = bdrv_driver_preadv(bs, offset + progress,
13331143ec5eSVladimir Sementsov-Ogievskiy                                      MIN(pnum - skip_bytes, bytes - progress),
13341143ec5eSVladimir Sementsov-Ogievskiy                                      qiov, qiov_offset + progress, 0);
1335cb2e2878SEric Blake             if (ret < 0) {
1336cb2e2878SEric Blake                 goto err;
1337cb2e2878SEric Blake             }
1338cb2e2878SEric Blake         }
1339cb2e2878SEric Blake 
1340cb2e2878SEric Blake         cluster_offset += pnum;
1341cb2e2878SEric Blake         cluster_bytes -= pnum;
1342cb2e2878SEric Blake         progress += pnum - skip_bytes;
1343cb2e2878SEric Blake         skip_bytes = 0;
1344cb2e2878SEric Blake     }
1345cb2e2878SEric Blake     ret = 0;
134661007b31SStefan Hajnoczi 
134761007b31SStefan Hajnoczi err:
134861007b31SStefan Hajnoczi     qemu_vfree(bounce_buffer);
134961007b31SStefan Hajnoczi     return ret;
135061007b31SStefan Hajnoczi }
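
/*
 * Worked example for bdrv_co_do_copy_on_readv() (illustrative numbers): with
 * a 64 KiB cluster size, a guest read of offset = 65 KiB, bytes = 2 KiB is
 * rounded out to cluster_offset = 64 KiB, cluster_bytes = 64 KiB, and
 * skip_bytes = 1 KiB. If the cluster is unallocated, the full 64 KiB is read
 * into the bounce buffer and written back to the image, while only the 2 KiB
 * starting at skip_bytes is copied into the caller's qiov.
 */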
135161007b31SStefan Hajnoczi 
135261007b31SStefan Hajnoczi /*
135361007b31SStefan Hajnoczi  * Forwards an already correctly aligned request to the BlockDriver. This
13541a62d0acSEric Blake  * handles copy on read, zeroing after EOF, and fragmentation of large
13551a62d0acSEric Blake  * reads; any other features must be implemented by the caller.
135661007b31SStefan Hajnoczi  */
135785c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
135861007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
135965cd4424SVladimir Sementsov-Ogievskiy     int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
136061007b31SStefan Hajnoczi {
136185c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
1362c9d20029SKevin Wolf     int64_t total_bytes, max_bytes;
13631a62d0acSEric Blake     int ret = 0;
13641a62d0acSEric Blake     uint64_t bytes_remaining = bytes;
13651a62d0acSEric Blake     int max_transfer;
136661007b31SStefan Hajnoczi 
136749c07526SKevin Wolf     assert(is_power_of_2(align));
136849c07526SKevin Wolf     assert((offset & (align - 1)) == 0);
136949c07526SKevin Wolf     assert((bytes & (align - 1)) == 0);
1370abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
13711a62d0acSEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
13721a62d0acSEric Blake                                    align);
1373a604fa2bSEric Blake 
1374a604fa2bSEric Blake     /* TODO: We would need a per-BDS .supported_read_flags and
1375a604fa2bSEric Blake      * potential fallback support, if we ever implement any read flags
1376a604fa2bSEric Blake      * to pass through to drivers.  For now, there aren't any
1377a604fa2bSEric Blake      * passthrough flags.  */
1378c53cb427SPaolo Bonzini     assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));
137961007b31SStefan Hajnoczi 
138061007b31SStefan Hajnoczi     /* Handle Copy on Read and associated serialisation */
138161007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
138261007b31SStefan Hajnoczi         /* If we touch the same cluster it counts as an overlap.  This
138361007b31SStefan Hajnoczi          * guarantees that allocating writes will be serialized and not race
138461007b31SStefan Hajnoczi          * with each other for the same cluster.  For example, in copy-on-read
138561007b31SStefan Hajnoczi          * it ensures that the CoR read and write operations are atomic and
138661007b31SStefan Hajnoczi          * guest writes cannot interleave between them. */
1387304d9d7fSMax Reitz         bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
138818fbd0deSPaolo Bonzini     } else {
1389304d9d7fSMax Reitz         bdrv_wait_serialising_requests(req);
139018fbd0deSPaolo Bonzini     }
139161007b31SStefan Hajnoczi 
139261007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
1393d6a644bbSEric Blake         int64_t pnum;
139461007b31SStefan Hajnoczi 
139588e63df2SEric Blake         ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
139661007b31SStefan Hajnoczi         if (ret < 0) {
139761007b31SStefan Hajnoczi             goto out;
139861007b31SStefan Hajnoczi         }
139961007b31SStefan Hajnoczi 
140088e63df2SEric Blake         if (!ret || pnum != bytes) {
140165cd4424SVladimir Sementsov-Ogievskiy             ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
140265cd4424SVladimir Sementsov-Ogievskiy                                            qiov, qiov_offset, flags);
14033299e5ecSVladimir Sementsov-Ogievskiy             goto out;
14043299e5ecSVladimir Sementsov-Ogievskiy         } else if (flags & BDRV_REQ_PREFETCH) {
140561007b31SStefan Hajnoczi             goto out;
140661007b31SStefan Hajnoczi         }
140761007b31SStefan Hajnoczi     }
140861007b31SStefan Hajnoczi 
14091a62d0acSEric Blake     /* Forward the request to the BlockDriver, possibly fragmenting it */
141049c07526SKevin Wolf     total_bytes = bdrv_getlength(bs);
141149c07526SKevin Wolf     if (total_bytes < 0) {
141249c07526SKevin Wolf         ret = total_bytes;
141361007b31SStefan Hajnoczi         goto out;
141461007b31SStefan Hajnoczi     }
141561007b31SStefan Hajnoczi 
141649c07526SKevin Wolf     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
14171a62d0acSEric Blake     if (bytes <= max_bytes && bytes <= max_transfer) {
141865cd4424SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0);
14191a62d0acSEric Blake         goto out;
142061007b31SStefan Hajnoczi     }
142161007b31SStefan Hajnoczi 
14221a62d0acSEric Blake     while (bytes_remaining) {
14231a62d0acSEric Blake         int num;
14241a62d0acSEric Blake 
14251a62d0acSEric Blake         if (max_bytes) {
14261a62d0acSEric Blake             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
14271a62d0acSEric Blake             assert(num);
14281a62d0acSEric Blake 
14291a62d0acSEric Blake             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1430134b7decSMax Reitz                                      num, qiov,
1431134b7decSMax Reitz                                      qiov_offset + bytes - bytes_remaining, 0);
14321a62d0acSEric Blake             max_bytes -= num;
14331a62d0acSEric Blake         } else {
14341a62d0acSEric Blake             num = bytes_remaining;
1435134b7decSMax Reitz             ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1436134b7decSMax Reitz                                     0, bytes_remaining);
14371a62d0acSEric Blake         }
14381a62d0acSEric Blake         if (ret < 0) {
14391a62d0acSEric Blake             goto out;
14401a62d0acSEric Blake         }
14411a62d0acSEric Blake         bytes_remaining -= num;
144261007b31SStefan Hajnoczi     }
144361007b31SStefan Hajnoczi 
144461007b31SStefan Hajnoczi out:
14451a62d0acSEric Blake     return ret < 0 ? ret : 0;
144661007b31SStefan Hajnoczi }
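
/*
 * Worked example for the zero-fill tail above (illustrative numbers): with
 * total_bytes = 1536 and align = 512, a read of offset = 1024, bytes = 1024
 * gives max_bytes = ROUND_UP(1536 - 1024, 512) = 512. The first loop
 * iteration reads num = 512 from the driver; the second finds max_bytes == 0
 * and memsets the remaining 512 bytes of the qiov to zero, implementing
 * "zeroing after EOF" without involving the driver.
 */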
144761007b31SStefan Hajnoczi 
144861007b31SStefan Hajnoczi /*
14497a3f542fSVladimir Sementsov-Ogievskiy  * Request padding
14507a3f542fSVladimir Sementsov-Ogievskiy  *
14517a3f542fSVladimir Sementsov-Ogievskiy  *  |<---- align ----->|                     |<----- align ---->|
14527a3f542fSVladimir Sementsov-Ogievskiy  *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
14537a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14547a3f542fSVladimir Sementsov-Ogievskiy  * -*----------$-------*-------- ... --------*-----$------------*---
14557a3f542fSVladimir Sementsov-Ogievskiy  *  |          |       |                     |     |            |
14567a3f542fSVladimir Sementsov-Ogievskiy  *  |          offset  |                     |     end          |
14577a3f542fSVladimir Sementsov-Ogievskiy  *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
14587a3f542fSVladimir Sementsov-Ogievskiy  *  [buf   ... )                             [tail_buf          )
14597a3f542fSVladimir Sementsov-Ogievskiy  *
14607a3f542fSVladimir Sementsov-Ogievskiy  * @buf is an aligned allocation needed to store @head and @tail paddings. @head
14617a3f542fSVladimir Sementsov-Ogievskiy  * is placed at the beginning of @buf and @tail at the end.
14627a3f542fSVladimir Sementsov-Ogievskiy  *
14637a3f542fSVladimir Sementsov-Ogievskiy  * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
14647a3f542fSVladimir Sementsov-Ogievskiy  * chunk around the tail, if a tail exists.
14657a3f542fSVladimir Sementsov-Ogievskiy  *
14667a3f542fSVladimir Sementsov-Ogievskiy  * @merge_reads is true for small requests, i.e. when
14677a3f542fSVladimir Sementsov-Ogievskiy  * @buf_len == @head + bytes + @tail. In this case it is possible that both
14687a3f542fSVladimir Sementsov-Ogievskiy  * head and tail exist but @buf_len == align and @tail_buf == @buf.
146961007b31SStefan Hajnoczi  */
14707a3f542fSVladimir Sementsov-Ogievskiy typedef struct BdrvRequestPadding {
14717a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *buf;
14727a3f542fSVladimir Sementsov-Ogievskiy     size_t buf_len;
14737a3f542fSVladimir Sementsov-Ogievskiy     uint8_t *tail_buf;
14747a3f542fSVladimir Sementsov-Ogievskiy     size_t head;
14757a3f542fSVladimir Sementsov-Ogievskiy     size_t tail;
14767a3f542fSVladimir Sementsov-Ogievskiy     bool merge_reads;
14777a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
14787a3f542fSVladimir Sementsov-Ogievskiy } BdrvRequestPadding;
14797a3f542fSVladimir Sementsov-Ogievskiy 
14807a3f542fSVladimir Sementsov-Ogievskiy static bool bdrv_init_padding(BlockDriverState *bs,
14817a3f542fSVladimir Sementsov-Ogievskiy                               int64_t offset, int64_t bytes,
14827a3f542fSVladimir Sementsov-Ogievskiy                               BdrvRequestPadding *pad)
14837a3f542fSVladimir Sementsov-Ogievskiy {
14847a3f542fSVladimir Sementsov-Ogievskiy     uint64_t align = bs->bl.request_alignment;
14857a3f542fSVladimir Sementsov-Ogievskiy     size_t sum;
14867a3f542fSVladimir Sementsov-Ogievskiy 
14877a3f542fSVladimir Sementsov-Ogievskiy     memset(pad, 0, sizeof(*pad));
14887a3f542fSVladimir Sementsov-Ogievskiy 
14897a3f542fSVladimir Sementsov-Ogievskiy     pad->head = offset & (align - 1);
14907a3f542fSVladimir Sementsov-Ogievskiy     pad->tail = ((offset + bytes) & (align - 1));
14917a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
14927a3f542fSVladimir Sementsov-Ogievskiy         pad->tail = align - pad->tail;
14937a3f542fSVladimir Sementsov-Ogievskiy     }
14947a3f542fSVladimir Sementsov-Ogievskiy 
1495ac9d00bfSVladimir Sementsov-Ogievskiy     if (!pad->head && !pad->tail) {
14967a3f542fSVladimir Sementsov-Ogievskiy         return false;
14977a3f542fSVladimir Sementsov-Ogievskiy     }
14987a3f542fSVladimir Sementsov-Ogievskiy 
1499ac9d00bfSVladimir Sementsov-Ogievskiy     assert(bytes); /* Nothing good in aligning zero-length requests */
1500ac9d00bfSVladimir Sementsov-Ogievskiy 
15017a3f542fSVladimir Sementsov-Ogievskiy     sum = pad->head + bytes + pad->tail;
15027a3f542fSVladimir Sementsov-Ogievskiy     pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
15037a3f542fSVladimir Sementsov-Ogievskiy     pad->buf = qemu_blockalign(bs, pad->buf_len);
15047a3f542fSVladimir Sementsov-Ogievskiy     pad->merge_reads = sum == pad->buf_len;
15057a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
15067a3f542fSVladimir Sementsov-Ogievskiy         pad->tail_buf = pad->buf + pad->buf_len - align;
15077a3f542fSVladimir Sementsov-Ogievskiy     }
15087a3f542fSVladimir Sementsov-Ogievskiy 
15097a3f542fSVladimir Sementsov-Ogievskiy     return true;
15107a3f542fSVladimir Sementsov-Ogievskiy }
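
/*
 * Worked example for bdrv_init_padding() (illustrative numbers): with
 * align = 512, a request of offset = 1000, bytes = 3000 gives head = 488 and
 * tail = 96 (end = 4000; 4000 % 512 == 416; 512 - 416 == 96). sum = 3584, so
 * buf_len = 2 * align = 1024: the head lives in buf[0..511] and the tail
 * chunk in buf[512..1023]. For a small request such as offset = 1000,
 * bytes = 20, sum == align, so buf_len == align == 512, merge_reads is true,
 * and a single aligned read covers both paddings.
 */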
15117a3f542fSVladimir Sementsov-Ogievskiy 
15127a3f542fSVladimir Sementsov-Ogievskiy static int bdrv_padding_rmw_read(BdrvChild *child,
15137a3f542fSVladimir Sementsov-Ogievskiy                                  BdrvTrackedRequest *req,
15147a3f542fSVladimir Sementsov-Ogievskiy                                  BdrvRequestPadding *pad,
15157a3f542fSVladimir Sementsov-Ogievskiy                                  bool zero_middle)
15167a3f542fSVladimir Sementsov-Ogievskiy {
15177a3f542fSVladimir Sementsov-Ogievskiy     QEMUIOVector local_qiov;
15187a3f542fSVladimir Sementsov-Ogievskiy     BlockDriverState *bs = child->bs;
15197a3f542fSVladimir Sementsov-Ogievskiy     uint64_t align = bs->bl.request_alignment;
15207a3f542fSVladimir Sementsov-Ogievskiy     int ret;
15217a3f542fSVladimir Sementsov-Ogievskiy 
15227a3f542fSVladimir Sementsov-Ogievskiy     assert(req->serialising && pad->buf);
15237a3f542fSVladimir Sementsov-Ogievskiy 
15247a3f542fSVladimir Sementsov-Ogievskiy     if (pad->head || pad->merge_reads) {
15257a3f542fSVladimir Sementsov-Ogievskiy         uint64_t bytes = pad->merge_reads ? pad->buf_len : align;
15267a3f542fSVladimir Sementsov-Ogievskiy 
15277a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
15287a3f542fSVladimir Sementsov-Ogievskiy 
15297a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
15307a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
15317a3f542fSVladimir Sementsov-Ogievskiy         }
15327a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
15337a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15347a3f542fSVladimir Sementsov-Ogievskiy         }
15357a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
153665cd4424SVladimir Sementsov-Ogievskiy                                   align, &local_qiov, 0, 0);
15377a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15387a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15397a3f542fSVladimir Sementsov-Ogievskiy         }
15407a3f542fSVladimir Sementsov-Ogievskiy         if (pad->head) {
15417a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
15427a3f542fSVladimir Sementsov-Ogievskiy         }
15437a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads && pad->tail) {
15447a3f542fSVladimir Sementsov-Ogievskiy             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15457a3f542fSVladimir Sementsov-Ogievskiy         }
15467a3f542fSVladimir Sementsov-Ogievskiy 
15477a3f542fSVladimir Sementsov-Ogievskiy         if (pad->merge_reads) {
15487a3f542fSVladimir Sementsov-Ogievskiy             goto zero_mem;
15497a3f542fSVladimir Sementsov-Ogievskiy         }
15507a3f542fSVladimir Sementsov-Ogievskiy     }
15517a3f542fSVladimir Sementsov-Ogievskiy 
15527a3f542fSVladimir Sementsov-Ogievskiy     if (pad->tail) {
15537a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
15547a3f542fSVladimir Sementsov-Ogievskiy 
15557a3f542fSVladimir Sementsov-Ogievskiy         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15567a3f542fSVladimir Sementsov-Ogievskiy         ret = bdrv_aligned_preadv(
15577a3f542fSVladimir Sementsov-Ogievskiy                 child, req,
15587a3f542fSVladimir Sementsov-Ogievskiy                 req->overlap_offset + req->overlap_bytes - align,
155965cd4424SVladimir Sementsov-Ogievskiy                 align, align, &local_qiov, 0, 0);
15607a3f542fSVladimir Sementsov-Ogievskiy         if (ret < 0) {
15617a3f542fSVladimir Sementsov-Ogievskiy             return ret;
15627a3f542fSVladimir Sementsov-Ogievskiy         }
15637a3f542fSVladimir Sementsov-Ogievskiy         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15647a3f542fSVladimir Sementsov-Ogievskiy     }
15657a3f542fSVladimir Sementsov-Ogievskiy 
15667a3f542fSVladimir Sementsov-Ogievskiy zero_mem:
15677a3f542fSVladimir Sementsov-Ogievskiy     if (zero_middle) {
15687a3f542fSVladimir Sementsov-Ogievskiy         memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
15697a3f542fSVladimir Sementsov-Ogievskiy     }
15707a3f542fSVladimir Sementsov-Ogievskiy 
15717a3f542fSVladimir Sementsov-Ogievskiy     return 0;
15727a3f542fSVladimir Sementsov-Ogievskiy }
15737a3f542fSVladimir Sementsov-Ogievskiy 
15747a3f542fSVladimir Sementsov-Ogievskiy static void bdrv_padding_destroy(BdrvRequestPadding *pad)
15757a3f542fSVladimir Sementsov-Ogievskiy {
15767a3f542fSVladimir Sementsov-Ogievskiy     if (pad->buf) {
15777a3f542fSVladimir Sementsov-Ogievskiy         qemu_vfree(pad->buf);
15787a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_destroy(&pad->local_qiov);
15797a3f542fSVladimir Sementsov-Ogievskiy     }
15807a3f542fSVladimir Sementsov-Ogievskiy }
15817a3f542fSVladimir Sementsov-Ogievskiy 
15827a3f542fSVladimir Sementsov-Ogievskiy /*
15837a3f542fSVladimir Sementsov-Ogievskiy  * bdrv_pad_request
15847a3f542fSVladimir Sementsov-Ogievskiy  *
15857a3f542fSVladimir Sementsov-Ogievskiy  * Exchange request parameters with padded request if needed. Does not include
15867a3f542fSVladimir Sementsov-Ogievskiy  * the RMW read of padding; bdrv_padding_rmw_read() should be called separately
15877a3f542fSVladimir Sementsov-Ogievskiy  * if needed.
15887a3f542fSVladimir Sementsov-Ogievskiy  *
15897a3f542fSVladimir Sementsov-Ogievskiy  * All parameters except @bs are in-out: they represent the original request on
15907a3f542fSVladimir Sementsov-Ogievskiy  * function call and the padded request (if padding is needed) on return.
15917a3f542fSVladimir Sementsov-Ogievskiy  *
15927a3f542fSVladimir Sementsov-Ogievskiy  * This function always succeeds.
15937a3f542fSVladimir Sementsov-Ogievskiy  */
15941acc3466SVladimir Sementsov-Ogievskiy static bool bdrv_pad_request(BlockDriverState *bs,
15951acc3466SVladimir Sementsov-Ogievskiy                              QEMUIOVector **qiov, size_t *qiov_offset,
15967a3f542fSVladimir Sementsov-Ogievskiy                              int64_t *offset, unsigned int *bytes,
15977a3f542fSVladimir Sementsov-Ogievskiy                              BdrvRequestPadding *pad)
15987a3f542fSVladimir Sementsov-Ogievskiy {
15997a3f542fSVladimir Sementsov-Ogievskiy     if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
16007a3f542fSVladimir Sementsov-Ogievskiy         return false;
16017a3f542fSVladimir Sementsov-Ogievskiy     }
16027a3f542fSVladimir Sementsov-Ogievskiy 
16037a3f542fSVladimir Sementsov-Ogievskiy     qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
16041acc3466SVladimir Sementsov-Ogievskiy                              *qiov, *qiov_offset, *bytes,
16057a3f542fSVladimir Sementsov-Ogievskiy                              pad->buf + pad->buf_len - pad->tail, pad->tail);
16067a3f542fSVladimir Sementsov-Ogievskiy     *bytes += pad->head + pad->tail;
16077a3f542fSVladimir Sementsov-Ogievskiy     *offset -= pad->head;
16087a3f542fSVladimir Sementsov-Ogievskiy     *qiov = &pad->local_qiov;
16091acc3466SVladimir Sementsov-Ogievskiy     *qiov_offset = 0;
16107a3f542fSVladimir Sementsov-Ogievskiy 
16117a3f542fSVladimir Sementsov-Ogievskiy     return true;
16127a3f542fSVladimir Sementsov-Ogievskiy }
16137a3f542fSVladimir Sementsov-Ogievskiy 
1614a03ef88fSKevin Wolf int coroutine_fn bdrv_co_preadv(BdrvChild *child,
161561007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
161661007b31SStefan Hajnoczi     BdrvRequestFlags flags)
161761007b31SStefan Hajnoczi {
16181acc3466SVladimir Sementsov-Ogievskiy     return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
16191acc3466SVladimir Sementsov-Ogievskiy }
16201acc3466SVladimir Sementsov-Ogievskiy 
16211acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
16221acc3466SVladimir Sementsov-Ogievskiy     int64_t offset, unsigned int bytes,
16231acc3466SVladimir Sementsov-Ogievskiy     QEMUIOVector *qiov, size_t qiov_offset,
16241acc3466SVladimir Sementsov-Ogievskiy     BdrvRequestFlags flags)
16251acc3466SVladimir Sementsov-Ogievskiy {
1626a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
162761007b31SStefan Hajnoczi     BdrvTrackedRequest req;
16287a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
162961007b31SStefan Hajnoczi     int ret;
163061007b31SStefan Hajnoczi 
16317a3f542fSVladimir Sementsov-Ogievskiy     trace_bdrv_co_preadv(bs, offset, bytes, flags);
163261007b31SStefan Hajnoczi 
163361007b31SStefan Hajnoczi     ret = bdrv_check_byte_request(bs, offset, bytes);
163461007b31SStefan Hajnoczi     if (ret < 0) {
163561007b31SStefan Hajnoczi         return ret;
163661007b31SStefan Hajnoczi     }
163761007b31SStefan Hajnoczi 
1638ac9d00bfSVladimir Sementsov-Ogievskiy     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1639ac9d00bfSVladimir Sementsov-Ogievskiy         /*
1640ac9d00bfSVladimir Sementsov-Ogievskiy          * Aligning a zero-length request is nonsense. Even if a driver assigns
1641ac9d00bfSVladimir Sementsov-Ogievskiy          * special meaning to zero-length requests (like
1642ac9d00bfSVladimir Sementsov-Ogievskiy          * qcow2_co_pwritev_compressed_part), we can't pass such a request to the
1643ac9d00bfSVladimir Sementsov-Ogievskiy          * driver because of request_alignment.
1644ac9d00bfSVladimir Sementsov-Ogievskiy          *
1645ac9d00bfSVladimir Sementsov-Ogievskiy          * Still, no reason to fail an occasional unaligned zero-length read.
1646ac9d00bfSVladimir Sementsov-Ogievskiy          */
1647ac9d00bfSVladimir Sementsov-Ogievskiy         return 0;
1648ac9d00bfSVladimir Sementsov-Ogievskiy     }
1649ac9d00bfSVladimir Sementsov-Ogievskiy 
165099723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
165199723548SPaolo Bonzini 
16529568b511SWen Congyang     /* Don't do copy-on-read if we read data before a write operation */
1653d73415a3SStefan Hajnoczi     if (qatomic_read(&bs->copy_on_read)) {
165461007b31SStefan Hajnoczi         flags |= BDRV_REQ_COPY_ON_READ;
165561007b31SStefan Hajnoczi     }
165661007b31SStefan Hajnoczi 
16571acc3466SVladimir Sementsov-Ogievskiy     bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad);
165861007b31SStefan Hajnoczi 
1659ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
16607a3f542fSVladimir Sementsov-Ogievskiy     ret = bdrv_aligned_preadv(child, &req, offset, bytes,
16617a3f542fSVladimir Sementsov-Ogievskiy                               bs->bl.request_alignment,
16621acc3466SVladimir Sementsov-Ogievskiy                               qiov, qiov_offset, flags);
166361007b31SStefan Hajnoczi     tracked_request_end(&req);
166499723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
166561007b31SStefan Hajnoczi 
16667a3f542fSVladimir Sementsov-Ogievskiy     bdrv_padding_destroy(&pad);
166761007b31SStefan Hajnoczi 
166861007b31SStefan Hajnoczi     return ret;
166961007b31SStefan Hajnoczi }
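
/*
 * Example (illustrative sketch, not part of the original code): a coroutine
 * reading into a plain buffer through bdrv_co_preadv(). The single-element
 * QEMUIOVector wrapper is all a caller needs; alignment and padding are
 * handled transparently above. The function name is hypothetical.
 */
static int coroutine_fn example_co_read_buf(BdrvChild *child, int64_t offset,
                                            void *buf, unsigned int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    return bdrv_co_preadv(child, offset, bytes, &qiov, 0);
}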
167061007b31SStefan Hajnoczi 
1671d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1672f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags)
167361007b31SStefan Hajnoczi {
167461007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
167561007b31SStefan Hajnoczi     QEMUIOVector qiov;
16760d93ed08SVladimir Sementsov-Ogievskiy     void *buf = NULL;
167761007b31SStefan Hajnoczi     int ret = 0;
1678465fe887SEric Blake     bool need_flush = false;
1679443668caSDenis V. Lunev     int head = 0;
1680443668caSDenis V. Lunev     int tail = 0;
168161007b31SStefan Hajnoczi 
1682cf081fcaSEric Blake     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1683a5b8dd2cSEric Blake     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1684a5b8dd2cSEric Blake                         bs->bl.request_alignment);
1685cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1686cf081fcaSEric Blake 
1687d470ad42SMax Reitz     if (!drv) {
1688d470ad42SMax Reitz         return -ENOMEDIUM;
1689d470ad42SMax Reitz     }
1690d470ad42SMax Reitz 
1691fe0480d6SKevin Wolf     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1692fe0480d6SKevin Wolf         return -ENOTSUP;
1693fe0480d6SKevin Wolf     }
1694fe0480d6SKevin Wolf 
1695b8d0a980SEric Blake     assert(alignment % bs->bl.request_alignment == 0);
1696b8d0a980SEric Blake     head = offset % alignment;
1697f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % alignment;
1698b8d0a980SEric Blake     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1699b8d0a980SEric Blake     assert(max_write_zeroes >= bs->bl.request_alignment);
170061007b31SStefan Hajnoczi 
1701f5a5ca79SManos Pitsidianakis     while (bytes > 0 && !ret) {
1702f5a5ca79SManos Pitsidianakis         int num = bytes;
170361007b31SStefan Hajnoczi 
170461007b31SStefan Hajnoczi         /* Align request.  Block drivers can expect the "bulk" of the request
1705443668caSDenis V. Lunev          * to be aligned, and that unaligned requests do not cross cluster
1706443668caSDenis V. Lunev          * boundaries.
170761007b31SStefan Hajnoczi          */
1708443668caSDenis V. Lunev         if (head) {
1709b2f95feeSEric Blake             /* Make a small request up to the first aligned sector. For
1710b2f95feeSEric Blake              * convenience, limit this request to max_transfer even if
1711b2f95feeSEric Blake              * we don't need to fall back to writes.  */
1712f5a5ca79SManos Pitsidianakis             num = MIN(MIN(bytes, max_transfer), alignment - head);
1713b2f95feeSEric Blake             head = (head + num) % alignment;
1714b2f95feeSEric Blake             assert(num < max_write_zeroes);
1715d05aa8bbSEric Blake         } else if (tail && num > alignment) {
1716443668caSDenis V. Lunev             /* Shorten the request to the last aligned sector.  */
1717443668caSDenis V. Lunev             num -= tail;
171861007b31SStefan Hajnoczi         }
171961007b31SStefan Hajnoczi 
172061007b31SStefan Hajnoczi         /* limit request size */
172161007b31SStefan Hajnoczi         if (num > max_write_zeroes) {
172261007b31SStefan Hajnoczi             num = max_write_zeroes;
172361007b31SStefan Hajnoczi         }
172461007b31SStefan Hajnoczi 
172561007b31SStefan Hajnoczi         ret = -ENOTSUP;
172661007b31SStefan Hajnoczi         /* First try the efficient write zeroes operation */
1727d05aa8bbSEric Blake         if (drv->bdrv_co_pwrite_zeroes) {
1728d05aa8bbSEric Blake             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1729d05aa8bbSEric Blake                                              flags & bs->supported_zero_flags);
1730d05aa8bbSEric Blake             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1731d05aa8bbSEric Blake                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1732d05aa8bbSEric Blake                 need_flush = true;
1733d05aa8bbSEric Blake             }
1734465fe887SEric Blake         } else {
1735465fe887SEric Blake             assert(!bs->supported_zero_flags);
173661007b31SStefan Hajnoczi         }
173761007b31SStefan Hajnoczi 
1738294682ccSAndrey Shinkevich         if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
173961007b31SStefan Hajnoczi             /* Fall back to bounce buffer if write zeroes is unsupported */
1740465fe887SEric Blake             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1741465fe887SEric Blake 
1742465fe887SEric Blake             if ((flags & BDRV_REQ_FUA) &&
1743465fe887SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1744465fe887SEric Blake                 /* No need for bdrv_driver_pwrite() to do a fallback
1745465fe887SEric Blake                  * flush on each chunk; use just one at the end */
1746465fe887SEric Blake                 write_flags &= ~BDRV_REQ_FUA;
1747465fe887SEric Blake                 need_flush = true;
1748465fe887SEric Blake             }
17495def6b80SEric Blake             num = MIN(num, max_transfer);
17500d93ed08SVladimir Sementsov-Ogievskiy             if (buf == NULL) {
17510d93ed08SVladimir Sementsov-Ogievskiy                 buf = qemu_try_blockalign0(bs, num);
17520d93ed08SVladimir Sementsov-Ogievskiy                 if (buf == NULL) {
175361007b31SStefan Hajnoczi                     ret = -ENOMEM;
175461007b31SStefan Hajnoczi                     goto fail;
175561007b31SStefan Hajnoczi                 }
175661007b31SStefan Hajnoczi             }
17570d93ed08SVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&qiov, buf, num);
175861007b31SStefan Hajnoczi 
1759ac850bf0SVladimir Sementsov-Ogievskiy             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
176061007b31SStefan Hajnoczi 
176161007b31SStefan Hajnoczi             /* Keep the bounce buffer around if it is big enough for
176261007b31SStefan Hajnoczi              * all future requests.
176361007b31SStefan Hajnoczi              */
17645def6b80SEric Blake             if (num < max_transfer) {
17650d93ed08SVladimir Sementsov-Ogievskiy                 qemu_vfree(buf);
17660d93ed08SVladimir Sementsov-Ogievskiy                 buf = NULL;
176761007b31SStefan Hajnoczi             }
176861007b31SStefan Hajnoczi         }
176961007b31SStefan Hajnoczi 
1770d05aa8bbSEric Blake         offset += num;
1771f5a5ca79SManos Pitsidianakis         bytes -= num;
177261007b31SStefan Hajnoczi     }
177361007b31SStefan Hajnoczi 
177461007b31SStefan Hajnoczi fail:
1775465fe887SEric Blake     if (ret == 0 && need_flush) {
1776465fe887SEric Blake         ret = bdrv_co_flush(bs);
1777465fe887SEric Blake     }
17780d93ed08SVladimir Sementsov-Ogievskiy     qemu_vfree(buf);
177961007b31SStefan Hajnoczi     return ret;
178061007b31SStefan Hajnoczi }
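
/*
 * Worked example for the alignment loop above (illustrative numbers): with
 * alignment = 4096, a request of offset = 2048, bytes = 12288 (and a large
 * max_transfer) has head = 2048 and tail = (2048 + 12288) % 4096 = 2048.
 * The loop issues three driver calls: an unaligned 2048-byte head up to the
 * first aligned sector, an aligned 8192-byte bulk write (num = 10240,
 * shortened by the tail), and a final unaligned 2048-byte tail, so that no
 * single request crosses an alignment boundary.
 */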
178161007b31SStefan Hajnoczi 
178285fe2479SFam Zheng static inline int coroutine_fn
178385fe2479SFam Zheng bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
178485fe2479SFam Zheng                           BdrvTrackedRequest *req, int flags)
178585fe2479SFam Zheng {
178685fe2479SFam Zheng     BlockDriverState *bs = child->bs;
178785fe2479SFam Zheng     bool waited;
178885fe2479SFam Zheng     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
178985fe2479SFam Zheng 
179085fe2479SFam Zheng     if (bs->read_only) {
179185fe2479SFam Zheng         return -EPERM;
179285fe2479SFam Zheng     }
179385fe2479SFam Zheng 
179485fe2479SFam Zheng     assert(!(bs->open_flags & BDRV_O_INACTIVE));
179585fe2479SFam Zheng     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
179685fe2479SFam Zheng     assert(!(flags & ~BDRV_REQ_MASK));
179785fe2479SFam Zheng 
179885fe2479SFam Zheng     if (flags & BDRV_REQ_SERIALISING) {
179918fbd0deSPaolo Bonzini         waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
180018fbd0deSPaolo Bonzini         /*
180118fbd0deSPaolo Bonzini          * For a misaligned request we should have already waited earlier,
180218fbd0deSPaolo Bonzini          * because we come after bdrv_padding_rmw_read which must be called
180318fbd0deSPaolo Bonzini          * with the request already marked as serialising.
180418fbd0deSPaolo Bonzini          */
180518fbd0deSPaolo Bonzini         assert(!waited ||
180618fbd0deSPaolo Bonzini                (req->offset == req->overlap_offset &&
180718fbd0deSPaolo Bonzini                 req->bytes == req->overlap_bytes));
180818fbd0deSPaolo Bonzini     } else {
180918fbd0deSPaolo Bonzini         bdrv_wait_serialising_requests(req);
181085fe2479SFam Zheng     }
181185fe2479SFam Zheng 
181285fe2479SFam Zheng     assert(req->overlap_offset <= offset);
181385fe2479SFam Zheng     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1814cd47d792SFam Zheng     assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
181585fe2479SFam Zheng 
1816cd47d792SFam Zheng     switch (req->type) {
1817cd47d792SFam Zheng     case BDRV_TRACKED_WRITE:
1818cd47d792SFam Zheng     case BDRV_TRACKED_DISCARD:
181985fe2479SFam Zheng         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
182085fe2479SFam Zheng             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
182185fe2479SFam Zheng         } else {
182285fe2479SFam Zheng             assert(child->perm & BLK_PERM_WRITE);
182385fe2479SFam Zheng         }
1824cd47d792SFam Zheng         return notifier_with_return_list_notify(&bs->before_write_notifiers,
1825cd47d792SFam Zheng                                                 req);
1826cd47d792SFam Zheng     case BDRV_TRACKED_TRUNCATE:
1827cd47d792SFam Zheng         assert(child->perm & BLK_PERM_RESIZE);
1828cd47d792SFam Zheng         return 0;
1829cd47d792SFam Zheng     default:
1830cd47d792SFam Zheng         abort();
1831cd47d792SFam Zheng     }
183285fe2479SFam Zheng }
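
/*
 * Note: bdrv_co_write_req_prepare() above and bdrv_co_write_req_finish()
 * below bracket every tracked write-like request: prepare checks permissions,
 * serialises against overlapping requests and runs the before-write
 * notifiers; finish bumps the write generation, grows bs->total_sectors for
 * size-extending writes and truncates, and marks dirty bitmaps.
 */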
183385fe2479SFam Zheng 
183485fe2479SFam Zheng static inline void coroutine_fn
183585fe2479SFam Zheng bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
183685fe2479SFam Zheng                          BdrvTrackedRequest *req, int ret)
183785fe2479SFam Zheng {
183885fe2479SFam Zheng     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
183985fe2479SFam Zheng     BlockDriverState *bs = child->bs;
184085fe2479SFam Zheng 
1841d73415a3SStefan Hajnoczi     qatomic_inc(&bs->write_gen);
184285fe2479SFam Zheng 
184300695c27SFam Zheng     /*
184400695c27SFam Zheng      * Discard cannot extend the image, but in error handling cases, such as
184500695c27SFam Zheng      * when reverting a qcow2 cluster allocation, the discarded range can extend
184600695c27SFam Zheng      * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
184700695c27SFam Zheng      * here. Instead, just skip it, since semantically a discard request
184800695c27SFam Zheng      * beyond EOF cannot expand the image anyway.
184900695c27SFam Zheng      */
18507f8f03efSFam Zheng     if (ret == 0 &&
1851cd47d792SFam Zheng         (req->type == BDRV_TRACKED_TRUNCATE ||
1852cd47d792SFam Zheng          end_sector > bs->total_sectors) &&
185300695c27SFam Zheng         req->type != BDRV_TRACKED_DISCARD) {
18547f8f03efSFam Zheng         bs->total_sectors = end_sector;
18557f8f03efSFam Zheng         bdrv_parent_cb_resize(bs);
18567f8f03efSFam Zheng         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
185785fe2479SFam Zheng     }
185800695c27SFam Zheng     if (req->bytes) {
185900695c27SFam Zheng         switch (req->type) {
186000695c27SFam Zheng         case BDRV_TRACKED_WRITE:
186100695c27SFam Zheng             stat64_max(&bs->wr_highest_offset, offset + bytes);
186200695c27SFam Zheng             /* fall through, to set dirty bits */
186300695c27SFam Zheng         case BDRV_TRACKED_DISCARD:
18647f8f03efSFam Zheng             bdrv_set_dirty(bs, offset, bytes);
186500695c27SFam Zheng             break;
186600695c27SFam Zheng         default:
186700695c27SFam Zheng             break;
186800695c27SFam Zheng         }
186900695c27SFam Zheng     }
187085fe2479SFam Zheng }
187185fe2479SFam Zheng 
187261007b31SStefan Hajnoczi /*
187304ed95f4SEric Blake  * Forwards an already correctly aligned write request to the BlockDriver,
187404ed95f4SEric Blake  * after possibly fragmenting it.
187561007b31SStefan Hajnoczi  */
187685c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
187761007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
187828c4da28SVladimir Sementsov-Ogievskiy     int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
187961007b31SStefan Hajnoczi {
188085c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
188161007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
188261007b31SStefan Hajnoczi     int ret;
188361007b31SStefan Hajnoczi 
188404ed95f4SEric Blake     uint64_t bytes_remaining = bytes;
188504ed95f4SEric Blake     int max_transfer;
188661007b31SStefan Hajnoczi 
1887d470ad42SMax Reitz     if (!drv) {
1888d470ad42SMax Reitz         return -ENOMEDIUM;
1889d470ad42SMax Reitz     }
1890d470ad42SMax Reitz 
1891d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
1892d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
1893d6883bc9SVladimir Sementsov-Ogievskiy     }
1894d6883bc9SVladimir Sementsov-Ogievskiy 
1895cff86b38SEric Blake     assert(is_power_of_2(align));
1896cff86b38SEric Blake     assert((offset & (align - 1)) == 0);
1897cff86b38SEric Blake     assert((bytes & (align - 1)) == 0);
189828c4da28SVladimir Sementsov-Ogievskiy     assert(!qiov || qiov_offset + bytes <= qiov->size);
189904ed95f4SEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
190004ed95f4SEric Blake                                    align);
190161007b31SStefan Hajnoczi 
190285fe2479SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
190361007b31SStefan Hajnoczi 
190461007b31SStefan Hajnoczi     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1905c1499a5eSEric Blake         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
190628c4da28SVladimir Sementsov-Ogievskiy         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
190761007b31SStefan Hajnoczi         flags |= BDRV_REQ_ZERO_WRITE;
190861007b31SStefan Hajnoczi         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
190961007b31SStefan Hajnoczi             flags |= BDRV_REQ_MAY_UNMAP;
191061007b31SStefan Hajnoczi         }
191161007b31SStefan Hajnoczi     }
191261007b31SStefan Hajnoczi 
191361007b31SStefan Hajnoczi     if (ret < 0) {
191461007b31SStefan Hajnoczi         /* Do nothing, write notifier decided to fail this request */
191561007b31SStefan Hajnoczi     } else if (flags & BDRV_REQ_ZERO_WRITE) {
19169a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
19179896c876SKevin Wolf         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
19183ea1a091SPavel Butsykin     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
191928c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
192028c4da28SVladimir Sementsov-Ogievskiy                                              qiov, qiov_offset);
192104ed95f4SEric Blake     } else if (bytes <= max_transfer) {
19229a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV);
192328c4da28SVladimir Sementsov-Ogievskiy         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
192404ed95f4SEric Blake     } else {
192504ed95f4SEric Blake         bdrv_debug_event(bs, BLKDBG_PWRITEV);
192604ed95f4SEric Blake         while (bytes_remaining) {
192704ed95f4SEric Blake             int num = MIN(bytes_remaining, max_transfer);
192804ed95f4SEric Blake             int local_flags = flags;
192904ed95f4SEric Blake 
193004ed95f4SEric Blake             assert(num);
193104ed95f4SEric Blake             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
193204ed95f4SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
193304ed95f4SEric Blake                 /* If FUA is going to be emulated by flush, we only
193404ed95f4SEric Blake                  * need to flush on the last iteration */
193504ed95f4SEric Blake                 local_flags &= ~BDRV_REQ_FUA;
193604ed95f4SEric Blake             }
193704ed95f4SEric Blake 
193804ed95f4SEric Blake             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1939134b7decSMax Reitz                                       num, qiov,
1940134b7decSMax Reitz                                       qiov_offset + bytes - bytes_remaining,
194128c4da28SVladimir Sementsov-Ogievskiy                                       local_flags);
194204ed95f4SEric Blake             if (ret < 0) {
194304ed95f4SEric Blake                 break;
194404ed95f4SEric Blake             }
194504ed95f4SEric Blake             bytes_remaining -= num;
194604ed95f4SEric Blake         }
194761007b31SStefan Hajnoczi     }
19489a4f4c31SKevin Wolf     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
194961007b31SStefan Hajnoczi 
195061007b31SStefan Hajnoczi     if (ret >= 0) {
195104ed95f4SEric Blake         ret = 0;
195261007b31SStefan Hajnoczi     }
195385fe2479SFam Zheng     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
195461007b31SStefan Hajnoczi 
195561007b31SStefan Hajnoczi     return ret;
195661007b31SStefan Hajnoczi }
195761007b31SStefan Hajnoczi 
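/*
 * A minimal standalone sketch of the fragmentation loop above: a request
 * larger than max_transfer is split into chunks, and when FUA has to be
 * emulated by a flush, the flag is dropped on every chunk but the last so
 * that only one flush is issued.  The 1 MiB limit and the fua_emulated
 * flag are assumed example values, not taken from a real BlockDriverState;
 * the block is kept compiled out.
 */
#if 0
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_TRANSFER (1 << 20)   /* assumed per-request limit */
#define EX_REQ_FUA      0x1         /* stand-in for BDRV_REQ_FUA */

static void ex_fragment_write(int64_t offset, int64_t bytes, int flags,
                              bool fua_emulated)
{
    int64_t remaining = bytes;

    while (remaining) {
        int64_t num = remaining < EX_MAX_TRANSFER ? remaining
                                                  : EX_MAX_TRANSFER;
        int local_flags = flags;

        /* Flush only once, on the final fragment, when FUA is emulated */
        if (num < remaining && (flags & EX_REQ_FUA) && fua_emulated) {
            local_flags &= ~EX_REQ_FUA;
        }
        printf("write offset=%" PRId64 " bytes=%" PRId64 " flags=%#x\n",
               offset + bytes - remaining, num, local_flags);
        remaining -= num;
    }
}

int main(void)
{
    /* Three full chunks without FUA, then a 4 KiB tail that keeps FUA */
    ex_fragment_write(0, 3 * (int64_t)EX_MAX_TRANSFER + 4096,
                      EX_REQ_FUA, true);
    return 0;
}
#endif
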
195885c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
19599eeb6dd1SFam Zheng                                                 int64_t offset,
19609eeb6dd1SFam Zheng                                                 unsigned int bytes,
19619eeb6dd1SFam Zheng                                                 BdrvRequestFlags flags,
19629eeb6dd1SFam Zheng                                                 BdrvTrackedRequest *req)
19639eeb6dd1SFam Zheng {
196485c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
19659eeb6dd1SFam Zheng     QEMUIOVector local_qiov;
1966a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
19679eeb6dd1SFam Zheng     int ret = 0;
19687a3f542fSVladimir Sementsov-Ogievskiy     bool padding;
19697a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
19709eeb6dd1SFam Zheng 
19717a3f542fSVladimir Sementsov-Ogievskiy     padding = bdrv_init_padding(bs, offset, bytes, &pad);
19727a3f542fSVladimir Sementsov-Ogievskiy     if (padding) {
1973304d9d7fSMax Reitz         bdrv_mark_request_serialising(req, align);
19749eeb6dd1SFam Zheng 
19757a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, req, &pad, true);
19767a3f542fSVladimir Sementsov-Ogievskiy 
19777a3f542fSVladimir Sementsov-Ogievskiy         if (pad.head || pad.merge_reads) {
19787a3f542fSVladimir Sementsov-Ogievskiy             int64_t aligned_offset = offset & ~(align - 1);
19797a3f542fSVladimir Sementsov-Ogievskiy             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
19807a3f542fSVladimir Sementsov-Ogievskiy 
19817a3f542fSVladimir Sementsov-Ogievskiy             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
19827a3f542fSVladimir Sementsov-Ogievskiy             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
198328c4da28SVladimir Sementsov-Ogievskiy                                        align, &local_qiov, 0,
19849eeb6dd1SFam Zheng                                        flags & ~BDRV_REQ_ZERO_WRITE);
19857a3f542fSVladimir Sementsov-Ogievskiy             if (ret < 0 || pad.merge_reads) {
19867a3f542fSVladimir Sementsov-Ogievskiy                 /* Error or all work is done */
19877a3f542fSVladimir Sementsov-Ogievskiy                 goto out;
19889eeb6dd1SFam Zheng             }
19897a3f542fSVladimir Sementsov-Ogievskiy             offset += write_bytes - pad.head;
19907a3f542fSVladimir Sementsov-Ogievskiy             bytes -= write_bytes - pad.head;
19917a3f542fSVladimir Sementsov-Ogievskiy         }
19929eeb6dd1SFam Zheng     }
19939eeb6dd1SFam Zheng 
19949eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
19959eeb6dd1SFam Zheng     if (bytes >= align) {
19969eeb6dd1SFam Zheng         /* Write the aligned part in the middle. */
19979eeb6dd1SFam Zheng         uint64_t aligned_bytes = bytes & ~(align - 1);
199885c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
199928c4da28SVladimir Sementsov-Ogievskiy                                    NULL, 0, flags);
20009eeb6dd1SFam Zheng         if (ret < 0) {
20017a3f542fSVladimir Sementsov-Ogievskiy             goto out;
20029eeb6dd1SFam Zheng         }
20039eeb6dd1SFam Zheng         bytes -= aligned_bytes;
20049eeb6dd1SFam Zheng         offset += aligned_bytes;
20059eeb6dd1SFam Zheng     }
20069eeb6dd1SFam Zheng 
20079eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
20089eeb6dd1SFam Zheng     if (bytes) {
20097a3f542fSVladimir Sementsov-Ogievskiy         assert(align == pad.tail + bytes);
20109eeb6dd1SFam Zheng 
20117a3f542fSVladimir Sementsov-Ogievskiy         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
201285c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
201328c4da28SVladimir Sementsov-Ogievskiy                                    &local_qiov, 0,
201428c4da28SVladimir Sementsov-Ogievskiy                                    flags & ~BDRV_REQ_ZERO_WRITE);
20159eeb6dd1SFam Zheng     }
20169eeb6dd1SFam Zheng 
20177a3f542fSVladimir Sementsov-Ogievskiy out:
20187a3f542fSVladimir Sementsov-Ogievskiy     bdrv_padding_destroy(&pad);
20197a3f542fSVladimir Sementsov-Ogievskiy 
20207a3f542fSVladimir Sementsov-Ogievskiy     return ret;
20219eeb6dd1SFam Zheng }
20229eeb6dd1SFam Zheng 
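/*
 * A minimal sketch of the decomposition performed above, assuming align is
 * a power of two (as bdrv_aligned_pwritev() asserts): an unaligned head
 * and tail each become one read-modify-write of a full alignment block,
 * the middle is written as zeroes directly, and a request that fits inside
 * a single block collapses into one RMW (roughly the pad.merge_reads
 * case).  Kept compiled out; the sample offsets are invented.
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void ex_zero_pwrite_plan(int64_t offset, int64_t bytes, int64_t align)
{
    int64_t head = offset & (align - 1);
    int64_t end = offset + bytes;
    int64_t tail = end & (align - 1);
    int64_t aligned_offset = offset - head;

    if (head && end <= aligned_offset + align) {
        /* The request fits in one aligned block: a single RMW covers it */
        printf("merged RMW  [%" PRId64 ", +%" PRId64 ")\n",
               aligned_offset, align);
        return;
    }
    if (head) {
        printf("head RMW    [%" PRId64 ", +%" PRId64 ")\n",
               aligned_offset, align);
        offset = aligned_offset + align;
    }
    if (end - tail > offset) {
        printf("zero middle [%" PRId64 ", +%" PRId64 ")\n",
               offset, end - tail - offset);
    }
    if (tail) {
        printf("tail RMW    [%" PRId64 ", +%" PRId64 ")\n", end - tail, align);
    }
}

int main(void)
{
    ex_zero_pwrite_plan(1000, 9000, 4096);  /* head, middle and tail */
    ex_zero_pwrite_plan(1000, 2000, 4096);  /* fits in a single block */
    return 0;
}
#endif
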
202361007b31SStefan Hajnoczi /*
202461007b31SStefan Hajnoczi  * Handle a write request in coroutine context
202561007b31SStefan Hajnoczi  */
2026a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
202761007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
202861007b31SStefan Hajnoczi     BdrvRequestFlags flags)
202961007b31SStefan Hajnoczi {
20301acc3466SVladimir Sementsov-Ogievskiy     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
20311acc3466SVladimir Sementsov-Ogievskiy }
20321acc3466SVladimir Sementsov-Ogievskiy 
20331acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
20341acc3466SVladimir Sementsov-Ogievskiy     int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset,
20351acc3466SVladimir Sementsov-Ogievskiy     BdrvRequestFlags flags)
20361acc3466SVladimir Sementsov-Ogievskiy {
2037a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
203861007b31SStefan Hajnoczi     BdrvTrackedRequest req;
2039a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
20407a3f542fSVladimir Sementsov-Ogievskiy     BdrvRequestPadding pad;
204161007b31SStefan Hajnoczi     int ret;
204261007b31SStefan Hajnoczi 
2043f42cf447SDaniel P. Berrange     trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
2044f42cf447SDaniel P. Berrange 
204561007b31SStefan Hajnoczi     if (!bs->drv) {
204661007b31SStefan Hajnoczi         return -ENOMEDIUM;
204761007b31SStefan Hajnoczi     }
204861007b31SStefan Hajnoczi 
204961007b31SStefan Hajnoczi     ret = bdrv_check_byte_request(bs, offset, bytes);
205061007b31SStefan Hajnoczi     if (ret < 0) {
205161007b31SStefan Hajnoczi         return ret;
205261007b31SStefan Hajnoczi     }
205361007b31SStefan Hajnoczi 
2054f2208fdcSAlberto Garcia     /* If the request is misaligned then we can't make it efficient */
2055f2208fdcSAlberto Garcia     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2056f2208fdcSAlberto Garcia         !QEMU_IS_ALIGNED(offset | bytes, align))
2057f2208fdcSAlberto Garcia     {
2058f2208fdcSAlberto Garcia         return -ENOTSUP;
2059f2208fdcSAlberto Garcia     }
2060f2208fdcSAlberto Garcia 
2061ac9d00bfSVladimir Sementsov-Ogievskiy     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2062ac9d00bfSVladimir Sementsov-Ogievskiy         /*
2063ac9d00bfSVladimir Sementsov-Ogievskiy          * Aligning a zero-length request is nonsense. Even if the driver gives
2064ac9d00bfSVladimir Sementsov-Ogievskiy          * zero-length requests a special meaning (as qcow2_co_pwritev_compressed_part
2065ac9d00bfSVladimir Sementsov-Ogievskiy          * does), we can't pass one to the driver because of request_alignment.
2066ac9d00bfSVladimir Sementsov-Ogievskiy          *
2067ac9d00bfSVladimir Sementsov-Ogievskiy          * Still, there is no reason to return an error if someone occasionally
2068ac9d00bfSVladimir Sementsov-Ogievskiy          * does an unaligned zero-length write.
2069ac9d00bfSVladimir Sementsov-Ogievskiy          */
2070ac9d00bfSVladimir Sementsov-Ogievskiy         return 0;
2071ac9d00bfSVladimir Sementsov-Ogievskiy     }
2072ac9d00bfSVladimir Sementsov-Ogievskiy 
207399723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
207461007b31SStefan Hajnoczi     /*
207561007b31SStefan Hajnoczi      * Align write if necessary by performing a read-modify-write cycle.
207661007b31SStefan Hajnoczi      * Pad qiov with the read parts and be sure to have a tracked request not
207761007b31SStefan Hajnoczi      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
207861007b31SStefan Hajnoczi      */
2079ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
208061007b31SStefan Hajnoczi 
208118a59f03SAnton Nefedov     if (flags & BDRV_REQ_ZERO_WRITE) {
208285c97ca7SKevin Wolf         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
20839eeb6dd1SFam Zheng         goto out;
20849eeb6dd1SFam Zheng     }
20859eeb6dd1SFam Zheng 
20861acc3466SVladimir Sementsov-Ogievskiy     if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
2087304d9d7fSMax Reitz         bdrv_mark_request_serialising(&req, align);
20887a3f542fSVladimir Sementsov-Ogievskiy         bdrv_padding_rmw_read(child, &req, &pad, false);
208961007b31SStefan Hajnoczi     }
209061007b31SStefan Hajnoczi 
209185c97ca7SKevin Wolf     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
20921acc3466SVladimir Sementsov-Ogievskiy                                qiov, qiov_offset, flags);
209361007b31SStefan Hajnoczi 
20947a3f542fSVladimir Sementsov-Ogievskiy     bdrv_padding_destroy(&pad);
209561007b31SStefan Hajnoczi 
20969eeb6dd1SFam Zheng out:
20979eeb6dd1SFam Zheng     tracked_request_end(&req);
209899723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
20997a3f542fSVladimir Sementsov-Ogievskiy 
210061007b31SStefan Hajnoczi     return ret;
210161007b31SStefan Hajnoczi }
210261007b31SStefan Hajnoczi 
2103a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2104f5a5ca79SManos Pitsidianakis                                        int bytes, BdrvRequestFlags flags)
210561007b31SStefan Hajnoczi {
2106f5a5ca79SManos Pitsidianakis     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
210761007b31SStefan Hajnoczi 
2108a03ef88fSKevin Wolf     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
210961007b31SStefan Hajnoczi         flags &= ~BDRV_REQ_MAY_UNMAP;
211061007b31SStefan Hajnoczi     }
211161007b31SStefan Hajnoczi 
2112f5a5ca79SManos Pitsidianakis     return bdrv_co_pwritev(child, offset, bytes, NULL,
211361007b31SStefan Hajnoczi                            BDRV_REQ_ZERO_WRITE | flags);
211461007b31SStefan Hajnoczi }
211561007b31SStefan Hajnoczi 
21164085f5c7SJohn Snow /*
21174085f5c7SJohn Snow  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
21184085f5c7SJohn Snow  */
21194085f5c7SJohn Snow int bdrv_flush_all(void)
21204085f5c7SJohn Snow {
21214085f5c7SJohn Snow     BdrvNextIterator it;
21224085f5c7SJohn Snow     BlockDriverState *bs = NULL;
21234085f5c7SJohn Snow     int result = 0;
21244085f5c7SJohn Snow 
2125c8aa7895SPavel Dovgalyuk     /*
2126c8aa7895SPavel Dovgalyuk      * The bdrv queue is managed by record/replay;
2127c8aa7895SPavel Dovgalyuk      * creating a new flush request for stopping
2128c8aa7895SPavel Dovgalyuk      * the VM may break determinism.
2129c8aa7895SPavel Dovgalyuk      */
2130c8aa7895SPavel Dovgalyuk     if (replay_events_enabled()) {
2131c8aa7895SPavel Dovgalyuk         return result;
2132c8aa7895SPavel Dovgalyuk     }
2133c8aa7895SPavel Dovgalyuk 
21344085f5c7SJohn Snow     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
21354085f5c7SJohn Snow         AioContext *aio_context = bdrv_get_aio_context(bs);
21364085f5c7SJohn Snow         int ret;
21374085f5c7SJohn Snow 
21384085f5c7SJohn Snow         aio_context_acquire(aio_context);
21394085f5c7SJohn Snow         ret = bdrv_flush(bs);
21404085f5c7SJohn Snow         if (ret < 0 && !result) {
21414085f5c7SJohn Snow             result = ret;
21424085f5c7SJohn Snow         }
21434085f5c7SJohn Snow         aio_context_release(aio_context);
21444085f5c7SJohn Snow     }
21454085f5c7SJohn Snow 
21464085f5c7SJohn Snow     return result;
21474085f5c7SJohn Snow }
21484085f5c7SJohn Snow 
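/*
 * The aggregation pattern above (also used for flush_children in
 * bdrv_co_flush() below) keeps the first error but still visits every
 * node, so one failing device cannot stop the remaining ones from being
 * flushed.  A minimal sketch with an invented array of per-node results,
 * kept compiled out:
 */
#if 0
#include <errno.h>
#include <stdio.h>

int main(void)
{
    /* Pretend per-node flush results: the middle node fails */
    const int node_ret[] = { 0, -EIO, 0 };
    int result = 0;

    for (unsigned i = 0; i < sizeof(node_ret) / sizeof(node_ret[0]); i++) {
        /* Keep the first error; later successes must not clear it */
        if (node_ret[i] < 0 && !result) {
            result = node_ret[i];
        }
    }
    printf("aggregate result: %d\n", result);  /* the first error, -EIO */
    return 0;
}
#endif
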
214961007b31SStefan Hajnoczi /*
215061007b31SStefan Hajnoczi  * Returns the allocation status of the specified byte range.
215161007b31SStefan Hajnoczi  * Drivers not implementing the functionality are assumed not to support
215261007b31SStefan Hajnoczi  * backing files; hence, the entire range is reported as allocated.
215361007b31SStefan Hajnoczi  *
215486a3d5c6SEric Blake  * If 'want_zero' is true, the caller is querying for mapping
215586a3d5c6SEric Blake  * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
215686a3d5c6SEric Blake  * _ZERO where possible; otherwise, the result favors larger 'pnum',
215786a3d5c6SEric Blake  * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2158c9ce8c4dSEric Blake  *
21592e8bc787SEric Blake  * If 'offset' is beyond the end of the disk image the return value is
2160fb0d8654SEric Blake  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
216161007b31SStefan Hajnoczi  *
21622e8bc787SEric Blake  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2163fb0d8654SEric Blake  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2164fb0d8654SEric Blake  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
216567a0fd2aSFam Zheng  *
21662e8bc787SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
21672e8bc787SEric Blake  * following the specified offset) that are easily known to be in the
21682e8bc787SEric Blake  * same allocated/unallocated state.  Note that a second call starting
21692e8bc787SEric Blake  * at the original offset plus returned pnum may have the same status.
21702e8bc787SEric Blake  * The returned value is non-zero on success except at end-of-file.
21712e8bc787SEric Blake  *
21722e8bc787SEric Blake  * Returns negative errno on failure.  Otherwise, if the
21732e8bc787SEric Blake  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
21742e8bc787SEric Blake  * set to the host mapping and BDS corresponding to the guest offset.
217561007b31SStefan Hajnoczi  */
21762e8bc787SEric Blake static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2177c9ce8c4dSEric Blake                                              bool want_zero,
21782e8bc787SEric Blake                                              int64_t offset, int64_t bytes,
21792e8bc787SEric Blake                                              int64_t *pnum, int64_t *map,
218067a0fd2aSFam Zheng                                              BlockDriverState **file)
218161007b31SStefan Hajnoczi {
21822e8bc787SEric Blake     int64_t total_size;
21832e8bc787SEric Blake     int64_t n; /* bytes */
2184efa6e2edSEric Blake     int ret;
21852e8bc787SEric Blake     int64_t local_map = 0;
2186298a1665SEric Blake     BlockDriverState *local_file = NULL;
2187efa6e2edSEric Blake     int64_t aligned_offset, aligned_bytes;
2188efa6e2edSEric Blake     uint32_t align;
2189549ec0d9SMax Reitz     bool has_filtered_child;
219061007b31SStefan Hajnoczi 
2191298a1665SEric Blake     assert(pnum);
2192298a1665SEric Blake     *pnum = 0;
21932e8bc787SEric Blake     total_size = bdrv_getlength(bs);
21942e8bc787SEric Blake     if (total_size < 0) {
21952e8bc787SEric Blake         ret = total_size;
2196298a1665SEric Blake         goto early_out;
219761007b31SStefan Hajnoczi     }
219861007b31SStefan Hajnoczi 
21992e8bc787SEric Blake     if (offset >= total_size) {
2200298a1665SEric Blake         ret = BDRV_BLOCK_EOF;
2201298a1665SEric Blake         goto early_out;
220261007b31SStefan Hajnoczi     }
22032e8bc787SEric Blake     if (!bytes) {
2204298a1665SEric Blake         ret = 0;
2205298a1665SEric Blake         goto early_out;
22069cdcfd9fSEric Blake     }
220761007b31SStefan Hajnoczi 
22082e8bc787SEric Blake     n = total_size - offset;
22092e8bc787SEric Blake     if (n < bytes) {
22102e8bc787SEric Blake         bytes = n;
221161007b31SStefan Hajnoczi     }
221261007b31SStefan Hajnoczi 
2213d470ad42SMax Reitz     /* Must be non-NULL or bdrv_getlength() would have failed */
2214d470ad42SMax Reitz     assert(bs->drv);
2215549ec0d9SMax Reitz     has_filtered_child = bdrv_filter_child(bs);
2216549ec0d9SMax Reitz     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
22172e8bc787SEric Blake         *pnum = bytes;
221861007b31SStefan Hajnoczi         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
22192e8bc787SEric Blake         if (offset + bytes == total_size) {
2220fb0d8654SEric Blake             ret |= BDRV_BLOCK_EOF;
2221fb0d8654SEric Blake         }
222261007b31SStefan Hajnoczi         if (bs->drv->protocol_name) {
22232e8bc787SEric Blake             ret |= BDRV_BLOCK_OFFSET_VALID;
22242e8bc787SEric Blake             local_map = offset;
2225298a1665SEric Blake             local_file = bs;
222661007b31SStefan Hajnoczi         }
2227298a1665SEric Blake         goto early_out;
222861007b31SStefan Hajnoczi     }
222961007b31SStefan Hajnoczi 
223099723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2231efa6e2edSEric Blake 
2232efa6e2edSEric Blake     /* Round out to request_alignment boundaries */
223386a3d5c6SEric Blake     align = bs->bl.request_alignment;
2234efa6e2edSEric Blake     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2235efa6e2edSEric Blake     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2236efa6e2edSEric Blake 
2237549ec0d9SMax Reitz     if (bs->drv->bdrv_co_block_status) {
223886a3d5c6SEric Blake         ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
223986a3d5c6SEric Blake                                             aligned_bytes, pnum, &local_map,
224086a3d5c6SEric Blake                                             &local_file);
2241549ec0d9SMax Reitz     } else {
2242549ec0d9SMax Reitz         /* Default code for filters */
2243549ec0d9SMax Reitz 
2244549ec0d9SMax Reitz         local_file = bdrv_filter_bs(bs);
2245549ec0d9SMax Reitz         assert(local_file);
2246549ec0d9SMax Reitz 
2247549ec0d9SMax Reitz         *pnum = aligned_bytes;
2248549ec0d9SMax Reitz         local_map = aligned_offset;
2249549ec0d9SMax Reitz         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2250549ec0d9SMax Reitz     }
225186a3d5c6SEric Blake     if (ret < 0) {
225286a3d5c6SEric Blake         *pnum = 0;
225386a3d5c6SEric Blake         goto out;
225486a3d5c6SEric Blake     }
2255efa6e2edSEric Blake 
2256efa6e2edSEric Blake     /*
2257636cb512SEric Blake      * The driver's result must be a non-zero multiple of request_alignment.
2258efa6e2edSEric Blake      * Clamp pnum and adjust map to original request.
2259efa6e2edSEric Blake      */
2260636cb512SEric Blake     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2261636cb512SEric Blake            align > offset - aligned_offset);
226269f47505SVladimir Sementsov-Ogievskiy     if (ret & BDRV_BLOCK_RECURSE) {
226369f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_DATA);
226469f47505SVladimir Sementsov-Ogievskiy         assert(ret & BDRV_BLOCK_OFFSET_VALID);
226569f47505SVladimir Sementsov-Ogievskiy         assert(!(ret & BDRV_BLOCK_ZERO));
226669f47505SVladimir Sementsov-Ogievskiy     }
226769f47505SVladimir Sementsov-Ogievskiy 
2268efa6e2edSEric Blake     *pnum -= offset - aligned_offset;
2269efa6e2edSEric Blake     if (*pnum > bytes) {
2270efa6e2edSEric Blake         *pnum = bytes;
2271efa6e2edSEric Blake     }
2272efa6e2edSEric Blake     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2273efa6e2edSEric Blake         local_map += offset - aligned_offset;
2274efa6e2edSEric Blake     }
227561007b31SStefan Hajnoczi 
227661007b31SStefan Hajnoczi     if (ret & BDRV_BLOCK_RAW) {
2277298a1665SEric Blake         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
22782e8bc787SEric Blake         ret = bdrv_co_block_status(local_file, want_zero, local_map,
22792e8bc787SEric Blake                                    *pnum, pnum, &local_map, &local_file);
228099723548SPaolo Bonzini         goto out;
228161007b31SStefan Hajnoczi     }
228261007b31SStefan Hajnoczi 
228361007b31SStefan Hajnoczi     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
228461007b31SStefan Hajnoczi         ret |= BDRV_BLOCK_ALLOCATED;
2285a2adbbf6SVladimir Sementsov-Ogievskiy     } else if (want_zero && bs->drv->supports_backing) {
2286cb850315SMax Reitz         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2287cb850315SMax Reitz 
2288cb850315SMax Reitz         if (cow_bs) {
2289cb850315SMax Reitz             int64_t size2 = bdrv_getlength(cow_bs);
2290c9ce8c4dSEric Blake 
22912e8bc787SEric Blake             if (size2 >= 0 && offset >= size2) {
229261007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
229361007b31SStefan Hajnoczi             }
22947b1efe99SVladimir Sementsov-Ogievskiy         } else {
22957b1efe99SVladimir Sementsov-Ogievskiy             ret |= BDRV_BLOCK_ZERO;
22967b1efe99SVladimir Sementsov-Ogievskiy         }
229761007b31SStefan Hajnoczi     }
229861007b31SStefan Hajnoczi 
229969f47505SVladimir Sementsov-Ogievskiy     if (want_zero && ret & BDRV_BLOCK_RECURSE &&
230069f47505SVladimir Sementsov-Ogievskiy         local_file && local_file != bs &&
230161007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
230261007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_OFFSET_VALID)) {
23032e8bc787SEric Blake         int64_t file_pnum;
23042e8bc787SEric Blake         int ret2;
230561007b31SStefan Hajnoczi 
23062e8bc787SEric Blake         ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
23072e8bc787SEric Blake                                     *pnum, &file_pnum, NULL, NULL);
230861007b31SStefan Hajnoczi         if (ret2 >= 0) {
230961007b31SStefan Hajnoczi             /* Ignore errors.  This is just providing extra information;
231061007b31SStefan Hajnoczi              * it is useful but not necessary.
231161007b31SStefan Hajnoczi              */
2312c61e684eSEric Blake             if (ret2 & BDRV_BLOCK_EOF &&
2313c61e684eSEric Blake                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2314c61e684eSEric Blake                 /*
2315c61e684eSEric Blake                  * It is valid for the format block driver to read
2316c61e684eSEric Blake                  * beyond the end of the underlying file's current
2317c61e684eSEric Blake                  * size; such areas read as zero.
2318c61e684eSEric Blake                  */
231961007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
232061007b31SStefan Hajnoczi             } else {
232161007b31SStefan Hajnoczi                 /* Limit request to the range reported by the protocol driver */
232261007b31SStefan Hajnoczi                 *pnum = file_pnum;
232361007b31SStefan Hajnoczi                 ret |= (ret2 & BDRV_BLOCK_ZERO);
232461007b31SStefan Hajnoczi             }
232561007b31SStefan Hajnoczi         }
232661007b31SStefan Hajnoczi     }
232761007b31SStefan Hajnoczi 
232899723548SPaolo Bonzini out:
232999723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
23302e8bc787SEric Blake     if (ret >= 0 && offset + *pnum == total_size) {
2331fb0d8654SEric Blake         ret |= BDRV_BLOCK_EOF;
2332fb0d8654SEric Blake     }
2333298a1665SEric Blake early_out:
2334298a1665SEric Blake     if (file) {
2335298a1665SEric Blake         *file = local_file;
2336298a1665SEric Blake     }
23372e8bc787SEric Blake     if (map) {
23382e8bc787SEric Blake         *map = local_map;
23392e8bc787SEric Blake     }
234061007b31SStefan Hajnoczi     return ret;
234161007b31SStefan Hajnoczi }
234261007b31SStefan Hajnoczi 
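/*
 * A minimal worked example of the round-out-and-clamp arithmetic above,
 * assuming non-negative offsets and a 512-byte request_alignment; the
 * host mapping base is an invented value.  Kept compiled out.
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_ALIGN_DOWN(x, a) ((x) / (a) * (a))           /* x >= 0 only */
#define EX_ROUND_UP(x, a)   EX_ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
    int64_t offset = 1000, bytes = 512, align = 512;

    /* Round the query out to request_alignment boundaries */
    int64_t aligned_offset = EX_ALIGN_DOWN(offset, align);
    int64_t aligned_bytes = EX_ROUND_UP(offset + bytes, align)
                            - aligned_offset;

    /* Pretend the driver reported the whole aligned window as one extent */
    int64_t pnum = aligned_bytes;
    int64_t map = 8 * 1024 * 1024 + aligned_offset;     /* assumed host base */

    /* Clamp pnum and adjust map back to the caller's original request */
    pnum -= offset - aligned_offset;
    if (pnum > bytes) {
        pnum = bytes;
    }
    map += offset - aligned_offset;

    printf("query [%" PRId64 ", +%" PRId64 ") -> driver sees [%" PRId64
           ", +%" PRId64 ")\n", offset, bytes, aligned_offset, aligned_bytes);
    printf("caller gets pnum=%" PRId64 " map=%" PRId64 "\n", pnum, map);
    return 0;
}
#endif
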
234321c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
2344f9e694cbSVladimir Sementsov-Ogievskiy bdrv_co_common_block_status_above(BlockDriverState *bs,
2345ba3f0e25SFam Zheng                                   BlockDriverState *base,
2346c9ce8c4dSEric Blake                                   bool want_zero,
23475b648c67SEric Blake                                   int64_t offset,
23485b648c67SEric Blake                                   int64_t bytes,
23495b648c67SEric Blake                                   int64_t *pnum,
23505b648c67SEric Blake                                   int64_t *map,
235167a0fd2aSFam Zheng                                   BlockDriverState **file)
2352ba3f0e25SFam Zheng {
2353ba3f0e25SFam Zheng     BlockDriverState *p;
23545b648c67SEric Blake     int ret = 0;
2355c61e684eSEric Blake     bool first = true;
2356ba3f0e25SFam Zheng 
2357ba3f0e25SFam Zheng     assert(bs != base);
2358cb850315SMax Reitz     for (p = bs; p != base; p = bdrv_filter_or_cow_bs(p)) {
23595b648c67SEric Blake         ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
23605b648c67SEric Blake                                    file);
2361c61e684eSEric Blake         if (ret < 0) {
2362c61e684eSEric Blake             break;
2363c61e684eSEric Blake         }
2364c61e684eSEric Blake         if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2365c61e684eSEric Blake             /*
2366c61e684eSEric Blake              * Reading beyond the end of the file continues to read
2367c61e684eSEric Blake              * zeroes, but we can only widen the result to the
2368c61e684eSEric Blake              * unallocated length we learned from an earlier
2369c61e684eSEric Blake              * iteration.
2370c61e684eSEric Blake              */
23715b648c67SEric Blake             *pnum = bytes;
2372c61e684eSEric Blake         }
2373c61e684eSEric Blake         if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
2374ba3f0e25SFam Zheng             break;
2375ba3f0e25SFam Zheng         }
23765b648c67SEric Blake         /* [offset, offset + *pnum) is unallocated on this layer, which
23775b648c67SEric Blake          * could be only the first part of [offset, offset + bytes).  */
23785b648c67SEric Blake         bytes = MIN(bytes, *pnum);
2379c61e684eSEric Blake         first = false;
2380ba3f0e25SFam Zheng     }
2381ba3f0e25SFam Zheng     return ret;
2382ba3f0e25SFam Zheng }
2383ba3f0e25SFam Zheng 
238431826642SEric Blake int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
238531826642SEric Blake                             int64_t offset, int64_t bytes, int64_t *pnum,
238631826642SEric Blake                             int64_t *map, BlockDriverState **file)
2387c9ce8c4dSEric Blake {
238831826642SEric Blake     return bdrv_common_block_status_above(bs, base, true, offset, bytes,
238931826642SEric Blake                                           pnum, map, file);
2390c9ce8c4dSEric Blake }
2391c9ce8c4dSEric Blake 
2392237d78f8SEric Blake int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2393237d78f8SEric Blake                       int64_t *pnum, int64_t *map, BlockDriverState **file)
2394ba3f0e25SFam Zheng {
2395cb850315SMax Reitz     return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
239631826642SEric Blake                                    offset, bytes, pnum, map, file);
2397ba3f0e25SFam Zheng }
2398ba3f0e25SFam Zheng 
2399d6a644bbSEric Blake int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2400d6a644bbSEric Blake                                    int64_t bytes, int64_t *pnum)
240161007b31SStefan Hajnoczi {
24027ddb99b9SEric Blake     int ret;
24037ddb99b9SEric Blake     int64_t dummy;
2404d6a644bbSEric Blake 
2405cb850315SMax Reitz     ret = bdrv_common_block_status_above(bs, bdrv_filter_or_cow_bs(bs), false,
2406cb850315SMax Reitz                                          offset, bytes, pnum ? pnum : &dummy,
2407cb850315SMax Reitz                                          NULL, NULL);
240861007b31SStefan Hajnoczi     if (ret < 0) {
240961007b31SStefan Hajnoczi         return ret;
241061007b31SStefan Hajnoczi     }
241161007b31SStefan Hajnoczi     return !!(ret & BDRV_BLOCK_ALLOCATED);
241261007b31SStefan Hajnoczi }
241361007b31SStefan Hajnoczi 
241461007b31SStefan Hajnoczi /*
241561007b31SStefan Hajnoczi  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
241661007b31SStefan Hajnoczi  *
2417170d3bd3SAndrey Shinkevich  * Return 1 if (a prefix of) the given range is allocated in any image
2418170d3bd3SAndrey Shinkevich  * between BASE and TOP (BASE is only included if include_base is set).
2419170d3bd3SAndrey Shinkevich  * BASE can be NULL to check if the given offset is allocated in any
2420170d3bd3SAndrey Shinkevich  * image of the chain.  Return 0 otherwise, or negative errno on
2421170d3bd3SAndrey Shinkevich  * failure.
242261007b31SStefan Hajnoczi  *
242351b0a488SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
242451b0a488SEric Blake  * following the specified offset) that are known to be in the same
242551b0a488SEric Blake  * allocated/unallocated state.  Note that a subsequent call starting
242651b0a488SEric Blake  * at 'offset + *pnum' may return the same allocation status (in other
242751b0a488SEric Blake  * words, the result is not necessarily the maximum possible range);
242851b0a488SEric Blake  * but 'pnum' will only be 0 when end of file is reached.
242961007b31SStefan Hajnoczi  *
243061007b31SStefan Hajnoczi  */
243161007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top,
243261007b31SStefan Hajnoczi                             BlockDriverState *base,
2433170d3bd3SAndrey Shinkevich                             bool include_base, int64_t offset,
2434170d3bd3SAndrey Shinkevich                             int64_t bytes, int64_t *pnum)
243561007b31SStefan Hajnoczi {
243661007b31SStefan Hajnoczi     BlockDriverState *intermediate;
243751b0a488SEric Blake     int ret;
243851b0a488SEric Blake     int64_t n = bytes;
243961007b31SStefan Hajnoczi 
2440170d3bd3SAndrey Shinkevich     assert(base || !include_base);
2441170d3bd3SAndrey Shinkevich 
244261007b31SStefan Hajnoczi     intermediate = top;
2443170d3bd3SAndrey Shinkevich     while (include_base || intermediate != base) {
2444d6a644bbSEric Blake         int64_t pnum_inter;
2445c00716beSEric Blake         int64_t size_inter;
2446d6a644bbSEric Blake 
2447170d3bd3SAndrey Shinkevich         assert(intermediate);
244851b0a488SEric Blake         ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
244961007b31SStefan Hajnoczi         if (ret < 0) {
245061007b31SStefan Hajnoczi             return ret;
2451d6a644bbSEric Blake         }
2452d6a644bbSEric Blake         if (ret) {
245351b0a488SEric Blake             *pnum = pnum_inter;
245461007b31SStefan Hajnoczi             return 1;
245561007b31SStefan Hajnoczi         }
245661007b31SStefan Hajnoczi 
245751b0a488SEric Blake         size_inter = bdrv_getlength(intermediate);
2458c00716beSEric Blake         if (size_inter < 0) {
2459c00716beSEric Blake             return size_inter;
2460c00716beSEric Blake         }
246151b0a488SEric Blake         if (n > pnum_inter &&
246251b0a488SEric Blake             (intermediate == top || offset + pnum_inter < size_inter)) {
246351b0a488SEric Blake             n = pnum_inter;
246461007b31SStefan Hajnoczi         }
246561007b31SStefan Hajnoczi 
2466170d3bd3SAndrey Shinkevich         if (intermediate == base) {
2467170d3bd3SAndrey Shinkevich             break;
2468170d3bd3SAndrey Shinkevich         }
2469170d3bd3SAndrey Shinkevich 
2470cb850315SMax Reitz         intermediate = bdrv_filter_or_cow_bs(intermediate);
247161007b31SStefan Hajnoczi     }
247261007b31SStefan Hajnoczi 
247361007b31SStefan Hajnoczi     *pnum = n;
247461007b31SStefan Hajnoczi     return 0;
247561007b31SStefan Hajnoczi }
247661007b31SStefan Hajnoczi 
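/*
 * A toy model of the chain walk above: each layer holds per-cluster
 * allocation bits, a cluster counts as allocated if any layer from the
 * top down (optionally including the base) has it set, and the run
 * length plays the role of *pnum.  The three layers and eight clusters
 * are invented example data; kept compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define EX_LAYERS   3               /* 0 = top, EX_LAYERS - 1 = base */
#define EX_CLUSTERS 8

static const bool ex_allocated[EX_LAYERS][EX_CLUSTERS] = {
    { 0, 0, 1, 1, 0, 0, 0, 0 },    /* top */
    { 0, 1, 1, 0, 0, 0, 0, 0 },    /* intermediate */
    { 1, 1, 0, 0, 0, 0, 1, 0 },    /* base */
};

static int ex_is_allocated_above(int cluster, bool include_base)
{
    int layers = include_base ? EX_LAYERS : EX_LAYERS - 1;

    for (int layer = 0; layer < layers; layer++) {
        if (ex_allocated[layer][cluster]) {
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    /* Scan the disk in runs of identical status, as *pnum would report */
    for (int c = 0; c < EX_CLUSTERS; ) {
        int status = ex_is_allocated_above(c, false);
        int n = 1;

        while (c + n < EX_CLUSTERS &&
               ex_is_allocated_above(c + n, false) == status) {
            n++;
        }
        printf("clusters [%d, +%d): %sallocated above the base\n",
               c, n, status ? "" : "un");
        c += n;
    }
    return 0;
}
#endif
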
247721c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
24781a8ae822SKevin Wolf bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
24791a8ae822SKevin Wolf                    bool is_read)
24801a8ae822SKevin Wolf {
24811a8ae822SKevin Wolf     BlockDriver *drv = bs->drv;
2482c4db2e25SMax Reitz     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2483dc88a467SStefan Hajnoczi     int ret = -ENOTSUP;
2484dc88a467SStefan Hajnoczi 
2485dc88a467SStefan Hajnoczi     bdrv_inc_in_flight(bs);
24861a8ae822SKevin Wolf 
24871a8ae822SKevin Wolf     if (!drv) {
2488dc88a467SStefan Hajnoczi         ret = -ENOMEDIUM;
24891a8ae822SKevin Wolf     } else if (drv->bdrv_load_vmstate) {
2490dc88a467SStefan Hajnoczi         if (is_read) {
2491dc88a467SStefan Hajnoczi             ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2492dc88a467SStefan Hajnoczi         } else {
2493dc88a467SStefan Hajnoczi             ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2494dc88a467SStefan Hajnoczi         }
2495c4db2e25SMax Reitz     } else if (child_bs) {
2496c4db2e25SMax Reitz         ret = bdrv_co_rw_vmstate(child_bs, qiov, pos, is_read);
24971a8ae822SKevin Wolf     }
24981a8ae822SKevin Wolf 
2499dc88a467SStefan Hajnoczi     bdrv_dec_in_flight(bs);
2500dc88a467SStefan Hajnoczi     return ret;
25011a8ae822SKevin Wolf }
25021a8ae822SKevin Wolf 
250361007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
250461007b31SStefan Hajnoczi                       int64_t pos, int size)
250561007b31SStefan Hajnoczi {
25060d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2507b433d942SKevin Wolf     int ret;
250861007b31SStefan Hajnoczi 
2509b433d942SKevin Wolf     ret = bdrv_writev_vmstate(bs, &qiov, pos);
2510b433d942SKevin Wolf     if (ret < 0) {
2511b433d942SKevin Wolf         return ret;
2512b433d942SKevin Wolf     }
2513b433d942SKevin Wolf 
2514b433d942SKevin Wolf     return size;
251561007b31SStefan Hajnoczi }
251661007b31SStefan Hajnoczi 
251761007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
251861007b31SStefan Hajnoczi {
25191a8ae822SKevin Wolf     return bdrv_rw_vmstate(bs, qiov, pos, false);
252061007b31SStefan Hajnoczi }
252161007b31SStefan Hajnoczi 
252261007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
252361007b31SStefan Hajnoczi                       int64_t pos, int size)
252461007b31SStefan Hajnoczi {
25250d93ed08SVladimir Sementsov-Ogievskiy     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2526b433d942SKevin Wolf     int ret;
25275ddda0b8SKevin Wolf 
2528b433d942SKevin Wolf     ret = bdrv_readv_vmstate(bs, &qiov, pos);
2529b433d942SKevin Wolf     if (ret < 0) {
2530b433d942SKevin Wolf         return ret;
2531b433d942SKevin Wolf     }
2532b433d942SKevin Wolf 
2533b433d942SKevin Wolf     return size;
25345ddda0b8SKevin Wolf }
25355ddda0b8SKevin Wolf 
25365ddda0b8SKevin Wolf int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
25375ddda0b8SKevin Wolf {
25381a8ae822SKevin Wolf     return bdrv_rw_vmstate(bs, qiov, pos, true);
253961007b31SStefan Hajnoczi }
254061007b31SStefan Hajnoczi 
254161007b31SStefan Hajnoczi /**************************************************************/
254261007b31SStefan Hajnoczi /* async I/Os */
254361007b31SStefan Hajnoczi 
254461007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb)
254561007b31SStefan Hajnoczi {
254661007b31SStefan Hajnoczi     qemu_aio_ref(acb);
254761007b31SStefan Hajnoczi     bdrv_aio_cancel_async(acb);
254861007b31SStefan Hajnoczi     while (acb->refcnt > 1) {
254961007b31SStefan Hajnoczi         if (acb->aiocb_info->get_aio_context) {
255061007b31SStefan Hajnoczi             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
255161007b31SStefan Hajnoczi         } else if (acb->bs) {
25522f47da5fSPaolo Bonzini             /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
25532f47da5fSPaolo Bonzini              * assert that we're not using an I/O thread.  Thread-safe
25542f47da5fSPaolo Bonzini              * code should use bdrv_aio_cancel_async exclusively.
25552f47da5fSPaolo Bonzini              */
25562f47da5fSPaolo Bonzini             assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
255761007b31SStefan Hajnoczi             aio_poll(bdrv_get_aio_context(acb->bs), true);
255861007b31SStefan Hajnoczi         } else {
255961007b31SStefan Hajnoczi             abort();
256061007b31SStefan Hajnoczi         }
256161007b31SStefan Hajnoczi     }
256261007b31SStefan Hajnoczi     qemu_aio_unref(acb);
256361007b31SStefan Hajnoczi }
256461007b31SStefan Hajnoczi 
256561007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
256661007b31SStefan Hajnoczi  * cancel_async; otherwise we do nothing and let the request complete normally.
256761007b31SStefan Hajnoczi  * In either case the completion callback must be called. */
256861007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
256961007b31SStefan Hajnoczi {
257061007b31SStefan Hajnoczi     if (acb->aiocb_info->cancel_async) {
257161007b31SStefan Hajnoczi         acb->aiocb_info->cancel_async(acb);
257261007b31SStefan Hajnoczi     }
257361007b31SStefan Hajnoczi }
257461007b31SStefan Hajnoczi 
257561007b31SStefan Hajnoczi /**************************************************************/
257661007b31SStefan Hajnoczi /* Coroutine block device emulation */
257761007b31SStefan Hajnoczi 
257861007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
257961007b31SStefan Hajnoczi {
2580883833e2SMax Reitz     BdrvChild *primary_child = bdrv_primary_child(bs);
2581883833e2SMax Reitz     BdrvChild *child;
258249ca6259SFam Zheng     int current_gen;
258349ca6259SFam Zheng     int ret = 0;
258461007b31SStefan Hajnoczi 
258599723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2586c32b82afSPavel Dovgalyuk 
2587e914404eSFam Zheng     if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
258849ca6259SFam Zheng         bdrv_is_sg(bs)) {
258949ca6259SFam Zheng         goto early_exit;
259049ca6259SFam Zheng     }
259149ca6259SFam Zheng 
25923783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
2593d73415a3SStefan Hajnoczi     current_gen = qatomic_read(&bs->write_gen);
25943ff2f67aSEvgeny Yakovlev 
25953ff2f67aSEvgeny Yakovlev     /* Wait until any previous flushes are completed */
259699723548SPaolo Bonzini     while (bs->active_flush_req) {
25973783fa3dSPaolo Bonzini         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
25983ff2f67aSEvgeny Yakovlev     }
25993ff2f67aSEvgeny Yakovlev 
26003783fa3dSPaolo Bonzini     /* Flushes reach this point in nondecreasing current_gen order.  */
260199723548SPaolo Bonzini     bs->active_flush_req = true;
26023783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
26033ff2f67aSEvgeny Yakovlev 
2604c32b82afSPavel Dovgalyuk     /* Write back all layers by calling one driver function */
2605c32b82afSPavel Dovgalyuk     if (bs->drv->bdrv_co_flush) {
2606c32b82afSPavel Dovgalyuk         ret = bs->drv->bdrv_co_flush(bs);
2607c32b82afSPavel Dovgalyuk         goto out;
2608c32b82afSPavel Dovgalyuk     }
2609c32b82afSPavel Dovgalyuk 
261061007b31SStefan Hajnoczi     /* Write back cached data to the OS even with cache=unsafe */
2611883833e2SMax Reitz     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
261261007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_os) {
261361007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_os(bs);
261461007b31SStefan Hajnoczi         if (ret < 0) {
2615cdb5e315SFam Zheng             goto out;
261661007b31SStefan Hajnoczi         }
261761007b31SStefan Hajnoczi     }
261861007b31SStefan Hajnoczi 
261961007b31SStefan Hajnoczi     /* But don't actually force it to the disk with cache=unsafe */
262061007b31SStefan Hajnoczi     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2621883833e2SMax Reitz         goto flush_children;
262261007b31SStefan Hajnoczi     }
262361007b31SStefan Hajnoczi 
26243ff2f67aSEvgeny Yakovlev     /* Check if we really need to flush anything */
26253ff2f67aSEvgeny Yakovlev     if (bs->flushed_gen == current_gen) {
2626883833e2SMax Reitz         goto flush_children;
26273ff2f67aSEvgeny Yakovlev     }
26283ff2f67aSEvgeny Yakovlev 
2629883833e2SMax Reitz     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2630d470ad42SMax Reitz     if (!bs->drv) {
2631d470ad42SMax Reitz         /* bs->drv->bdrv_co_flush() might have ejected the BDS
2632d470ad42SMax Reitz          * (even in case of apparent success) */
2633d470ad42SMax Reitz         ret = -ENOMEDIUM;
2634d470ad42SMax Reitz         goto out;
2635d470ad42SMax Reitz     }
263661007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_disk) {
263761007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_disk(bs);
263861007b31SStefan Hajnoczi     } else if (bs->drv->bdrv_aio_flush) {
263961007b31SStefan Hajnoczi         BlockAIOCB *acb;
264061007b31SStefan Hajnoczi         CoroutineIOCompletion co = {
264161007b31SStefan Hajnoczi             .coroutine = qemu_coroutine_self(),
264261007b31SStefan Hajnoczi         };
264361007b31SStefan Hajnoczi 
264461007b31SStefan Hajnoczi         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
264561007b31SStefan Hajnoczi         if (acb == NULL) {
264661007b31SStefan Hajnoczi             ret = -EIO;
264761007b31SStefan Hajnoczi         } else {
264861007b31SStefan Hajnoczi             qemu_coroutine_yield();
264961007b31SStefan Hajnoczi             ret = co.ret;
265061007b31SStefan Hajnoczi         }
265161007b31SStefan Hajnoczi     } else {
265261007b31SStefan Hajnoczi         /*
265361007b31SStefan Hajnoczi          * Some block drivers always operate in either writethrough or unsafe
265461007b31SStefan Hajnoczi          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
265561007b31SStefan Hajnoczi          * know how the server works (because the behaviour is hardcoded or
265661007b31SStefan Hajnoczi          * depends on server-side configuration), so we can't ensure that
265761007b31SStefan Hajnoczi          * everything is safe on disk. Returning an error doesn't work because
265861007b31SStefan Hajnoczi          * that would break guests even if the server operates in writethrough
265961007b31SStefan Hajnoczi          * mode.
266061007b31SStefan Hajnoczi          *
266161007b31SStefan Hajnoczi          * Let's hope the user knows what he's doing.
266261007b31SStefan Hajnoczi          */
266361007b31SStefan Hajnoczi         ret = 0;
266461007b31SStefan Hajnoczi     }
26653ff2f67aSEvgeny Yakovlev 
266661007b31SStefan Hajnoczi     if (ret < 0) {
2667cdb5e315SFam Zheng         goto out;
266861007b31SStefan Hajnoczi     }
266961007b31SStefan Hajnoczi 
267061007b31SStefan Hajnoczi     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
267161007b31SStefan Hajnoczi      * in the case of cache=unsafe, so there are no useless flushes.
267261007b31SStefan Hajnoczi      */
2673883833e2SMax Reitz flush_children:
2674883833e2SMax Reitz     ret = 0;
2675883833e2SMax Reitz     QLIST_FOREACH(child, &bs->children, next) {
2676883833e2SMax Reitz         if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
2677883833e2SMax Reitz             int this_child_ret = bdrv_co_flush(child->bs);
2678883833e2SMax Reitz             if (!ret) {
2679883833e2SMax Reitz                 ret = this_child_ret;
2680883833e2SMax Reitz             }
2681883833e2SMax Reitz         }
2682883833e2SMax Reitz     }
2683883833e2SMax Reitz 
2684cdb5e315SFam Zheng out:
26853ff2f67aSEvgeny Yakovlev     /* Notify any pending flushes that we have completed */
2686e6af1e08SKevin Wolf     if (ret == 0) {
26873ff2f67aSEvgeny Yakovlev         bs->flushed_gen = current_gen;
2688e6af1e08SKevin Wolf     }
26893783fa3dSPaolo Bonzini 
26903783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
269199723548SPaolo Bonzini     bs->active_flush_req = false;
2692156af3acSDenis V. Lunev     /* Return value is ignored - it's ok if wait queue is empty */
2693156af3acSDenis V. Lunev     qemu_co_queue_next(&bs->flush_queue);
26943783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
26953ff2f67aSEvgeny Yakovlev 
269649ca6259SFam Zheng early_exit:
269799723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2698cdb5e315SFam Zheng     return ret;
269961007b31SStefan Hajnoczi }
270061007b31SStefan Hajnoczi 
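/*
 * A single-threaded sketch of the generation tracking above: write_gen is
 * sampled before flushing and flushed_gen is only advanced on success, so
 * a flush with no intervening writes becomes a no-op.  The real code does
 * this under reqs_lock with atomics and serialises concurrent flushes
 * through flush_queue; that machinery is omitted here.  Kept compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static unsigned ex_write_gen;       /* bumped by every completed write */
static unsigned ex_flushed_gen;     /* generation known to be on disk */

static void ex_write(void)
{
    ex_write_gen++;
}

static bool ex_flush(void)
{
    unsigned current_gen = ex_write_gen;    /* sample before flushing */

    if (ex_flushed_gen == current_gen) {
        return false;                       /* nothing new since last flush */
    }
    /* ... issue the actual device flush here ... */
    ex_flushed_gen = current_gen;           /* only advance on success */
    return true;
}

int main(void)
{
    ex_write();
    printf("flush issued: %d\n", ex_flush());   /* 1: new data to flush */
    printf("flush issued: %d\n", ex_flush());   /* 0: coalesced away */
    return 0;
}
#endif
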
2701d93e5726SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2702d93e5726SVladimir Sementsov-Ogievskiy                                   int64_t bytes)
270361007b31SStefan Hajnoczi {
2704b1066c87SFam Zheng     BdrvTrackedRequest req;
27059f1963b3SEric Blake     int max_pdiscard, ret;
27063482b9bcSEric Blake     int head, tail, align;
27070b9fd3f4SFam Zheng     BlockDriverState *bs = child->bs;
270861007b31SStefan Hajnoczi 
2709d93e5726SVladimir Sementsov-Ogievskiy     if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
271061007b31SStefan Hajnoczi         return -ENOMEDIUM;
271161007b31SStefan Hajnoczi     }
271261007b31SStefan Hajnoczi 
2713d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
2714d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
2715d6883bc9SVladimir Sementsov-Ogievskiy     }
2716d6883bc9SVladimir Sementsov-Ogievskiy 
2717d93e5726SVladimir Sementsov-Ogievskiy     if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
2718d93e5726SVladimir Sementsov-Ogievskiy         return -EIO;
271961007b31SStefan Hajnoczi     }
272061007b31SStefan Hajnoczi 
272161007b31SStefan Hajnoczi     /* Do nothing if disabled.  */
272261007b31SStefan Hajnoczi     if (!(bs->open_flags & BDRV_O_UNMAP)) {
272361007b31SStefan Hajnoczi         return 0;
272461007b31SStefan Hajnoczi     }
272561007b31SStefan Hajnoczi 
272602aefe43SEric Blake     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
272761007b31SStefan Hajnoczi         return 0;
272861007b31SStefan Hajnoczi     }
272961007b31SStefan Hajnoczi 
27303482b9bcSEric Blake     /* Discard is advisory, but some devices track and coalesce
27313482b9bcSEric Blake      * unaligned requests, so we must pass everything down rather than
27323482b9bcSEric Blake      * round here.  Still, most devices will just silently ignore
27333482b9bcSEric Blake      * unaligned requests (by returning -ENOTSUP), so we must fragment
27343482b9bcSEric Blake      * the request accordingly.  */
273502aefe43SEric Blake     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2736b8d0a980SEric Blake     assert(align % bs->bl.request_alignment == 0);
2737b8d0a980SEric Blake     head = offset % align;
2738f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % align;
27399f1963b3SEric Blake 
274099723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2741f5a5ca79SManos Pitsidianakis     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
274250824995SFam Zheng 
274300695c27SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
2744ec050f77SDenis V. Lunev     if (ret < 0) {
2745ec050f77SDenis V. Lunev         goto out;
2746ec050f77SDenis V. Lunev     }
2747ec050f77SDenis V. Lunev 
27489f1963b3SEric Blake     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
27499f1963b3SEric Blake                                    align);
27503482b9bcSEric Blake     assert(max_pdiscard >= bs->bl.request_alignment);
27519f1963b3SEric Blake 
2752f5a5ca79SManos Pitsidianakis     while (bytes > 0) {
2753d93e5726SVladimir Sementsov-Ogievskiy         int64_t num = bytes;
27543482b9bcSEric Blake 
27553482b9bcSEric Blake         if (head) {
27563482b9bcSEric Blake             /* Make small requests to get to alignment boundaries. */
2757f5a5ca79SManos Pitsidianakis             num = MIN(bytes, align - head);
27583482b9bcSEric Blake             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
27593482b9bcSEric Blake                 num %= bs->bl.request_alignment;
27603482b9bcSEric Blake             }
27613482b9bcSEric Blake             head = (head + num) % align;
27623482b9bcSEric Blake             assert(num < max_pdiscard);
27633482b9bcSEric Blake         } else if (tail) {
27643482b9bcSEric Blake             if (num > align) {
27653482b9bcSEric Blake                 /* Shorten the request to the last aligned cluster.  */
27663482b9bcSEric Blake                 num -= tail;
27673482b9bcSEric Blake             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
27683482b9bcSEric Blake                        tail > bs->bl.request_alignment) {
27693482b9bcSEric Blake                 tail %= bs->bl.request_alignment;
27703482b9bcSEric Blake                 num -= tail;
27713482b9bcSEric Blake             }
27723482b9bcSEric Blake         }
27733482b9bcSEric Blake         /* limit request size */
27743482b9bcSEric Blake         if (num > max_pdiscard) {
27753482b9bcSEric Blake             num = max_pdiscard;
27763482b9bcSEric Blake         }
277761007b31SStefan Hajnoczi 
2778d470ad42SMax Reitz         if (!bs->drv) {
2779d470ad42SMax Reitz             ret = -ENOMEDIUM;
2780d470ad42SMax Reitz             goto out;
2781d470ad42SMax Reitz         }
278247a5486dSEric Blake         if (bs->drv->bdrv_co_pdiscard) {
278347a5486dSEric Blake             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
278461007b31SStefan Hajnoczi         } else {
278561007b31SStefan Hajnoczi             BlockAIOCB *acb;
278661007b31SStefan Hajnoczi             CoroutineIOCompletion co = {
278761007b31SStefan Hajnoczi                 .coroutine = qemu_coroutine_self(),
278861007b31SStefan Hajnoczi             };
278961007b31SStefan Hajnoczi 
27904da444a0SEric Blake             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
279161007b31SStefan Hajnoczi                                              bdrv_co_io_em_complete, &co);
279261007b31SStefan Hajnoczi             if (acb == NULL) {
2793b1066c87SFam Zheng                 ret = -EIO;
2794b1066c87SFam Zheng                 goto out;
279561007b31SStefan Hajnoczi             } else {
279661007b31SStefan Hajnoczi                 qemu_coroutine_yield();
279761007b31SStefan Hajnoczi                 ret = co.ret;
279861007b31SStefan Hajnoczi             }
279961007b31SStefan Hajnoczi         }
280061007b31SStefan Hajnoczi         if (ret && ret != -ENOTSUP) {
2801b1066c87SFam Zheng             goto out;
280261007b31SStefan Hajnoczi         }
280361007b31SStefan Hajnoczi 
28049f1963b3SEric Blake         offset += num;
2805f5a5ca79SManos Pitsidianakis         bytes -= num;
280661007b31SStefan Hajnoczi     }
2807b1066c87SFam Zheng     ret = 0;
2808b1066c87SFam Zheng out:
280900695c27SFam Zheng     bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
2810b1066c87SFam Zheng     tracked_request_end(&req);
281199723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2812b1066c87SFam Zheng     return ret;
281361007b31SStefan Hajnoczi }
281461007b31SStefan Hajnoczi 
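/*
 * A standalone sketch of the discard fragmentation above: small requests
 * walk the head up to the discard alignment, the middle is issued in
 * aligned pieces, and the tail is trimmed the same way.  The 512-byte
 * request alignment, 4 KiB discard alignment and 1 GiB max_pdiscard are
 * assumed example limits.  Kept compiled out.
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_REQ_ALIGN        512         /* assumed bl.request_alignment */
#define EX_DISCARD_ALIGN    4096        /* assumed bl.pdiscard_alignment */
#define EX_MAX_PDISCARD     (1 << 30)   /* assumed bl.max_pdiscard */
#define EX_IS_ALIGNED(x, a) (((x) % (a)) == 0)

static void ex_fragment_discard(int64_t offset, int64_t bytes)
{
    const int64_t align = EX_DISCARD_ALIGN;
    int64_t head = offset % align;
    int64_t tail = (offset + bytes) % align;

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries */
            num = bytes < align - head ? bytes : align - head;
            if (!EX_IS_ALIGNED(num, EX_REQ_ALIGN)) {
                num %= EX_REQ_ALIGN;
            }
            head = (head + num) % align;
        } else if (tail) {
            if (num > align) {
                num -= tail;    /* shorten to the last aligned cluster */
            } else if (!EX_IS_ALIGNED(tail, EX_REQ_ALIGN) &&
                       tail > EX_REQ_ALIGN) {
                tail %= EX_REQ_ALIGN;
                num -= tail;
            }
        }
        if (num > EX_MAX_PDISCARD) {
            num = EX_MAX_PDISCARD;
        }
        printf("discard offset=%" PRId64 " bytes=%" PRId64 "\n", offset, num);
        offset += num;
        bytes -= num;
    }
}

int main(void)
{
    /* Unaligned on both ends; watch the head and tail get walked in */
    ex_fragment_discard(1000, 100000);
    return 0;
}
#endif
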
281548af776aSKevin Wolf int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
281661007b31SStefan Hajnoczi {
281761007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
28185c5ae76aSFam Zheng     CoroutineIOCompletion co = {
28195c5ae76aSFam Zheng         .coroutine = qemu_coroutine_self(),
28205c5ae76aSFam Zheng     };
28215c5ae76aSFam Zheng     BlockAIOCB *acb;
282261007b31SStefan Hajnoczi 
282399723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
282416a389dcSKevin Wolf     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
28255c5ae76aSFam Zheng         co.ret = -ENOTSUP;
28265c5ae76aSFam Zheng         goto out;
28275c5ae76aSFam Zheng     }
28285c5ae76aSFam Zheng 
282916a389dcSKevin Wolf     if (drv->bdrv_co_ioctl) {
283016a389dcSKevin Wolf         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
283116a389dcSKevin Wolf     } else {
28325c5ae76aSFam Zheng         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
28335c5ae76aSFam Zheng         if (!acb) {
2834c8a9fd80SFam Zheng             co.ret = -ENOTSUP;
2835c8a9fd80SFam Zheng             goto out;
28365c5ae76aSFam Zheng         }
28375c5ae76aSFam Zheng         qemu_coroutine_yield();
283816a389dcSKevin Wolf     }
28395c5ae76aSFam Zheng out:
284099723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
28415c5ae76aSFam Zheng     return co.ret;
28425c5ae76aSFam Zheng }
28435c5ae76aSFam Zheng 
284461007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size)
284561007b31SStefan Hajnoczi {
284661007b31SStefan Hajnoczi     return qemu_memalign(bdrv_opt_mem_align(bs), size);
284761007b31SStefan Hajnoczi }
284861007b31SStefan Hajnoczi 
284961007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size)
285061007b31SStefan Hajnoczi {
285161007b31SStefan Hajnoczi     return memset(qemu_blockalign(bs, size), 0, size);
285261007b31SStefan Hajnoczi }
285361007b31SStefan Hajnoczi 
285461007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
285561007b31SStefan Hajnoczi {
285661007b31SStefan Hajnoczi     size_t align = bdrv_opt_mem_align(bs);
285761007b31SStefan Hajnoczi 
285861007b31SStefan Hajnoczi     /* Ensure that NULL is never returned on success */
285961007b31SStefan Hajnoczi     assert(align > 0);
286061007b31SStefan Hajnoczi     if (size == 0) {
286161007b31SStefan Hajnoczi         size = align;
286261007b31SStefan Hajnoczi     }
286361007b31SStefan Hajnoczi 
286461007b31SStefan Hajnoczi     return qemu_try_memalign(align, size);
286561007b31SStefan Hajnoczi }
286661007b31SStefan Hajnoczi 
286761007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
286861007b31SStefan Hajnoczi {
286961007b31SStefan Hajnoczi     void *mem = qemu_try_blockalign(bs, size);
287061007b31SStefan Hajnoczi 
287161007b31SStefan Hajnoczi     if (mem) {
287261007b31SStefan Hajnoczi         memset(mem, 0, size);
287361007b31SStefan Hajnoczi     }
287461007b31SStefan Hajnoczi 
287561007b31SStefan Hajnoczi     return mem;
287661007b31SStefan Hajnoczi }
287761007b31SStefan Hajnoczi 
287861007b31SStefan Hajnoczi /*
287961007b31SStefan Hajnoczi  * Check if all memory in this vector meets the driver's minimum memory alignment.
288061007b31SStefan Hajnoczi  */
288161007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
288261007b31SStefan Hajnoczi {
288361007b31SStefan Hajnoczi     int i;
28844196d2f0SDenis V. Lunev     size_t alignment = bdrv_min_mem_align(bs);
288561007b31SStefan Hajnoczi 
288661007b31SStefan Hajnoczi     for (i = 0; i < qiov->niov; i++) {
288761007b31SStefan Hajnoczi         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
288861007b31SStefan Hajnoczi             return false;
288961007b31SStefan Hajnoczi         }
289061007b31SStefan Hajnoczi         if (qiov->iov[i].iov_len % alignment) {
289161007b31SStefan Hajnoczi             return false;
289261007b31SStefan Hajnoczi         }
289361007b31SStefan Hajnoczi     }
289461007b31SStefan Hajnoczi 
289561007b31SStefan Hajnoczi     return true;
289661007b31SStefan Hajnoczi }
289761007b31SStefan Hajnoczi 
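/*
 * A minimal sketch of the check above as a standalone program: every
 * iovec base pointer and length must be a multiple of one alignment
 * value.  The 4 KiB alignment is an assumed example; kept compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

static bool ex_qiov_is_aligned(const struct iovec *iov, int niov,
                               size_t alignment)
{
    for (int i = 0; i < niov; i++) {
        if ((uintptr_t)iov[i].iov_base % alignment) {
            return false;
        }
        if (iov[i].iov_len % alignment) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    static char buf[8192] __attribute__((aligned(4096)));
    struct iovec v[2] = {
        { .iov_base = buf,        .iov_len = 4096 },
        { .iov_base = buf + 4096, .iov_len = 512  },  /* misaligned length */
    };

    printf("%d\n", ex_qiov_is_aligned(v, 2, 4096));   /* 0 */
    printf("%d\n", ex_qiov_is_aligned(v, 1, 4096));   /* 1 */
    return 0;
}
#endif
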
289861007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs,
289961007b31SStefan Hajnoczi                                     NotifierWithReturn *notifier)
290061007b31SStefan Hajnoczi {
290161007b31SStefan Hajnoczi     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
290261007b31SStefan Hajnoczi }
290361007b31SStefan Hajnoczi 
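/*
 * Batching hint for the driver: while a node is plugged, it may queue
 * submitted requests and issue them to the host in one go on unplug.
 * Plugging recurses into all children and is reference-counted through
 * bs->io_plugged, so only the outermost plug/unplug pair reaches the
 * driver callbacks.
 */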
290461007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs)
290561007b31SStefan Hajnoczi {
29066b98bd64SPaolo Bonzini     BdrvChild *child;
29076b98bd64SPaolo Bonzini 
29086b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
29096b98bd64SPaolo Bonzini         bdrv_io_plug(child->bs);
29106b98bd64SPaolo Bonzini     }
29116b98bd64SPaolo Bonzini 
2912d73415a3SStefan Hajnoczi     if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
291361007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
291461007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_plug) {
291561007b31SStefan Hajnoczi             drv->bdrv_io_plug(bs);
29166b98bd64SPaolo Bonzini         }
291761007b31SStefan Hajnoczi     }
291861007b31SStefan Hajnoczi }
291961007b31SStefan Hajnoczi 
292061007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs)
292161007b31SStefan Hajnoczi {
29226b98bd64SPaolo Bonzini     BdrvChild *child;
29236b98bd64SPaolo Bonzini 
29246b98bd64SPaolo Bonzini     assert(bs->io_plugged);
2925d73415a3SStefan Hajnoczi     if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
292661007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
292761007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_unplug) {
292861007b31SStefan Hajnoczi             drv->bdrv_io_unplug(bs);
292961007b31SStefan Hajnoczi         }
293061007b31SStefan Hajnoczi     }
293161007b31SStefan Hajnoczi 
29326b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
29336b98bd64SPaolo Bonzini         bdrv_io_unplug(child->bs);
29346b98bd64SPaolo Bonzini     }
29356b98bd64SPaolo Bonzini }
293623d0ba93SFam Zheng 
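/*
 * Advertise a host buffer to every node in the subtree so that drivers
 * with their own I/O mechanism (e.g. the userspace NVMe driver) can
 * pre-register or pin it and avoid per-request mapping overhead.
 */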
293723d0ba93SFam Zheng void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
293823d0ba93SFam Zheng {
293923d0ba93SFam Zheng     BdrvChild *child;
294023d0ba93SFam Zheng 
294123d0ba93SFam Zheng     if (bs->drv && bs->drv->bdrv_register_buf) {
294223d0ba93SFam Zheng         bs->drv->bdrv_register_buf(bs, host, size);
294323d0ba93SFam Zheng     }
294423d0ba93SFam Zheng     QLIST_FOREACH(child, &bs->children, next) {
294523d0ba93SFam Zheng         bdrv_register_buf(child->bs, host, size);
294623d0ba93SFam Zheng     }
294723d0ba93SFam Zheng }
294823d0ba93SFam Zheng 
294923d0ba93SFam Zheng void bdrv_unregister_buf(BlockDriverState *bs, void *host)
295023d0ba93SFam Zheng {
295123d0ba93SFam Zheng     BdrvChild *child;
295223d0ba93SFam Zheng 
295323d0ba93SFam Zheng     if (bs->drv && bs->drv->bdrv_unregister_buf) {
295423d0ba93SFam Zheng         bs->drv->bdrv_unregister_buf(bs, host);
295523d0ba93SFam Zheng     }
295623d0ba93SFam Zheng     QLIST_FOREACH(child, &bs->children, next) {
295723d0ba93SFam Zheng         bdrv_unregister_buf(child->bs, host);
295823d0ba93SFam Zheng     }
295923d0ba93SFam Zheng }
2960fcc67678SFam Zheng 
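/*
 * Common implementation of bdrv_co_copy_range_from() and _to().
 * @recurse_src selects which side drives the operation: the source side
 * runs the driver's bdrv_co_copy_range_from callback under a tracked read
 * request, while the destination side goes through the usual write
 * request prepare/finish machinery so that request serialising, dirty
 * bitmaps and before-write notifiers are honoured.
 */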
296167b51fb9SVladimir Sementsov-Ogievskiy static int coroutine_fn bdrv_co_copy_range_internal(
296267b51fb9SVladimir Sementsov-Ogievskiy         BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
296367b51fb9SVladimir Sementsov-Ogievskiy         uint64_t dst_offset, uint64_t bytes,
296467b51fb9SVladimir Sementsov-Ogievskiy         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
2965fcc67678SFam Zheng         bool recurse_src)
2966fcc67678SFam Zheng {
2967999658a0SVladimir Sementsov-Ogievskiy     BdrvTrackedRequest req;
2968fcc67678SFam Zheng     int ret;
2969fcc67678SFam Zheng 
2970fe0480d6SKevin Wolf     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
2971fe0480d6SKevin Wolf     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
2972fe0480d6SKevin Wolf     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
2973fe0480d6SKevin Wolf 
2974d4d3e5a0SFam Zheng     if (!dst || !dst->bs) {
2975fcc67678SFam Zheng         return -ENOMEDIUM;
2976fcc67678SFam Zheng     }
2977fcc67678SFam Zheng     ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
2978fcc67678SFam Zheng     if (ret) {
2979fcc67678SFam Zheng         return ret;
2980fcc67678SFam Zheng     }
298167b51fb9SVladimir Sementsov-Ogievskiy     if (write_flags & BDRV_REQ_ZERO_WRITE) {
298267b51fb9SVladimir Sementsov-Ogievskiy         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
2983fcc67678SFam Zheng     }
2984fcc67678SFam Zheng 
2985d4d3e5a0SFam Zheng     if (!src || !src->bs) {
2986d4d3e5a0SFam Zheng         return -ENOMEDIUM;
2987d4d3e5a0SFam Zheng     }
2988d4d3e5a0SFam Zheng     ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
2989d4d3e5a0SFam Zheng     if (ret) {
2990d4d3e5a0SFam Zheng         return ret;
2991d4d3e5a0SFam Zheng     }
2992d4d3e5a0SFam Zheng 
2993fcc67678SFam Zheng     if (!src->bs->drv->bdrv_co_copy_range_from
2994fcc67678SFam Zheng         || !dst->bs->drv->bdrv_co_copy_range_to
2995fcc67678SFam Zheng         || src->bs->encrypted || dst->bs->encrypted) {
2996fcc67678SFam Zheng         return -ENOTSUP;
2997fcc67678SFam Zheng     }
2998999658a0SVladimir Sementsov-Ogievskiy 
2999999658a0SVladimir Sementsov-Ogievskiy     if (recurse_src) {
3000d4d3e5a0SFam Zheng         bdrv_inc_in_flight(src->bs);
3001999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, src->bs, src_offset, bytes,
3002999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_READ);
300337aec7d7SFam Zheng 
300509d2f948SVladimir Sementsov-Ogievskiy         /* BDRV_REQ_SERIALISING is only valid for write operations */
300509d2f948SVladimir Sementsov-Ogievskiy         assert(!(read_flags & BDRV_REQ_SERIALISING));
3006304d9d7fSMax Reitz         bdrv_wait_serialising_requests(&req);
3007999658a0SVladimir Sementsov-Ogievskiy 
300837aec7d7SFam Zheng         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3009fcc67678SFam Zheng                                                     src, src_offset,
3010fcc67678SFam Zheng                                                     dst, dst_offset,
301167b51fb9SVladimir Sementsov-Ogievskiy                                                     bytes,
301267b51fb9SVladimir Sementsov-Ogievskiy                                                     read_flags, write_flags);
3013999658a0SVladimir Sementsov-Ogievskiy 
3014999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3015999658a0SVladimir Sementsov-Ogievskiy         bdrv_dec_in_flight(src->bs);
3016fcc67678SFam Zheng     } else {
3017999658a0SVladimir Sementsov-Ogievskiy         bdrv_inc_in_flight(dst->bs);
3018999658a0SVladimir Sementsov-Ogievskiy         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3019999658a0SVladimir Sementsov-Ogievskiy                               BDRV_TRACKED_WRITE);
30200eb1e891SFam Zheng         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
30210eb1e891SFam Zheng                                         write_flags);
30220eb1e891SFam Zheng         if (!ret) {
302337aec7d7SFam Zheng             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3024fcc67678SFam Zheng                                                       src, src_offset,
3025fcc67678SFam Zheng                                                       dst, dst_offset,
302667b51fb9SVladimir Sementsov-Ogievskiy                                                       bytes,
302767b51fb9SVladimir Sementsov-Ogievskiy                                                       read_flags, write_flags);
30280eb1e891SFam Zheng         }
30290eb1e891SFam Zheng         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3030999658a0SVladimir Sementsov-Ogievskiy         tracked_request_end(&req);
3031d4d3e5a0SFam Zheng         bdrv_dec_in_flight(dst->bs);
3032999658a0SVladimir Sementsov-Ogievskiy     }
3033999658a0SVladimir Sementsov-Ogievskiy 
303437aec7d7SFam Zheng     return ret;
3035fcc67678SFam Zheng }
3036fcc67678SFam Zheng 
3037fcc67678SFam Zheng /* Copy range from @src to @dst.
3038fcc67678SFam Zheng  *
3039fcc67678SFam Zheng  * See bdrv_co_copy_range() for the parameter and return value
3040fcc67678SFam Zheng  * semantics. */
3041fcc67678SFam Zheng int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3042fcc67678SFam Zheng                                          BdrvChild *dst, uint64_t dst_offset,
304367b51fb9SVladimir Sementsov-Ogievskiy                                          uint64_t bytes,
304467b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags read_flags,
304567b51fb9SVladimir Sementsov-Ogievskiy                                          BdrvRequestFlags write_flags)
3046fcc67678SFam Zheng {
3047ecc983a5SFam Zheng     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3048ecc983a5SFam Zheng                                   read_flags, write_flags);
3049fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
305067b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, true);
3051fcc67678SFam Zheng }
3052fcc67678SFam Zheng 
3053fcc67678SFam Zheng /* Copy range from @src to @dst.
3054fcc67678SFam Zheng  *
3055fcc67678SFam Zheng  * See bdrv_co_copy_range() for the parameter and return value
3056fcc67678SFam Zheng  * semantics. */
3057fcc67678SFam Zheng int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3058fcc67678SFam Zheng                                        BdrvChild *dst, uint64_t dst_offset,
305967b51fb9SVladimir Sementsov-Ogievskiy                                        uint64_t bytes,
306067b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags read_flags,
306167b51fb9SVladimir Sementsov-Ogievskiy                                        BdrvRequestFlags write_flags)
3062fcc67678SFam Zheng {
3063ecc983a5SFam Zheng     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3064ecc983a5SFam Zheng                                 read_flags, write_flags);
3065fcc67678SFam Zheng     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
306667b51fb9SVladimir Sementsov-Ogievskiy                                        bytes, read_flags, write_flags, false);
3067fcc67678SFam Zheng }
3068fcc67678SFam Zheng 
3069fcc67678SFam Zheng int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3070fcc67678SFam Zheng                                     BdrvChild *dst, uint64_t dst_offset,
307167b51fb9SVladimir Sementsov-Ogievskiy                                     uint64_t bytes, BdrvRequestFlags read_flags,
307267b51fb9SVladimir Sementsov-Ogievskiy                                     BdrvRequestFlags write_flags)
3073fcc67678SFam Zheng {
307437aec7d7SFam Zheng     return bdrv_co_copy_range_from(src, src_offset,
3075fcc67678SFam Zheng                                    dst, dst_offset,
307667b51fb9SVladimir Sementsov-Ogievskiy                                    bytes, read_flags, write_flags);
3077fcc67678SFam Zheng }
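/*
 * Illustrative sketch, not part of this file: a format driver that simply
 * forwards to its bs->file child (a hypothetical "passthrough" driver;
 * the raw format driver works along these lines) could implement its
 * copy-from callback as:
 *
 *     static int coroutine_fn
 *     passthrough_co_copy_range_from(BlockDriverState *bs,
 *                                    BdrvChild *src, uint64_t src_offset,
 *                                    BdrvChild *dst, uint64_t dst_offset,
 *                                    uint64_t bytes,
 *                                    BdrvRequestFlags read_flags,
 *                                    BdrvRequestFlags write_flags)
 *     {
 *         return bdrv_co_copy_range_from(bs->file, src_offset,
 *                                        dst, dst_offset, bytes,
 *                                        read_flags, write_flags);
 *     }
 */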
30783d9f2d2aSKevin Wolf 
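/*
 * Notify all parents of @bs that the node has been resized, e.g. so that
 * a BlockBackend can propagate the new size to its device model.
 */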
30793d9f2d2aSKevin Wolf static void bdrv_parent_cb_resize(BlockDriverState *bs)
30803d9f2d2aSKevin Wolf {
30813d9f2d2aSKevin Wolf     BdrvChild *c;
30823d9f2d2aSKevin Wolf     QLIST_FOREACH(c, &bs->parents, next_parent) {
3083bd86fb99SMax Reitz         if (c->klass->resize) {
3084bd86fb99SMax Reitz             c->klass->resize(c);
30853d9f2d2aSKevin Wolf         }
30863d9f2d2aSKevin Wolf     }
30873d9f2d2aSKevin Wolf }
30883d9f2d2aSKevin Wolf 
30893d9f2d2aSKevin Wolf /**
30903d9f2d2aSKevin Wolf  * Truncate file to 'offset' bytes (needed only for file protocols)
3091c80d8b06SMax Reitz  *
3092c80d8b06SMax Reitz  * If 'exact' is true, the file must be resized to exactly the given
3093c80d8b06SMax Reitz  * 'offset'.  Otherwise, it is sufficient for the node to be at least
3094c80d8b06SMax Reitz  * 'offset' bytes in length.
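 *
 * 'prealloc' selects how a newly added area is preallocated.  'flags' may
 * include BDRV_REQ_ZERO_WRITE to request that the new area reads back as
 * zeroes; flags outside bs->supported_truncate_flags fail with -ENOTSUP.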
30953d9f2d2aSKevin Wolf  */
3096c80d8b06SMax Reitz int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
30977b8e4857SKevin Wolf                                   PreallocMode prealloc, BdrvRequestFlags flags,
30987b8e4857SKevin Wolf                                   Error **errp)
30993d9f2d2aSKevin Wolf {
31003d9f2d2aSKevin Wolf     BlockDriverState *bs = child->bs;
310123b93525SMax Reitz     BdrvChild *filtered, *backing;
31023d9f2d2aSKevin Wolf     BlockDriver *drv = bs->drv;
31031bc5f09fSKevin Wolf     BdrvTrackedRequest req;
31041bc5f09fSKevin Wolf     int64_t old_size, new_bytes;
31053d9f2d2aSKevin Wolf     int ret;
31063d9f2d2aSKevin Wolf 
31083d9f2d2aSKevin Wolf     /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
31093d9f2d2aSKevin Wolf     if (!drv) {
31103d9f2d2aSKevin Wolf         error_setg(errp, "No medium inserted");
31113d9f2d2aSKevin Wolf         return -ENOMEDIUM;
31123d9f2d2aSKevin Wolf     }
31133d9f2d2aSKevin Wolf     if (offset < 0) {
31143d9f2d2aSKevin Wolf         error_setg(errp, "Image size cannot be negative");
31153d9f2d2aSKevin Wolf         return -EINVAL;
31163d9f2d2aSKevin Wolf     }
31173d9f2d2aSKevin Wolf 
31181bc5f09fSKevin Wolf     old_size = bdrv_getlength(bs);
31191bc5f09fSKevin Wolf     if (old_size < 0) {
31201bc5f09fSKevin Wolf         error_setg_errno(errp, -old_size, "Failed to get old image size");
31211bc5f09fSKevin Wolf         return old_size;
31221bc5f09fSKevin Wolf     }
31231bc5f09fSKevin Wolf 
31241bc5f09fSKevin Wolf     if (offset > old_size) {
31251bc5f09fSKevin Wolf         new_bytes = offset - old_size;
31261bc5f09fSKevin Wolf     } else {
31271bc5f09fSKevin Wolf         new_bytes = 0;
31281bc5f09fSKevin Wolf     }
31291bc5f09fSKevin Wolf 
31303d9f2d2aSKevin Wolf     bdrv_inc_in_flight(bs);
31315416a11eSFam Zheng     tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
31325416a11eSFam Zheng                           BDRV_TRACKED_TRUNCATE);
31331bc5f09fSKevin Wolf 
31341bc5f09fSKevin Wolf     /* If we are growing the image and potentially using preallocation for the
31351bc5f09fSKevin Wolf      * new area, we need to make sure that no write requests are made to it
31361bc5f09fSKevin Wolf      * concurrently or they might be overwritten by preallocation. */
31371bc5f09fSKevin Wolf     if (new_bytes) {
3138304d9d7fSMax Reitz         bdrv_mark_request_serialising(&req, 1);
3139cd47d792SFam Zheng     }
3140cd47d792SFam Zheng     if (bs->read_only) {
3141cd47d792SFam Zheng         error_setg(errp, "Image is read-only");
3142cd47d792SFam Zheng         ret = -EACCES;
3143cd47d792SFam Zheng         goto out;
3144cd47d792SFam Zheng     }
3145cd47d792SFam Zheng     ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3146cd47d792SFam Zheng                                     0);
3147cd47d792SFam Zheng     if (ret < 0) {
3148cd47d792SFam Zheng         error_setg_errno(errp, -ret,
3149cd47d792SFam Zheng                          "Failed to prepare request for truncation");
3150cd47d792SFam Zheng         goto out;
31511bc5f09fSKevin Wolf     }
31523d9f2d2aSKevin Wolf 
315393393e69SMax Reitz     filtered = bdrv_filter_child(bs);
315423b93525SMax Reitz     backing = bdrv_cow_child(bs);
315593393e69SMax Reitz 
3156955c7d66SKevin Wolf     /*
3157955c7d66SKevin Wolf      * If the image has a backing file that is large enough that it would
3158955c7d66SKevin Wolf      * provide data for the new area, we cannot leave it unallocated because
3159955c7d66SKevin Wolf      * then the backing file content would become visible. Instead, zero-fill
3160955c7d66SKevin Wolf      * the new area.
3161955c7d66SKevin Wolf      *
3162955c7d66SKevin Wolf      * Note that if the image has a backing file, but was opened without the
3163955c7d66SKevin Wolf      * backing file, taking care of keeping things consistent with that backing
3164955c7d66SKevin Wolf      * file is the user's responsibility.
3165955c7d66SKevin Wolf      */
316623b93525SMax Reitz     if (new_bytes && backing) {
3167955c7d66SKevin Wolf         int64_t backing_len;
3168955c7d66SKevin Wolf 
316923b93525SMax Reitz         backing_len = bdrv_getlength(backing->bs);
3170955c7d66SKevin Wolf         if (backing_len < 0) {
3171955c7d66SKevin Wolf             ret = backing_len;
3172955c7d66SKevin Wolf             error_setg_errno(errp, -ret, "Could not get backing file size");
3173955c7d66SKevin Wolf             goto out;
3174955c7d66SKevin Wolf         }
3175955c7d66SKevin Wolf 
3176955c7d66SKevin Wolf         if (backing_len > old_size) {
3177955c7d66SKevin Wolf             flags |= BDRV_REQ_ZERO_WRITE;
3178955c7d66SKevin Wolf         }
3179955c7d66SKevin Wolf     }
3180955c7d66SKevin Wolf 
31816b7e8f8bSMax Reitz     if (drv->bdrv_co_truncate) {
318292b92799SKevin Wolf         if (flags & ~bs->supported_truncate_flags) {
318392b92799SKevin Wolf             error_setg(errp, "Block driver does not support requested flags");
318492b92799SKevin Wolf             ret = -ENOTSUP;
318592b92799SKevin Wolf             goto out;
318692b92799SKevin Wolf         }
318792b92799SKevin Wolf         ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
318893393e69SMax Reitz     } else if (filtered) {
318993393e69SMax Reitz         ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
31906b7e8f8bSMax Reitz     } else {
31913d9f2d2aSKevin Wolf         error_setg(errp, "Image format driver does not support resize");
31923d9f2d2aSKevin Wolf         ret = -ENOTSUP;
31933d9f2d2aSKevin Wolf         goto out;
31943d9f2d2aSKevin Wolf     }
31953d9f2d2aSKevin Wolf     if (ret < 0) {
31963d9f2d2aSKevin Wolf         goto out;
31973d9f2d2aSKevin Wolf     }
31986b7e8f8bSMax Reitz 
31993d9f2d2aSKevin Wolf     ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
32003d9f2d2aSKevin Wolf     if (ret < 0) {
32013d9f2d2aSKevin Wolf         error_setg_errno(errp, -ret, "Could not refresh total sector count");
32023d9f2d2aSKevin Wolf     } else {
32033d9f2d2aSKevin Wolf         offset = bs->total_sectors * BDRV_SECTOR_SIZE;
32043d9f2d2aSKevin Wolf     }
3205cd47d792SFam Zheng     /* It's possible that truncation succeeded but refresh_total_sectors
3206cd47d792SFam Zheng      * failed, but the latter doesn't affect how we should finish the request.
3207cd47d792SFam Zheng      * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3208cd47d792SFam Zheng     bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
32093d9f2d2aSKevin Wolf 
32103d9f2d2aSKevin Wolf out:
32111bc5f09fSKevin Wolf     tracked_request_end(&req);
32123d9f2d2aSKevin Wolf     bdrv_dec_in_flight(bs);
32131bc5f09fSKevin Wolf 
32143d9f2d2aSKevin Wolf     return ret;
32153d9f2d2aSKevin Wolf }
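
/*
 * Illustrative sketch, not part of this file: a coroutine caller growing
 * an image while insisting that the new tail reads back as zeroes would
 * do roughly the following (error handling policy is the caller's):
 *
 *     Error *local_err = NULL;
 *     int ret = bdrv_co_truncate(child, new_size, false,
 *                                PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE,
 *                                &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */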
3216