xref: /qemu/block/io.c (revision b0165585900f050f403cecba9d89adeccf35dd6c)
161007b31SStefan Hajnoczi /*
261007b31SStefan Hajnoczi  * Block layer I/O functions
361007b31SStefan Hajnoczi  *
461007b31SStefan Hajnoczi  * Copyright (c) 2003 Fabrice Bellard
561007b31SStefan Hajnoczi  *
661007b31SStefan Hajnoczi  * Permission is hereby granted, free of charge, to any person obtaining a copy
761007b31SStefan Hajnoczi  * of this software and associated documentation files (the "Software"), to deal
861007b31SStefan Hajnoczi  * in the Software without restriction, including without limitation the rights
961007b31SStefan Hajnoczi  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1061007b31SStefan Hajnoczi  * copies of the Software, and to permit persons to whom the Software is
1161007b31SStefan Hajnoczi  * furnished to do so, subject to the following conditions:
1261007b31SStefan Hajnoczi  *
1361007b31SStefan Hajnoczi  * The above copyright notice and this permission notice shall be included in
1461007b31SStefan Hajnoczi  * all copies or substantial portions of the Software.
1561007b31SStefan Hajnoczi  *
1661007b31SStefan Hajnoczi  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1761007b31SStefan Hajnoczi  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1861007b31SStefan Hajnoczi  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1961007b31SStefan Hajnoczi  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2061007b31SStefan Hajnoczi  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
2161007b31SStefan Hajnoczi  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
2261007b31SStefan Hajnoczi  * THE SOFTWARE.
2361007b31SStefan Hajnoczi  */
2461007b31SStefan Hajnoczi 
2580c71a24SPeter Maydell #include "qemu/osdep.h"
2661007b31SStefan Hajnoczi #include "trace.h"
277f0e9da6SMax Reitz #include "sysemu/block-backend.h"
2861007b31SStefan Hajnoczi #include "block/blockjob.h"
29f321dcb5SPaolo Bonzini #include "block/blockjob_int.h"
3061007b31SStefan Hajnoczi #include "block/block_int.h"
31f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
32da34e65cSMarkus Armbruster #include "qapi/error.h"
33d49b6836SMarkus Armbruster #include "qemu/error-report.h"
3461007b31SStefan Hajnoczi 
3561007b31SStefan Hajnoczi #define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */
3661007b31SStefan Hajnoczi 
37cb2e2878SEric Blake /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
38cb2e2878SEric Blake #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
39cb2e2878SEric Blake 
40d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
41f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags);
4261007b31SStefan Hajnoczi 
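/* Notify all parents of @bs, except @ignore, that @bs is entering a drained
 * section: each parent's role->drained_begin callback (if implemented) is
 * invoked so the parent stops submitting new requests to this child.
 * bdrv_parent_drained_end() below is the matching notification when the
 * drained section ends. */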
430152bf40SKevin Wolf void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
4461007b31SStefan Hajnoczi {
4502d21300SKevin Wolf     BdrvChild *c, *next;
4627ccdd52SKevin Wolf 
4702d21300SKevin Wolf     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
480152bf40SKevin Wolf         if (c == ignore) {
490152bf40SKevin Wolf             continue;
500152bf40SKevin Wolf         }
51c2066af0SKevin Wolf         if (c->role->drained_begin) {
52c2066af0SKevin Wolf             c->role->drained_begin(c);
53c2066af0SKevin Wolf         }
54ce0f1412SPaolo Bonzini     }
55ce0f1412SPaolo Bonzini }
56ce0f1412SPaolo Bonzini 
570152bf40SKevin Wolf void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
58ce0f1412SPaolo Bonzini {
5902d21300SKevin Wolf     BdrvChild *c, *next;
6027ccdd52SKevin Wolf 
6102d21300SKevin Wolf     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
620152bf40SKevin Wolf         if (c == ignore) {
630152bf40SKevin Wolf             continue;
640152bf40SKevin Wolf         }
65c2066af0SKevin Wolf         if (c->role->drained_end) {
66c2066af0SKevin Wolf             c->role->drained_end(c);
6727ccdd52SKevin Wolf         }
68c2066af0SKevin Wolf     }
6961007b31SStefan Hajnoczi }
7061007b31SStefan Hajnoczi 
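/* Merge the limits of a child (@src) into those of its parent (@dst):
 * alignment and optimal-transfer values take the larger of the two, while
 * the maxima (max_transfer, max_iov) take the smaller non-zero value. */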
71d9e0dfa2SEric Blake static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
72d9e0dfa2SEric Blake {
73d9e0dfa2SEric Blake     dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
74d9e0dfa2SEric Blake     dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
75d9e0dfa2SEric Blake     dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
76d9e0dfa2SEric Blake                                  src->opt_mem_alignment);
77d9e0dfa2SEric Blake     dst->min_mem_alignment = MAX(dst->min_mem_alignment,
78d9e0dfa2SEric Blake                                  src->min_mem_alignment);
79d9e0dfa2SEric Blake     dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
80d9e0dfa2SEric Blake }
81d9e0dfa2SEric Blake 
8261007b31SStefan Hajnoczi void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
8361007b31SStefan Hajnoczi {
8461007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
8561007b31SStefan Hajnoczi     Error *local_err = NULL;
8661007b31SStefan Hajnoczi 
8761007b31SStefan Hajnoczi     memset(&bs->bl, 0, sizeof(bs->bl));
8861007b31SStefan Hajnoczi 
8961007b31SStefan Hajnoczi     if (!drv) {
9061007b31SStefan Hajnoczi         return;
9161007b31SStefan Hajnoczi     }
9261007b31SStefan Hajnoczi 
9379ba8c98SEric Blake     /* Default alignment based on whether the driver has a byte interface */
94a5b8dd2cSEric Blake     bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;
9579ba8c98SEric Blake 
9661007b31SStefan Hajnoczi     /* Take some limits from the children as a default */
9761007b31SStefan Hajnoczi     if (bs->file) {
989a4f4c31SKevin Wolf         bdrv_refresh_limits(bs->file->bs, &local_err);
9961007b31SStefan Hajnoczi         if (local_err) {
10061007b31SStefan Hajnoczi             error_propagate(errp, local_err);
10161007b31SStefan Hajnoczi             return;
10261007b31SStefan Hajnoczi         }
103d9e0dfa2SEric Blake         bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
10461007b31SStefan Hajnoczi     } else {
1054196d2f0SDenis V. Lunev         bs->bl.min_mem_alignment = 512;
106459b4e66SDenis V. Lunev         bs->bl.opt_mem_alignment = getpagesize();
107bd44feb7SStefan Hajnoczi 
108bd44feb7SStefan Hajnoczi         /* Safe default since most protocols use readv()/writev()/etc */
109bd44feb7SStefan Hajnoczi         bs->bl.max_iov = IOV_MAX;
11061007b31SStefan Hajnoczi     }
11161007b31SStefan Hajnoczi 
112760e0063SKevin Wolf     if (bs->backing) {
113760e0063SKevin Wolf         bdrv_refresh_limits(bs->backing->bs, &local_err);
11461007b31SStefan Hajnoczi         if (local_err) {
11561007b31SStefan Hajnoczi             error_propagate(errp, local_err);
11661007b31SStefan Hajnoczi             return;
11761007b31SStefan Hajnoczi         }
118d9e0dfa2SEric Blake         bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
11961007b31SStefan Hajnoczi     }
12061007b31SStefan Hajnoczi 
12161007b31SStefan Hajnoczi     /* Then let the driver override it */
12261007b31SStefan Hajnoczi     if (drv->bdrv_refresh_limits) {
12361007b31SStefan Hajnoczi         drv->bdrv_refresh_limits(bs, errp);
12461007b31SStefan Hajnoczi     }
12561007b31SStefan Hajnoczi }
12661007b31SStefan Hajnoczi 
12761007b31SStefan Hajnoczi /**
12861007b31SStefan Hajnoczi  * The copy-on-read flag is actually a reference count so multiple users may
12961007b31SStefan Hajnoczi  * use the feature without worrying about clobbering its previous state.
13061007b31SStefan Hajnoczi  * Copy-on-read stays enabled until all users have called to disable it.
13161007b31SStefan Hajnoczi  */
13261007b31SStefan Hajnoczi void bdrv_enable_copy_on_read(BlockDriverState *bs)
13361007b31SStefan Hajnoczi {
134d3faa13eSPaolo Bonzini     atomic_inc(&bs->copy_on_read);
13561007b31SStefan Hajnoczi }
13661007b31SStefan Hajnoczi 
13761007b31SStefan Hajnoczi void bdrv_disable_copy_on_read(BlockDriverState *bs)
13861007b31SStefan Hajnoczi {
139d3faa13eSPaolo Bonzini     int old = atomic_fetch_dec(&bs->copy_on_read);
140d3faa13eSPaolo Bonzini     assert(old >= 1);
14161007b31SStefan Hajnoczi }
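/* Illustrative pairing (not taken from this file): a user that temporarily
 * needs copy-on-read enables it and disables it again when done, e.g.
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... issue reads that should populate the top layer ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * Because the flag is a reference count, concurrent users do not clobber
 * each other's state. */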
14261007b31SStefan Hajnoczi 
14361124f03SPaolo Bonzini typedef struct {
14461124f03SPaolo Bonzini     Coroutine *co;
14561124f03SPaolo Bonzini     BlockDriverState *bs;
14661124f03SPaolo Bonzini     bool done;
147481cad48SManos Pitsidianakis     bool begin;
148*b0165585SKevin Wolf     bool recursive;
1490152bf40SKevin Wolf     BdrvChild *parent;
15061124f03SPaolo Bonzini } BdrvCoDrainData;
15161124f03SPaolo Bonzini 
15261124f03SPaolo Bonzini static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
15361124f03SPaolo Bonzini {
15461124f03SPaolo Bonzini     BdrvCoDrainData *data = opaque;
15561124f03SPaolo Bonzini     BlockDriverState *bs = data->bs;
15661124f03SPaolo Bonzini 
157481cad48SManos Pitsidianakis     if (data->begin) {
158f8ea8dacSManos Pitsidianakis         bs->drv->bdrv_co_drain_begin(bs);
159481cad48SManos Pitsidianakis     } else {
160481cad48SManos Pitsidianakis         bs->drv->bdrv_co_drain_end(bs);
161481cad48SManos Pitsidianakis     }
16261124f03SPaolo Bonzini 
16361124f03SPaolo Bonzini     /* Set data->done before reading bs->wakeup.  */
16461124f03SPaolo Bonzini     atomic_mb_set(&data->done, true);
16561124f03SPaolo Bonzini     bdrv_wakeup(bs);
16661124f03SPaolo Bonzini }
16761124f03SPaolo Bonzini 
168db0289b9SKevin Wolf /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
1697b6a3d35SKevin Wolf static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, bool recursive)
17061124f03SPaolo Bonzini {
171db0289b9SKevin Wolf     BdrvChild *child, *tmp;
172481cad48SManos Pitsidianakis     BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};
17361124f03SPaolo Bonzini 
174f8ea8dacSManos Pitsidianakis     if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
175481cad48SManos Pitsidianakis             (!begin && !bs->drv->bdrv_co_drain_end)) {
17661124f03SPaolo Bonzini         return;
17761124f03SPaolo Bonzini     }
17861124f03SPaolo Bonzini 
17961124f03SPaolo Bonzini     data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data);
18061124f03SPaolo Bonzini     bdrv_coroutine_enter(bs, data.co);
18161124f03SPaolo Bonzini     BDRV_POLL_WHILE(bs, !data.done);
182db0289b9SKevin Wolf 
1837b6a3d35SKevin Wolf     if (recursive) {
184db0289b9SKevin Wolf         QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
1857b6a3d35SKevin Wolf             bdrv_drain_invoke(child->bs, begin, true);
1867b6a3d35SKevin Wolf         }
187db0289b9SKevin Wolf     }
18861124f03SPaolo Bonzini }
18961124f03SPaolo Bonzini 
19099c05de9SKevin Wolf static bool bdrv_drain_recurse(BlockDriverState *bs)
19167da1dc5SFam Zheng {
192178bd438SFam Zheng     BdrvChild *child, *tmp;
193d42cf288SPaolo Bonzini     bool waited;
194d42cf288SPaolo Bonzini 
195481cad48SManos Pitsidianakis     /* Wait for drained requests to finish */
196481cad48SManos Pitsidianakis     waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
197d42cf288SPaolo Bonzini 
198178bd438SFam Zheng     QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
199178bd438SFam Zheng         BlockDriverState *bs = child->bs;
200178bd438SFam Zheng         bool in_main_loop =
201178bd438SFam Zheng             qemu_get_current_aio_context() == qemu_get_aio_context();
202178bd438SFam Zheng         assert(bs->refcnt > 0);
203178bd438SFam Zheng         if (in_main_loop) {
204178bd438SFam Zheng             /* In case the recursive bdrv_drain_recurse processes a
205178bd438SFam Zheng              * block_job_defer_to_main_loop BH and modifies the graph,
206178bd438SFam Zheng              * let's hold a reference to bs until we are done.
207178bd438SFam Zheng              *
208178bd438SFam Zheng              * IOThread doesn't have such a BH, and it is not safe to call
209178bd438SFam Zheng              * bdrv_unref without BQL, so skip doing it there.
210178bd438SFam Zheng              */
211178bd438SFam Zheng             bdrv_ref(bs);
212178bd438SFam Zheng         }
21399c05de9SKevin Wolf         waited |= bdrv_drain_recurse(bs);
214178bd438SFam Zheng         if (in_main_loop) {
215178bd438SFam Zheng             bdrv_unref(bs);
216178bd438SFam Zheng         }
21767da1dc5SFam Zheng     }
218d42cf288SPaolo Bonzini 
219d42cf288SPaolo Bonzini     return waited;
22067da1dc5SFam Zheng }
22167da1dc5SFam Zheng 
222*b0165585SKevin Wolf static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
223*b0165585SKevin Wolf                                   BdrvChild *parent);
224*b0165585SKevin Wolf static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
225*b0165585SKevin Wolf                                 BdrvChild *parent);
2260152bf40SKevin Wolf 
227a77fd4bbSFam Zheng static void bdrv_co_drain_bh_cb(void *opaque)
228a77fd4bbSFam Zheng {
229a77fd4bbSFam Zheng     BdrvCoDrainData *data = opaque;
230a77fd4bbSFam Zheng     Coroutine *co = data->co;
23199723548SPaolo Bonzini     BlockDriverState *bs = data->bs;
232a77fd4bbSFam Zheng 
23399723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
234481cad48SManos Pitsidianakis     if (data->begin) {
235*b0165585SKevin Wolf         bdrv_do_drained_begin(bs, data->recursive, data->parent);
236481cad48SManos Pitsidianakis     } else {
237*b0165585SKevin Wolf         bdrv_do_drained_end(bs, data->recursive, data->parent);
238481cad48SManos Pitsidianakis     }
239481cad48SManos Pitsidianakis 
240a77fd4bbSFam Zheng     data->done = true;
2411919631eSPaolo Bonzini     aio_co_wake(co);
242a77fd4bbSFam Zheng }
243a77fd4bbSFam Zheng 
244481cad48SManos Pitsidianakis static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
245*b0165585SKevin Wolf                                                 bool begin, bool recursive,
246*b0165585SKevin Wolf                                                 BdrvChild *parent)
247a77fd4bbSFam Zheng {
248a77fd4bbSFam Zheng     BdrvCoDrainData data;
249a77fd4bbSFam Zheng 
250a77fd4bbSFam Zheng     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
251a77fd4bbSFam Zheng      * other coroutines run if they were queued from
252a77fd4bbSFam Zheng      * qemu_co_queue_run_restart(). */
253a77fd4bbSFam Zheng 
254a77fd4bbSFam Zheng     assert(qemu_in_coroutine());
255a77fd4bbSFam Zheng     data = (BdrvCoDrainData) {
256a77fd4bbSFam Zheng         .co = qemu_coroutine_self(),
257a77fd4bbSFam Zheng         .bs = bs,
258a77fd4bbSFam Zheng         .done = false,
259481cad48SManos Pitsidianakis         .begin = begin,
260*b0165585SKevin Wolf         .recursive = recursive,
2610152bf40SKevin Wolf         .parent = parent,
262a77fd4bbSFam Zheng     };
26399723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
264fffb6e12SPaolo Bonzini     aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
265fffb6e12SPaolo Bonzini                             bdrv_co_drain_bh_cb, &data);
266a77fd4bbSFam Zheng 
267a77fd4bbSFam Zheng     qemu_coroutine_yield();
268a77fd4bbSFam Zheng     /* If we are resumed from some other event (such as an aio completion or a
269a77fd4bbSFam Zheng      * timer callback), it is a bug in the caller that should be fixed. */
270a77fd4bbSFam Zheng     assert(data.done);
271a77fd4bbSFam Zheng }
272a77fd4bbSFam Zheng 
273*b0165585SKevin Wolf static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
274*b0165585SKevin Wolf                                   BdrvChild *parent)
2756820643fSKevin Wolf {
276*b0165585SKevin Wolf     BdrvChild *child, *next;
277*b0165585SKevin Wolf 
278d42cf288SPaolo Bonzini     if (qemu_in_coroutine()) {
279*b0165585SKevin Wolf         bdrv_co_yield_to_drain(bs, true, recursive, parent);
280d42cf288SPaolo Bonzini         return;
281d42cf288SPaolo Bonzini     }
282d42cf288SPaolo Bonzini 
28360369b86SKevin Wolf     /* Stop things in parent-to-child order */
284414c2ec3SPaolo Bonzini     if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
2856820643fSKevin Wolf         aio_disable_external(bdrv_get_aio_context(bs));
2866820643fSKevin Wolf     }
2876820643fSKevin Wolf 
2880152bf40SKevin Wolf     bdrv_parent_drained_begin(bs, parent);
2897b6a3d35SKevin Wolf     bdrv_drain_invoke(bs, true, false);
29099c05de9SKevin Wolf     bdrv_drain_recurse(bs);
291*b0165585SKevin Wolf 
292*b0165585SKevin Wolf     if (recursive) {
293*b0165585SKevin Wolf         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
294*b0165585SKevin Wolf             bdrv_do_drained_begin(child->bs, true, child);
295*b0165585SKevin Wolf         }
296*b0165585SKevin Wolf     }
2976820643fSKevin Wolf }
2986820643fSKevin Wolf 
2990152bf40SKevin Wolf void bdrv_drained_begin(BlockDriverState *bs)
3000152bf40SKevin Wolf {
301*b0165585SKevin Wolf     bdrv_do_drained_begin(bs, false, NULL);
3020152bf40SKevin Wolf }
3030152bf40SKevin Wolf 
304*b0165585SKevin Wolf void bdrv_subtree_drained_begin(BlockDriverState *bs)
3056820643fSKevin Wolf {
306*b0165585SKevin Wolf     bdrv_do_drained_begin(bs, true, NULL);
307*b0165585SKevin Wolf }
308*b0165585SKevin Wolf 
309*b0165585SKevin Wolf static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
310*b0165585SKevin Wolf                                 BdrvChild *parent)
311*b0165585SKevin Wolf {
312*b0165585SKevin Wolf     BdrvChild *child, *next;
3130f115168SKevin Wolf     int old_quiesce_counter;
3140f115168SKevin Wolf 
315481cad48SManos Pitsidianakis     if (qemu_in_coroutine()) {
316*b0165585SKevin Wolf         bdrv_co_yield_to_drain(bs, false, recursive, parent);
317481cad48SManos Pitsidianakis         return;
318481cad48SManos Pitsidianakis     }
3196820643fSKevin Wolf     assert(bs->quiesce_counter > 0);
3200f115168SKevin Wolf     old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
3216820643fSKevin Wolf 
32260369b86SKevin Wolf     /* Re-enable things in child-to-parent order */
3237b6a3d35SKevin Wolf     bdrv_drain_invoke(bs, false, false);
3240152bf40SKevin Wolf     bdrv_parent_drained_end(bs, parent);
3250f115168SKevin Wolf     if (old_quiesce_counter == 1) {
3266820643fSKevin Wolf         aio_enable_external(bdrv_get_aio_context(bs));
3276820643fSKevin Wolf     }
328*b0165585SKevin Wolf 
329*b0165585SKevin Wolf     if (recursive) {
330*b0165585SKevin Wolf         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
331*b0165585SKevin Wolf             bdrv_do_drained_end(child->bs, true, child);
332*b0165585SKevin Wolf         }
333*b0165585SKevin Wolf     }
3340f115168SKevin Wolf }
3356820643fSKevin Wolf 
3360152bf40SKevin Wolf void bdrv_drained_end(BlockDriverState *bs)
3370152bf40SKevin Wolf {
338*b0165585SKevin Wolf     bdrv_do_drained_end(bs, false, NULL);
339*b0165585SKevin Wolf }
340*b0165585SKevin Wolf 
341*b0165585SKevin Wolf void bdrv_subtree_drained_end(BlockDriverState *bs)
342*b0165585SKevin Wolf {
343*b0165585SKevin Wolf     bdrv_do_drained_end(bs, true, NULL);
3440152bf40SKevin Wolf }
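/* Illustrative drained-section usage (not taken from this file): callers
 * bracket graph or state changes with begin/end so that no new requests are
 * submitted while they run, e.g.
 *
 *     bdrv_drained_begin(bs);
 *     ... modify bs while no requests are in flight ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_subtree_drained_begin/end() do the same recursively for all children.
 */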
3450152bf40SKevin Wolf 
34661007b31SStefan Hajnoczi /*
34767da1dc5SFam Zheng  * Wait for pending requests to complete on a single BlockDriverState subtree,
34867da1dc5SFam Zheng  * and suspend the block driver's internal I/O until the next request arrives.
34961007b31SStefan Hajnoczi  *
35061007b31SStefan Hajnoczi  * Note that unlike bdrv_drain_all(), the caller must hold the AioContext lock
35161007b31SStefan Hajnoczi  * of this BlockDriverState.
3527a63f3cdSStefan Hajnoczi  *
3537a63f3cdSStefan Hajnoczi  * Only this BlockDriverState's AioContext is run, so in-flight requests must
3547a63f3cdSStefan Hajnoczi  * not depend on events in other AioContexts.  In that case, use
3557a63f3cdSStefan Hajnoczi  * bdrv_drain_all() instead.
35661007b31SStefan Hajnoczi  */
357b6e84c97SPaolo Bonzini void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
358b6e84c97SPaolo Bonzini {
3596820643fSKevin Wolf     assert(qemu_in_coroutine());
3606820643fSKevin Wolf     bdrv_drained_begin(bs);
3616820643fSKevin Wolf     bdrv_drained_end(bs);
362b6e84c97SPaolo Bonzini }
363b6e84c97SPaolo Bonzini 
36461007b31SStefan Hajnoczi void bdrv_drain(BlockDriverState *bs)
36561007b31SStefan Hajnoczi {
3666820643fSKevin Wolf     bdrv_drained_begin(bs);
3676820643fSKevin Wolf     bdrv_drained_end(bs);
36861007b31SStefan Hajnoczi }
36961007b31SStefan Hajnoczi 
37061007b31SStefan Hajnoczi /*
37161007b31SStefan Hajnoczi  * Wait for pending requests to complete across all BlockDriverStates
37261007b31SStefan Hajnoczi  *
37361007b31SStefan Hajnoczi  * This function does not flush data to disk, use bdrv_flush_all() for that
37461007b31SStefan Hajnoczi  * after calling this function.
375c0778f66SAlberto Garcia  *
376c0778f66SAlberto Garcia  * This pauses all block jobs and disables external clients. It must
377c0778f66SAlberto Garcia  * be paired with bdrv_drain_all_end().
378c0778f66SAlberto Garcia  *
379c0778f66SAlberto Garcia  * NOTE: no new block jobs or BlockDriverStates can be created between
380c0778f66SAlberto Garcia  * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
38161007b31SStefan Hajnoczi  */
382c0778f66SAlberto Garcia void bdrv_drain_all_begin(void)
38361007b31SStefan Hajnoczi {
38461007b31SStefan Hajnoczi     /* Always run first iteration so any pending completion BHs run */
38599723548SPaolo Bonzini     bool waited = true;
3867c8eece4SKevin Wolf     BlockDriverState *bs;
38788be7b4bSKevin Wolf     BdrvNextIterator it;
388f406c03cSAlexander Yarygin     GSList *aio_ctxs = NULL, *ctx;
38961007b31SStefan Hajnoczi 
3909a7e86c8SKevin Wolf     /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread
3919a7e86c8SKevin Wolf      * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on
3929a7e86c8SKevin Wolf      * nodes in several different AioContexts, so make sure we're in the main
3939a7e86c8SKevin Wolf      * context. */
3949a7e86c8SKevin Wolf     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
3959a7e86c8SKevin Wolf 
39688be7b4bSKevin Wolf     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
39761007b31SStefan Hajnoczi         AioContext *aio_context = bdrv_get_aio_context(bs);
39861007b31SStefan Hajnoczi 
39960369b86SKevin Wolf         /* Stop things in parent-to-child order */
40061007b31SStefan Hajnoczi         aio_context_acquire(aio_context);
401c0778f66SAlberto Garcia         aio_disable_external(aio_context);
4020152bf40SKevin Wolf         bdrv_parent_drained_begin(bs, NULL);
4037b6a3d35SKevin Wolf         bdrv_drain_invoke(bs, true, true);
40461007b31SStefan Hajnoczi         aio_context_release(aio_context);
405f406c03cSAlexander Yarygin 
406764ba3aeSAlberto Garcia         if (!g_slist_find(aio_ctxs, aio_context)) {
407f406c03cSAlexander Yarygin             aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
408f406c03cSAlexander Yarygin         }
40961007b31SStefan Hajnoczi     }
41061007b31SStefan Hajnoczi 
4117a63f3cdSStefan Hajnoczi     /* Note that completion of an asynchronous I/O operation can trigger any
4127a63f3cdSStefan Hajnoczi      * number of other I/O operations on other devices---for example a
4137a63f3cdSStefan Hajnoczi      * coroutine can submit an I/O request to another device in response to
4147a63f3cdSStefan Hajnoczi      * request completion.  Therefore we must keep looping until there was no
4157a63f3cdSStefan Hajnoczi      * more activity rather than simply draining each device independently.
4167a63f3cdSStefan Hajnoczi      */
41799723548SPaolo Bonzini     while (waited) {
41899723548SPaolo Bonzini         waited = false;
419f406c03cSAlexander Yarygin 
420f406c03cSAlexander Yarygin         for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
421f406c03cSAlexander Yarygin             AioContext *aio_context = ctx->data;
42261007b31SStefan Hajnoczi 
42361007b31SStefan Hajnoczi             aio_context_acquire(aio_context);
42488be7b4bSKevin Wolf             for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
425f406c03cSAlexander Yarygin                 if (aio_context == bdrv_get_aio_context(bs)) {
42699c05de9SKevin Wolf                     waited |= bdrv_drain_recurse(bs);
427f406c03cSAlexander Yarygin                 }
428f406c03cSAlexander Yarygin             }
42961007b31SStefan Hajnoczi             aio_context_release(aio_context);
43061007b31SStefan Hajnoczi         }
43161007b31SStefan Hajnoczi     }
43261007b31SStefan Hajnoczi 
433c0778f66SAlberto Garcia     g_slist_free(aio_ctxs);
434c0778f66SAlberto Garcia }
435c0778f66SAlberto Garcia 
436c0778f66SAlberto Garcia void bdrv_drain_all_end(void)
437c0778f66SAlberto Garcia {
438c0778f66SAlberto Garcia     BlockDriverState *bs;
439c0778f66SAlberto Garcia     BdrvNextIterator it;
440c0778f66SAlberto Garcia 
44188be7b4bSKevin Wolf     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
44261007b31SStefan Hajnoczi         AioContext *aio_context = bdrv_get_aio_context(bs);
44361007b31SStefan Hajnoczi 
44460369b86SKevin Wolf         /* Re-enable things in child-to-parent order */
44561007b31SStefan Hajnoczi         aio_context_acquire(aio_context);
4467b6a3d35SKevin Wolf         bdrv_drain_invoke(bs, false, true);
4470152bf40SKevin Wolf         bdrv_parent_drained_end(bs, NULL);
44860369b86SKevin Wolf         aio_enable_external(aio_context);
44961007b31SStefan Hajnoczi         aio_context_release(aio_context);
45061007b31SStefan Hajnoczi     }
45161007b31SStefan Hajnoczi }
45261007b31SStefan Hajnoczi 
453c0778f66SAlberto Garcia void bdrv_drain_all(void)
454c0778f66SAlberto Garcia {
455c0778f66SAlberto Garcia     bdrv_drain_all_begin();
456c0778f66SAlberto Garcia     bdrv_drain_all_end();
457c0778f66SAlberto Garcia }
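/* Illustrative usage (not taken from this file): operations that must see a
 * quiescent global state use the paired form instead of bdrv_drain_all(),
 * e.g.
 *
 *     bdrv_drain_all_begin();
 *     ... no external I/O and no block job activity happens here ...
 *     bdrv_drain_all_end();
 */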
458c0778f66SAlberto Garcia 
45961007b31SStefan Hajnoczi /**
46061007b31SStefan Hajnoczi  * Remove an active request from the tracked requests list
46161007b31SStefan Hajnoczi  *
46261007b31SStefan Hajnoczi  * This function should be called when a tracked request is completing.
46361007b31SStefan Hajnoczi  */
46461007b31SStefan Hajnoczi static void tracked_request_end(BdrvTrackedRequest *req)
46561007b31SStefan Hajnoczi {
46661007b31SStefan Hajnoczi     if (req->serialising) {
46720fc71b2SPaolo Bonzini         atomic_dec(&req->bs->serialising_in_flight);
46861007b31SStefan Hajnoczi     }
46961007b31SStefan Hajnoczi 
4703783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&req->bs->reqs_lock);
47161007b31SStefan Hajnoczi     QLIST_REMOVE(req, list);
47261007b31SStefan Hajnoczi     qemu_co_queue_restart_all(&req->wait_queue);
4733783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&req->bs->reqs_lock);
47461007b31SStefan Hajnoczi }
47561007b31SStefan Hajnoczi 
47661007b31SStefan Hajnoczi /**
47761007b31SStefan Hajnoczi  * Add an active request to the tracked requests list
47861007b31SStefan Hajnoczi  */
47961007b31SStefan Hajnoczi static void tracked_request_begin(BdrvTrackedRequest *req,
48061007b31SStefan Hajnoczi                                   BlockDriverState *bs,
48161007b31SStefan Hajnoczi                                   int64_t offset,
482ebde595cSFam Zheng                                   unsigned int bytes,
483ebde595cSFam Zheng                                   enum BdrvTrackedRequestType type)
48461007b31SStefan Hajnoczi {
48561007b31SStefan Hajnoczi     *req = (BdrvTrackedRequest){
48661007b31SStefan Hajnoczi         .bs = bs,
48761007b31SStefan Hajnoczi         .offset         = offset,
48861007b31SStefan Hajnoczi         .bytes          = bytes,
489ebde595cSFam Zheng         .type           = type,
49061007b31SStefan Hajnoczi         .co             = qemu_coroutine_self(),
49161007b31SStefan Hajnoczi         .serialising    = false,
49261007b31SStefan Hajnoczi         .overlap_offset = offset,
49361007b31SStefan Hajnoczi         .overlap_bytes  = bytes,
49461007b31SStefan Hajnoczi     };
49561007b31SStefan Hajnoczi 
49661007b31SStefan Hajnoczi     qemu_co_queue_init(&req->wait_queue);
49761007b31SStefan Hajnoczi 
4983783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
49961007b31SStefan Hajnoczi     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
5003783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
50161007b31SStefan Hajnoczi }
50261007b31SStefan Hajnoczi 
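/* Mark @req as serialising and widen its overlap window to @align boundaries
 * so that overlapping requests will wait for it (see
 * wait_serialising_requests()). */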
50361007b31SStefan Hajnoczi static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
50461007b31SStefan Hajnoczi {
50561007b31SStefan Hajnoczi     int64_t overlap_offset = req->offset & ~(align - 1);
50661007b31SStefan Hajnoczi     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
50761007b31SStefan Hajnoczi                                - overlap_offset;
50861007b31SStefan Hajnoczi 
50961007b31SStefan Hajnoczi     if (!req->serialising) {
51020fc71b2SPaolo Bonzini         atomic_inc(&req->bs->serialising_in_flight);
51161007b31SStefan Hajnoczi         req->serialising = true;
51261007b31SStefan Hajnoczi     }
51361007b31SStefan Hajnoczi 
51461007b31SStefan Hajnoczi     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
51561007b31SStefan Hajnoczi     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
51661007b31SStefan Hajnoczi }
51761007b31SStefan Hajnoczi 
51861007b31SStefan Hajnoczi /**
519244483e6SKevin Wolf  * Round a region to cluster boundaries
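 *
 * For example (illustrative values), with a 64 KiB cluster size a request at
 * offset=70000, bytes=1000 is rounded to *cluster_offset=65536 and
 * *cluster_bytes=65536.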
520244483e6SKevin Wolf  */
521244483e6SKevin Wolf void bdrv_round_to_clusters(BlockDriverState *bs,
5227cfd5275SEric Blake                             int64_t offset, int64_t bytes,
523244483e6SKevin Wolf                             int64_t *cluster_offset,
5247cfd5275SEric Blake                             int64_t *cluster_bytes)
525244483e6SKevin Wolf {
526244483e6SKevin Wolf     BlockDriverInfo bdi;
527244483e6SKevin Wolf 
528244483e6SKevin Wolf     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
529244483e6SKevin Wolf         *cluster_offset = offset;
530244483e6SKevin Wolf         *cluster_bytes = bytes;
531244483e6SKevin Wolf     } else {
532244483e6SKevin Wolf         int64_t c = bdi.cluster_size;
533244483e6SKevin Wolf         *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
534244483e6SKevin Wolf         *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
535244483e6SKevin Wolf     }
536244483e6SKevin Wolf }
537244483e6SKevin Wolf 
53861007b31SStefan Hajnoczi static int bdrv_get_cluster_size(BlockDriverState *bs)
53961007b31SStefan Hajnoczi {
54061007b31SStefan Hajnoczi     BlockDriverInfo bdi;
54161007b31SStefan Hajnoczi     int ret;
54261007b31SStefan Hajnoczi 
54361007b31SStefan Hajnoczi     ret = bdrv_get_info(bs, &bdi);
54461007b31SStefan Hajnoczi     if (ret < 0 || bdi.cluster_size == 0) {
545a5b8dd2cSEric Blake         return bs->bl.request_alignment;
54661007b31SStefan Hajnoczi     } else {
54761007b31SStefan Hajnoczi         return bdi.cluster_size;
54861007b31SStefan Hajnoczi     }
54961007b31SStefan Hajnoczi }
55061007b31SStefan Hajnoczi 
55161007b31SStefan Hajnoczi static bool tracked_request_overlaps(BdrvTrackedRequest *req,
55261007b31SStefan Hajnoczi                                      int64_t offset, unsigned int bytes)
55361007b31SStefan Hajnoczi {
55461007b31SStefan Hajnoczi     /*        aaaa   bbbb */
55561007b31SStefan Hajnoczi     if (offset >= req->overlap_offset + req->overlap_bytes) {
55661007b31SStefan Hajnoczi         return false;
55761007b31SStefan Hajnoczi     }
55861007b31SStefan Hajnoczi     /* bbbb   aaaa        */
55961007b31SStefan Hajnoczi     if (req->overlap_offset >= offset + bytes) {
56061007b31SStefan Hajnoczi         return false;
56161007b31SStefan Hajnoczi     }
56261007b31SStefan Hajnoczi     return true;
56361007b31SStefan Hajnoczi }
56461007b31SStefan Hajnoczi 
56599723548SPaolo Bonzini void bdrv_inc_in_flight(BlockDriverState *bs)
56699723548SPaolo Bonzini {
56799723548SPaolo Bonzini     atomic_inc(&bs->in_flight);
56899723548SPaolo Bonzini }
56999723548SPaolo Bonzini 
570c9d1a561SPaolo Bonzini static void dummy_bh_cb(void *opaque)
571c9d1a561SPaolo Bonzini {
572c9d1a561SPaolo Bonzini }
573c9d1a561SPaolo Bonzini 
574c9d1a561SPaolo Bonzini void bdrv_wakeup(BlockDriverState *bs)
575c9d1a561SPaolo Bonzini {
576e2a6ae7fSPaolo Bonzini     /* The barrier (or an atomic op) is in the caller.  */
577e2a6ae7fSPaolo Bonzini     if (atomic_read(&bs->wakeup)) {
578c9d1a561SPaolo Bonzini         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
579c9d1a561SPaolo Bonzini     }
580c9d1a561SPaolo Bonzini }
581c9d1a561SPaolo Bonzini 
58299723548SPaolo Bonzini void bdrv_dec_in_flight(BlockDriverState *bs)
58399723548SPaolo Bonzini {
58499723548SPaolo Bonzini     atomic_dec(&bs->in_flight);
585c9d1a561SPaolo Bonzini     bdrv_wakeup(bs);
58699723548SPaolo Bonzini }
58799723548SPaolo Bonzini 
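/* Wait until no serialising tracked request overlaps with @self (or, if
 * @self is itself serialising, until no tracked request overlaps at all).
 * Returns true if the coroutine had to wait. */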
58861007b31SStefan Hajnoczi static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
58961007b31SStefan Hajnoczi {
59061007b31SStefan Hajnoczi     BlockDriverState *bs = self->bs;
59161007b31SStefan Hajnoczi     BdrvTrackedRequest *req;
59261007b31SStefan Hajnoczi     bool retry;
59361007b31SStefan Hajnoczi     bool waited = false;
59461007b31SStefan Hajnoczi 
59520fc71b2SPaolo Bonzini     if (!atomic_read(&bs->serialising_in_flight)) {
59661007b31SStefan Hajnoczi         return false;
59761007b31SStefan Hajnoczi     }
59861007b31SStefan Hajnoczi 
59961007b31SStefan Hajnoczi     do {
60061007b31SStefan Hajnoczi         retry = false;
6013783fa3dSPaolo Bonzini         qemu_co_mutex_lock(&bs->reqs_lock);
60261007b31SStefan Hajnoczi         QLIST_FOREACH(req, &bs->tracked_requests, list) {
60361007b31SStefan Hajnoczi             if (req == self || (!req->serialising && !self->serialising)) {
60461007b31SStefan Hajnoczi                 continue;
60561007b31SStefan Hajnoczi             }
60661007b31SStefan Hajnoczi             if (tracked_request_overlaps(req, self->overlap_offset,
60761007b31SStefan Hajnoczi                                          self->overlap_bytes))
60861007b31SStefan Hajnoczi             {
60961007b31SStefan Hajnoczi                 /* Hitting this means there was a reentrant request, for
61061007b31SStefan Hajnoczi                  * example, a block driver issuing nested requests.  This must
61161007b31SStefan Hajnoczi                  * never happen since it means deadlock.
61261007b31SStefan Hajnoczi                  */
61361007b31SStefan Hajnoczi                 assert(qemu_coroutine_self() != req->co);
61461007b31SStefan Hajnoczi 
61561007b31SStefan Hajnoczi                 /* If the request is already (indirectly) waiting for us, or
61661007b31SStefan Hajnoczi                  * will wait for us as soon as it wakes up, then just go on
61761007b31SStefan Hajnoczi                  * (instead of producing a deadlock in the former case). */
61861007b31SStefan Hajnoczi                 if (!req->waiting_for) {
61961007b31SStefan Hajnoczi                     self->waiting_for = req;
6203783fa3dSPaolo Bonzini                     qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
62161007b31SStefan Hajnoczi                     self->waiting_for = NULL;
62261007b31SStefan Hajnoczi                     retry = true;
62361007b31SStefan Hajnoczi                     waited = true;
62461007b31SStefan Hajnoczi                     break;
62561007b31SStefan Hajnoczi                 }
62661007b31SStefan Hajnoczi             }
62761007b31SStefan Hajnoczi         }
6283783fa3dSPaolo Bonzini         qemu_co_mutex_unlock(&bs->reqs_lock);
62961007b31SStefan Hajnoczi     } while (retry);
63061007b31SStefan Hajnoczi 
63161007b31SStefan Hajnoczi     return waited;
63261007b31SStefan Hajnoczi }
63361007b31SStefan Hajnoczi 
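/* Basic sanity checks for a byte-based request: the size must be bounded, a
 * medium must be inserted, and the offset must not be negative. */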
63461007b31SStefan Hajnoczi static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
63561007b31SStefan Hajnoczi                                    size_t size)
63661007b31SStefan Hajnoczi {
63761007b31SStefan Hajnoczi     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
63861007b31SStefan Hajnoczi         return -EIO;
63961007b31SStefan Hajnoczi     }
64061007b31SStefan Hajnoczi 
64161007b31SStefan Hajnoczi     if (!bdrv_is_inserted(bs)) {
64261007b31SStefan Hajnoczi         return -ENOMEDIUM;
64361007b31SStefan Hajnoczi     }
64461007b31SStefan Hajnoczi 
64561007b31SStefan Hajnoczi     if (offset < 0) {
64661007b31SStefan Hajnoczi         return -EIO;
64761007b31SStefan Hajnoczi     }
64861007b31SStefan Hajnoczi 
64961007b31SStefan Hajnoczi     return 0;
65061007b31SStefan Hajnoczi }
65161007b31SStefan Hajnoczi 
65261007b31SStefan Hajnoczi typedef struct RwCo {
653e293b7a3SKevin Wolf     BdrvChild *child;
65461007b31SStefan Hajnoczi     int64_t offset;
65561007b31SStefan Hajnoczi     QEMUIOVector *qiov;
65661007b31SStefan Hajnoczi     bool is_write;
65761007b31SStefan Hajnoczi     int ret;
65861007b31SStefan Hajnoczi     BdrvRequestFlags flags;
65961007b31SStefan Hajnoczi } RwCo;
66061007b31SStefan Hajnoczi 
66161007b31SStefan Hajnoczi static void coroutine_fn bdrv_rw_co_entry(void *opaque)
66261007b31SStefan Hajnoczi {
66361007b31SStefan Hajnoczi     RwCo *rwco = opaque;
66461007b31SStefan Hajnoczi 
66561007b31SStefan Hajnoczi     if (!rwco->is_write) {
666a03ef88fSKevin Wolf         rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
66761007b31SStefan Hajnoczi                                    rwco->qiov->size, rwco->qiov,
66861007b31SStefan Hajnoczi                                    rwco->flags);
66961007b31SStefan Hajnoczi     } else {
670a03ef88fSKevin Wolf         rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
67161007b31SStefan Hajnoczi                                     rwco->qiov->size, rwco->qiov,
67261007b31SStefan Hajnoczi                                     rwco->flags);
67361007b31SStefan Hajnoczi     }
67461007b31SStefan Hajnoczi }
67561007b31SStefan Hajnoczi 
67661007b31SStefan Hajnoczi /*
67761007b31SStefan Hajnoczi  * Process a vectored synchronous request using coroutines
67861007b31SStefan Hajnoczi  */
679e293b7a3SKevin Wolf static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
68061007b31SStefan Hajnoczi                         QEMUIOVector *qiov, bool is_write,
68161007b31SStefan Hajnoczi                         BdrvRequestFlags flags)
68261007b31SStefan Hajnoczi {
68361007b31SStefan Hajnoczi     Coroutine *co;
68461007b31SStefan Hajnoczi     RwCo rwco = {
685e293b7a3SKevin Wolf         .child = child,
68661007b31SStefan Hajnoczi         .offset = offset,
68761007b31SStefan Hajnoczi         .qiov = qiov,
68861007b31SStefan Hajnoczi         .is_write = is_write,
68961007b31SStefan Hajnoczi         .ret = NOT_DONE,
69061007b31SStefan Hajnoczi         .flags = flags,
69161007b31SStefan Hajnoczi     };
69261007b31SStefan Hajnoczi 
69361007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
69461007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
69561007b31SStefan Hajnoczi         bdrv_rw_co_entry(&rwco);
69661007b31SStefan Hajnoczi     } else {
6970b8b8753SPaolo Bonzini         co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
698e92f0e19SFam Zheng         bdrv_coroutine_enter(child->bs, co);
69988b062c2SPaolo Bonzini         BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
70061007b31SStefan Hajnoczi     }
70161007b31SStefan Hajnoczi     return rwco.ret;
70261007b31SStefan Hajnoczi }
70361007b31SStefan Hajnoczi 
70461007b31SStefan Hajnoczi /*
70561007b31SStefan Hajnoczi  * Process a synchronous request using coroutines
70661007b31SStefan Hajnoczi  */
707e293b7a3SKevin Wolf static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
70861007b31SStefan Hajnoczi                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
70961007b31SStefan Hajnoczi {
71061007b31SStefan Hajnoczi     QEMUIOVector qiov;
71161007b31SStefan Hajnoczi     struct iovec iov = {
71261007b31SStefan Hajnoczi         .iov_base = (void *)buf,
71361007b31SStefan Hajnoczi         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
71461007b31SStefan Hajnoczi     };
71561007b31SStefan Hajnoczi 
71661007b31SStefan Hajnoczi     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
71761007b31SStefan Hajnoczi         return -EINVAL;
71861007b31SStefan Hajnoczi     }
71961007b31SStefan Hajnoczi 
72061007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
721e293b7a3SKevin Wolf     return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
72261007b31SStefan Hajnoczi                         &qiov, is_write, flags);
72361007b31SStefan Hajnoczi }
72461007b31SStefan Hajnoczi 
72561007b31SStefan Hajnoczi /* return < 0 if error. See bdrv_write() for the return codes */
726fbcbbf4eSKevin Wolf int bdrv_read(BdrvChild *child, int64_t sector_num,
72761007b31SStefan Hajnoczi               uint8_t *buf, int nb_sectors)
72861007b31SStefan Hajnoczi {
729e293b7a3SKevin Wolf     return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
73061007b31SStefan Hajnoczi }
73161007b31SStefan Hajnoczi 
73261007b31SStefan Hajnoczi /* Return < 0 if error. Important errors are:
73361007b31SStefan Hajnoczi   -EIO         generic I/O error (may happen for all errors)
73461007b31SStefan Hajnoczi   -ENOMEDIUM   No media inserted.
73561007b31SStefan Hajnoczi   -EINVAL      Invalid sector number or nb_sectors
73661007b31SStefan Hajnoczi   -EACCES      Trying to write a read-only device
73761007b31SStefan Hajnoczi */
73818d51c4bSKevin Wolf int bdrv_write(BdrvChild *child, int64_t sector_num,
73961007b31SStefan Hajnoczi                const uint8_t *buf, int nb_sectors)
74061007b31SStefan Hajnoczi {
741e293b7a3SKevin Wolf     return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
74261007b31SStefan Hajnoczi }
74361007b31SStefan Hajnoczi 
744720ff280SKevin Wolf int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
745f5a5ca79SManos Pitsidianakis                        int bytes, BdrvRequestFlags flags)
74661007b31SStefan Hajnoczi {
74774021bc4SEric Blake     QEMUIOVector qiov;
74874021bc4SEric Blake     struct iovec iov = {
74974021bc4SEric Blake         .iov_base = NULL,
750f5a5ca79SManos Pitsidianakis         .iov_len = bytes,
75174021bc4SEric Blake     };
75274021bc4SEric Blake 
75374021bc4SEric Blake     qemu_iovec_init_external(&qiov, &iov, 1);
754e293b7a3SKevin Wolf     return bdrv_prwv_co(child, offset, &qiov, true,
75561007b31SStefan Hajnoczi                         BDRV_REQ_ZERO_WRITE | flags);
75661007b31SStefan Hajnoczi }
75761007b31SStefan Hajnoczi 
75861007b31SStefan Hajnoczi /*
75974021bc4SEric Blake  * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
76061007b31SStefan Hajnoczi  * The operation is sped up by checking the block status and only writing
76161007b31SStefan Hajnoczi  * zeroes to the device if they currently do not return zeroes. Optional
76274021bc4SEric Blake  * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
763465fe887SEric Blake  * BDRV_REQ_FUA).
76461007b31SStefan Hajnoczi  *
76561007b31SStefan Hajnoczi  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
76661007b31SStefan Hajnoczi  */
767720ff280SKevin Wolf int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
76861007b31SStefan Hajnoczi {
769237d78f8SEric Blake     int ret;
770237d78f8SEric Blake     int64_t target_size, bytes, offset = 0;
771720ff280SKevin Wolf     BlockDriverState *bs = child->bs;
77261007b31SStefan Hajnoczi 
7737286d610SEric Blake     target_size = bdrv_getlength(bs);
7747286d610SEric Blake     if (target_size < 0) {
7757286d610SEric Blake         return target_size;
77661007b31SStefan Hajnoczi     }
77761007b31SStefan Hajnoczi 
77861007b31SStefan Hajnoczi     for (;;) {
7797286d610SEric Blake         bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
7807286d610SEric Blake         if (bytes <= 0) {
78161007b31SStefan Hajnoczi             return 0;
78261007b31SStefan Hajnoczi         }
783237d78f8SEric Blake         ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
78461007b31SStefan Hajnoczi         if (ret < 0) {
7857286d610SEric Blake             error_report("error getting block status at offset %" PRId64 ": %s",
7867286d610SEric Blake                          offset, strerror(-ret));
78761007b31SStefan Hajnoczi             return ret;
78861007b31SStefan Hajnoczi         }
78961007b31SStefan Hajnoczi         if (ret & BDRV_BLOCK_ZERO) {
790237d78f8SEric Blake             offset += bytes;
79161007b31SStefan Hajnoczi             continue;
79261007b31SStefan Hajnoczi         }
793237d78f8SEric Blake         ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
79461007b31SStefan Hajnoczi         if (ret < 0) {
7957286d610SEric Blake             error_report("error writing zeroes at offset %" PRId64 ": %s",
7967286d610SEric Blake                          offset, strerror(-ret));
79761007b31SStefan Hajnoczi             return ret;
79861007b31SStefan Hajnoczi         }
799237d78f8SEric Blake         offset += bytes;
80061007b31SStefan Hajnoczi     }
80161007b31SStefan Hajnoczi }
80261007b31SStefan Hajnoczi 
803cf2ab8fcSKevin Wolf int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
804f1e84741SKevin Wolf {
805f1e84741SKevin Wolf     int ret;
806f1e84741SKevin Wolf 
807e293b7a3SKevin Wolf     ret = bdrv_prwv_co(child, offset, qiov, false, 0);
808f1e84741SKevin Wolf     if (ret < 0) {
809f1e84741SKevin Wolf         return ret;
810f1e84741SKevin Wolf     }
811f1e84741SKevin Wolf 
812f1e84741SKevin Wolf     return qiov->size;
813f1e84741SKevin Wolf }
814f1e84741SKevin Wolf 
815cf2ab8fcSKevin Wolf int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
81661007b31SStefan Hajnoczi {
81761007b31SStefan Hajnoczi     QEMUIOVector qiov;
81861007b31SStefan Hajnoczi     struct iovec iov = {
81961007b31SStefan Hajnoczi         .iov_base = (void *)buf,
82061007b31SStefan Hajnoczi         .iov_len = bytes,
82161007b31SStefan Hajnoczi     };
82261007b31SStefan Hajnoczi 
82361007b31SStefan Hajnoczi     if (bytes < 0) {
82461007b31SStefan Hajnoczi         return -EINVAL;
82561007b31SStefan Hajnoczi     }
82661007b31SStefan Hajnoczi 
82761007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
828cf2ab8fcSKevin Wolf     return bdrv_preadv(child, offset, &qiov);
82961007b31SStefan Hajnoczi }
83061007b31SStefan Hajnoczi 
831d9ca2ea2SKevin Wolf int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
83261007b31SStefan Hajnoczi {
83361007b31SStefan Hajnoczi     int ret;
83461007b31SStefan Hajnoczi 
835e293b7a3SKevin Wolf     ret = bdrv_prwv_co(child, offset, qiov, true, 0);
83661007b31SStefan Hajnoczi     if (ret < 0) {
83761007b31SStefan Hajnoczi         return ret;
83861007b31SStefan Hajnoczi     }
83961007b31SStefan Hajnoczi 
84061007b31SStefan Hajnoczi     return qiov->size;
84161007b31SStefan Hajnoczi }
84261007b31SStefan Hajnoczi 
843d9ca2ea2SKevin Wolf int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
84461007b31SStefan Hajnoczi {
84561007b31SStefan Hajnoczi     QEMUIOVector qiov;
84661007b31SStefan Hajnoczi     struct iovec iov = {
84761007b31SStefan Hajnoczi         .iov_base   = (void *) buf,
84861007b31SStefan Hajnoczi         .iov_len    = bytes,
84961007b31SStefan Hajnoczi     };
85061007b31SStefan Hajnoczi 
85161007b31SStefan Hajnoczi     if (bytes < 0) {
85261007b31SStefan Hajnoczi         return -EINVAL;
85361007b31SStefan Hajnoczi     }
85461007b31SStefan Hajnoczi 
85561007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
856d9ca2ea2SKevin Wolf     return bdrv_pwritev(child, offset, &qiov);
85761007b31SStefan Hajnoczi }
85861007b31SStefan Hajnoczi 
85961007b31SStefan Hajnoczi /*
86061007b31SStefan Hajnoczi  * Writes to the file and ensures that no writes are reordered across this
86161007b31SStefan Hajnoczi  * request (acts as a barrier)
86261007b31SStefan Hajnoczi  *
86361007b31SStefan Hajnoczi  * Returns 0 on success, -errno in error cases.
86461007b31SStefan Hajnoczi  */
865d9ca2ea2SKevin Wolf int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
86661007b31SStefan Hajnoczi                      const void *buf, int count)
86761007b31SStefan Hajnoczi {
86861007b31SStefan Hajnoczi     int ret;
86961007b31SStefan Hajnoczi 
870d9ca2ea2SKevin Wolf     ret = bdrv_pwrite(child, offset, buf, count);
87161007b31SStefan Hajnoczi     if (ret < 0) {
87261007b31SStefan Hajnoczi         return ret;
87361007b31SStefan Hajnoczi     }
87461007b31SStefan Hajnoczi 
875d9ca2ea2SKevin Wolf     ret = bdrv_flush(child->bs);
876855a6a93SKevin Wolf     if (ret < 0) {
877855a6a93SKevin Wolf         return ret;
87861007b31SStefan Hajnoczi     }
87961007b31SStefan Hajnoczi 
88061007b31SStefan Hajnoczi     return 0;
88161007b31SStefan Hajnoczi }
88261007b31SStefan Hajnoczi 
88308844473SKevin Wolf typedef struct CoroutineIOCompletion {
88408844473SKevin Wolf     Coroutine *coroutine;
88508844473SKevin Wolf     int ret;
88608844473SKevin Wolf } CoroutineIOCompletion;
88708844473SKevin Wolf 
88808844473SKevin Wolf static void bdrv_co_io_em_complete(void *opaque, int ret)
88908844473SKevin Wolf {
89008844473SKevin Wolf     CoroutineIOCompletion *co = opaque;
89108844473SKevin Wolf 
89208844473SKevin Wolf     co->ret = ret;
893b9e413ddSPaolo Bonzini     aio_co_wake(co->coroutine);
89408844473SKevin Wolf }
89508844473SKevin Wolf 
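/* Forward a read to the driver: use the byte-based .bdrv_co_preadv if the
 * driver provides it, otherwise fall back to the sector-based .bdrv_co_readv
 * or emulate the request on top of the AIO interface (.bdrv_aio_readv). */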
896166fe960SKevin Wolf static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
897166fe960SKevin Wolf                                            uint64_t offset, uint64_t bytes,
898166fe960SKevin Wolf                                            QEMUIOVector *qiov, int flags)
899166fe960SKevin Wolf {
900166fe960SKevin Wolf     BlockDriver *drv = bs->drv;
9013fb06697SKevin Wolf     int64_t sector_num;
9023fb06697SKevin Wolf     unsigned int nb_sectors;
9033fb06697SKevin Wolf 
904fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
905fa166538SEric Blake 
906d470ad42SMax Reitz     if (!drv) {
907d470ad42SMax Reitz         return -ENOMEDIUM;
908d470ad42SMax Reitz     }
909d470ad42SMax Reitz 
9103fb06697SKevin Wolf     if (drv->bdrv_co_preadv) {
9113fb06697SKevin Wolf         return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
9123fb06697SKevin Wolf     }
9133fb06697SKevin Wolf 
9143fb06697SKevin Wolf     sector_num = offset >> BDRV_SECTOR_BITS;
9153fb06697SKevin Wolf     nb_sectors = bytes >> BDRV_SECTOR_BITS;
916166fe960SKevin Wolf 
917166fe960SKevin Wolf     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
918166fe960SKevin Wolf     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
919166fe960SKevin Wolf     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
920166fe960SKevin Wolf 
92108844473SKevin Wolf     if (drv->bdrv_co_readv) {
922166fe960SKevin Wolf         return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
92308844473SKevin Wolf     } else {
92408844473SKevin Wolf         BlockAIOCB *acb;
92508844473SKevin Wolf         CoroutineIOCompletion co = {
92608844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
92708844473SKevin Wolf         };
92808844473SKevin Wolf 
92908844473SKevin Wolf         acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
93008844473SKevin Wolf                                       bdrv_co_io_em_complete, &co);
93108844473SKevin Wolf         if (acb == NULL) {
93208844473SKevin Wolf             return -EIO;
93308844473SKevin Wolf         } else {
93408844473SKevin Wolf             qemu_coroutine_yield();
93508844473SKevin Wolf             return co.ret;
93608844473SKevin Wolf         }
93708844473SKevin Wolf     }
938166fe960SKevin Wolf }
939166fe960SKevin Wolf 
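/* Forward a write to the driver, preferring the byte-based .bdrv_co_pwritev,
 * then the sector-based .bdrv_co_writev_flags/.bdrv_co_writev, and finally
 * the AIO interface.  Flags the driver does not support (e.g. BDRV_REQ_FUA)
 * are emulated afterwards, FUA by an explicit bdrv_co_flush(). */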
94078a07294SKevin Wolf static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
94178a07294SKevin Wolf                                             uint64_t offset, uint64_t bytes,
94278a07294SKevin Wolf                                             QEMUIOVector *qiov, int flags)
94378a07294SKevin Wolf {
94478a07294SKevin Wolf     BlockDriver *drv = bs->drv;
9453fb06697SKevin Wolf     int64_t sector_num;
9463fb06697SKevin Wolf     unsigned int nb_sectors;
94778a07294SKevin Wolf     int ret;
94878a07294SKevin Wolf 
949fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
950fa166538SEric Blake 
951d470ad42SMax Reitz     if (!drv) {
952d470ad42SMax Reitz         return -ENOMEDIUM;
953d470ad42SMax Reitz     }
954d470ad42SMax Reitz 
9553fb06697SKevin Wolf     if (drv->bdrv_co_pwritev) {
956515c2f43SKevin Wolf         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
957515c2f43SKevin Wolf                                    flags & bs->supported_write_flags);
958515c2f43SKevin Wolf         flags &= ~bs->supported_write_flags;
9593fb06697SKevin Wolf         goto emulate_flags;
9603fb06697SKevin Wolf     }
9613fb06697SKevin Wolf 
9623fb06697SKevin Wolf     sector_num = offset >> BDRV_SECTOR_BITS;
9633fb06697SKevin Wolf     nb_sectors = bytes >> BDRV_SECTOR_BITS;
9643fb06697SKevin Wolf 
96578a07294SKevin Wolf     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
96678a07294SKevin Wolf     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
96778a07294SKevin Wolf     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
96878a07294SKevin Wolf 
96978a07294SKevin Wolf     if (drv->bdrv_co_writev_flags) {
97078a07294SKevin Wolf         ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
9714df863f3SEric Blake                                         flags & bs->supported_write_flags);
9724df863f3SEric Blake         flags &= ~bs->supported_write_flags;
97308844473SKevin Wolf     } else if (drv->bdrv_co_writev) {
9744df863f3SEric Blake         assert(!bs->supported_write_flags);
97578a07294SKevin Wolf         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
97608844473SKevin Wolf     } else {
97708844473SKevin Wolf         BlockAIOCB *acb;
97808844473SKevin Wolf         CoroutineIOCompletion co = {
97908844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
98008844473SKevin Wolf         };
98108844473SKevin Wolf 
98208844473SKevin Wolf         acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
98308844473SKevin Wolf                                        bdrv_co_io_em_complete, &co);
98408844473SKevin Wolf         if (acb == NULL) {
9853fb06697SKevin Wolf             ret = -EIO;
98608844473SKevin Wolf         } else {
98708844473SKevin Wolf             qemu_coroutine_yield();
9883fb06697SKevin Wolf             ret = co.ret;
98908844473SKevin Wolf         }
99078a07294SKevin Wolf     }
99178a07294SKevin Wolf 
9923fb06697SKevin Wolf emulate_flags:
9934df863f3SEric Blake     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
99478a07294SKevin Wolf         ret = bdrv_co_flush(bs);
99578a07294SKevin Wolf     }
99678a07294SKevin Wolf 
99778a07294SKevin Wolf     return ret;
99878a07294SKevin Wolf }
99978a07294SKevin Wolf 
100029a298afSPavel Butsykin static int coroutine_fn
100129a298afSPavel Butsykin bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
100229a298afSPavel Butsykin                                uint64_t bytes, QEMUIOVector *qiov)
100329a298afSPavel Butsykin {
100429a298afSPavel Butsykin     BlockDriver *drv = bs->drv;
100529a298afSPavel Butsykin 
1006d470ad42SMax Reitz     if (!drv) {
1007d470ad42SMax Reitz         return -ENOMEDIUM;
1008d470ad42SMax Reitz     }
1009d470ad42SMax Reitz 
101029a298afSPavel Butsykin     if (!drv->bdrv_co_pwritev_compressed) {
101129a298afSPavel Butsykin         return -ENOTSUP;
101229a298afSPavel Butsykin     }
101329a298afSPavel Butsykin 
101429a298afSPavel Butsykin     return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
101529a298afSPavel Butsykin }
101629a298afSPavel Butsykin 
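/* Perform copy-on-read for a read request: cluster-aligned ranges that are
 * not yet allocated in this node are read through a bounce buffer and
 * written back into the node, so that later reads can be served locally. */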
101785c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
1018244483e6SKevin Wolf         int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
101961007b31SStefan Hajnoczi {
102085c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
102185c97ca7SKevin Wolf 
102261007b31SStefan Hajnoczi     /* Perform I/O through a temporary buffer so that users who scribble over
102361007b31SStefan Hajnoczi      * their read buffer while the operation is in progress do not end up
102461007b31SStefan Hajnoczi      * modifying the image file.  This is critical for zero-copy guest I/O
102561007b31SStefan Hajnoczi      * where anything might happen inside guest memory.
102661007b31SStefan Hajnoczi      */
102761007b31SStefan Hajnoczi     void *bounce_buffer;
102861007b31SStefan Hajnoczi 
102961007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
103061007b31SStefan Hajnoczi     struct iovec iov;
1031cb2e2878SEric Blake     QEMUIOVector local_qiov;
1032244483e6SKevin Wolf     int64_t cluster_offset;
10337cfd5275SEric Blake     int64_t cluster_bytes;
103461007b31SStefan Hajnoczi     size_t skip_bytes;
103561007b31SStefan Hajnoczi     int ret;
1036cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1037cb2e2878SEric Blake                                     BDRV_REQUEST_MAX_BYTES);
1038cb2e2878SEric Blake     unsigned int progress = 0;
103961007b31SStefan Hajnoczi 
1040d470ad42SMax Reitz     if (!drv) {
1041d470ad42SMax Reitz         return -ENOMEDIUM;
1042d470ad42SMax Reitz     }
1043d470ad42SMax Reitz 
10441bf03e66SKevin Wolf     /* FIXME We cannot require callers to have write permissions when all they
10451bf03e66SKevin Wolf      * are doing is a read request. If we did things right, write permissions
10461bf03e66SKevin Wolf      * would be obtained anyway, but internally by the copy-on-read code.
1047765d9df9SEric Blake      * However, as long as it is implemented here rather than in a separate
10481bf03e66SKevin Wolf      * filter driver, the copy-on-read code doesn't have its own BdrvChild for
10491bf03e66SKevin Wolf      * which it could request permissions. Therefore we have to bypass the
10501bf03e66SKevin Wolf      * permission system for the moment. */
10511bf03e66SKevin Wolf     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1052afa4b293SKevin Wolf 
105361007b31SStefan Hajnoczi     /* Cover entire cluster so no additional backing file I/O is required when
1054cb2e2878SEric Blake      * allocating the cluster in the image file.  Note that this value may exceed
1055cb2e2878SEric Blake      * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1056cb2e2878SEric Blake      * is one reason we loop rather than doing it all at once.
105761007b31SStefan Hajnoczi      */
1058244483e6SKevin Wolf     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1059cb2e2878SEric Blake     skip_bytes = offset - cluster_offset;
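    /* As an example, with a 64 KiB cluster size, offset 70000 and bytes 1000
     * this yields cluster_offset 65536, cluster_bytes 65536 and skip_bytes
     * 4464: the request is widened to the containing cluster, and skip_bytes
     * records how far into that cluster the originally requested data starts.
     */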
106061007b31SStefan Hajnoczi 
1061244483e6SKevin Wolf     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1062244483e6SKevin Wolf                                    cluster_offset, cluster_bytes);
106361007b31SStefan Hajnoczi 
1064cb2e2878SEric Blake     bounce_buffer = qemu_try_blockalign(bs,
1065cb2e2878SEric Blake                                         MIN(MIN(max_transfer, cluster_bytes),
1066cb2e2878SEric Blake                                             MAX_BOUNCE_BUFFER));
106761007b31SStefan Hajnoczi     if (bounce_buffer == NULL) {
106861007b31SStefan Hajnoczi         ret = -ENOMEM;
106961007b31SStefan Hajnoczi         goto err;
107061007b31SStefan Hajnoczi     }
107161007b31SStefan Hajnoczi 
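    /* Work through the cluster range in chunks of at most max_transfer bytes.
     * Chunks that bdrv_is_allocated() reports as allocated are read straight
     * into the caller's qiov; unallocated chunks go through the bounce
     * buffer, are written back to this image (as an explicit zero write when
     * the data is all zeroes) and are then copied into the caller's qiov.
     */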
1072cb2e2878SEric Blake     while (cluster_bytes) {
1073cb2e2878SEric Blake         int64_t pnum;
107461007b31SStefan Hajnoczi 
1075cb2e2878SEric Blake         ret = bdrv_is_allocated(bs, cluster_offset,
1076cb2e2878SEric Blake                                 MIN(cluster_bytes, max_transfer), &pnum);
1077cb2e2878SEric Blake         if (ret < 0) {
1078cb2e2878SEric Blake             /* Safe to treat errors in querying allocation as if
1079cb2e2878SEric Blake              * unallocated; we'll probably fail again soon on the
1080cb2e2878SEric Blake              * read, but at least that will set a decent errno.
1081cb2e2878SEric Blake              */
1082cb2e2878SEric Blake             pnum = MIN(cluster_bytes, max_transfer);
1083cb2e2878SEric Blake         }
1084cb2e2878SEric Blake 
1085cb2e2878SEric Blake         assert(skip_bytes < pnum);
1086cb2e2878SEric Blake 
1087cb2e2878SEric Blake         if (ret <= 0) {
1088cb2e2878SEric Blake             /* Must copy-on-read; use the bounce buffer */
1089cb2e2878SEric Blake             iov.iov_base = bounce_buffer;
1090cb2e2878SEric Blake             iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1091cb2e2878SEric Blake             qemu_iovec_init_external(&local_qiov, &iov, 1);
1092cb2e2878SEric Blake 
1093cb2e2878SEric Blake             ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1094cb2e2878SEric Blake                                      &local_qiov, 0);
109561007b31SStefan Hajnoczi             if (ret < 0) {
109661007b31SStefan Hajnoczi                 goto err;
109761007b31SStefan Hajnoczi             }
109861007b31SStefan Hajnoczi 
1099d855ebcdSEric Blake             bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1100c1499a5eSEric Blake             if (drv->bdrv_co_pwrite_zeroes &&
1101cb2e2878SEric Blake                 buffer_is_zero(bounce_buffer, pnum)) {
1102a604fa2bSEric Blake                 /* FIXME: Should we (perhaps conditionally) be setting
1103a604fa2bSEric Blake                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1104a604fa2bSEric Blake                  * that still correctly reads as zero? */
1105cb2e2878SEric Blake                 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 0);
110661007b31SStefan Hajnoczi             } else {
1107cb2e2878SEric Blake                 /* This does not change the data on the disk, so it is not
1108cb2e2878SEric Blake                  * necessary to flush even in cache=writethrough mode.
110961007b31SStefan Hajnoczi                  */
1110cb2e2878SEric Blake                 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1111cb2e2878SEric Blake                                           &local_qiov, 0);
111261007b31SStefan Hajnoczi             }
111361007b31SStefan Hajnoczi 
111461007b31SStefan Hajnoczi             if (ret < 0) {
1115cb2e2878SEric Blake                 /* It might be okay to ignore write errors for guest
1116cb2e2878SEric Blake                  * requests.  If this is a deliberate copy-on-read
1117cb2e2878SEric Blake                  * then we don't want to ignore the error.  Simply
1118cb2e2878SEric Blake                  * report it in all cases.
111961007b31SStefan Hajnoczi                  */
112061007b31SStefan Hajnoczi                 goto err;
112161007b31SStefan Hajnoczi             }
112261007b31SStefan Hajnoczi 
1123cb2e2878SEric Blake             qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
1124cb2e2878SEric Blake                                 pnum - skip_bytes);
1125cb2e2878SEric Blake         } else {
1126cb2e2878SEric Blake             /* Read directly into the destination */
1127cb2e2878SEric Blake             qemu_iovec_init(&local_qiov, qiov->niov);
1128cb2e2878SEric Blake             qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
1129cb2e2878SEric Blake             ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
1130cb2e2878SEric Blake                                      &local_qiov, 0);
1131cb2e2878SEric Blake             qemu_iovec_destroy(&local_qiov);
1132cb2e2878SEric Blake             if (ret < 0) {
1133cb2e2878SEric Blake                 goto err;
1134cb2e2878SEric Blake             }
1135cb2e2878SEric Blake         }
1136cb2e2878SEric Blake 
1137cb2e2878SEric Blake         cluster_offset += pnum;
1138cb2e2878SEric Blake         cluster_bytes -= pnum;
1139cb2e2878SEric Blake         progress += pnum - skip_bytes;
1140cb2e2878SEric Blake         skip_bytes = 0;
1141cb2e2878SEric Blake     }
1142cb2e2878SEric Blake     ret = 0;
114361007b31SStefan Hajnoczi 
114461007b31SStefan Hajnoczi err:
114561007b31SStefan Hajnoczi     qemu_vfree(bounce_buffer);
114661007b31SStefan Hajnoczi     return ret;
114761007b31SStefan Hajnoczi }
114861007b31SStefan Hajnoczi 
114961007b31SStefan Hajnoczi /*
115061007b31SStefan Hajnoczi  * Forwards an already correctly aligned request to the BlockDriver. This
11511a62d0acSEric Blake  * handles copy on read, zeroing after EOF, and fragmentation of large
11521a62d0acSEric Blake  * reads; any other features must be implemented by the caller.
115361007b31SStefan Hajnoczi  */
115485c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
115561007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
115661007b31SStefan Hajnoczi     int64_t align, QEMUIOVector *qiov, int flags)
115761007b31SStefan Hajnoczi {
115885c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
1159c9d20029SKevin Wolf     int64_t total_bytes, max_bytes;
11601a62d0acSEric Blake     int ret = 0;
11611a62d0acSEric Blake     uint64_t bytes_remaining = bytes;
11621a62d0acSEric Blake     int max_transfer;
116361007b31SStefan Hajnoczi 
116449c07526SKevin Wolf     assert(is_power_of_2(align));
116549c07526SKevin Wolf     assert((offset & (align - 1)) == 0);
116649c07526SKevin Wolf     assert((bytes & (align - 1)) == 0);
116761007b31SStefan Hajnoczi     assert(!qiov || bytes == qiov->size);
1168abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
11691a62d0acSEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
11701a62d0acSEric Blake                                    align);
1171a604fa2bSEric Blake 
1172a604fa2bSEric Blake     /* TODO: We would need a per-BDS .supported_read_flags and
1173a604fa2bSEric Blake      * potential fallback support, if we ever implement any read flags
1174a604fa2bSEric Blake      * to pass through to drivers.  For now, there aren't any
1175a604fa2bSEric Blake      * passthrough flags.  */
1176a604fa2bSEric Blake     assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
117761007b31SStefan Hajnoczi 
117861007b31SStefan Hajnoczi     /* Handle Copy on Read and associated serialisation */
117961007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
118061007b31SStefan Hajnoczi         /* If we touch the same cluster it counts as an overlap.  This
118161007b31SStefan Hajnoczi          * guarantees that allocating writes will be serialized and not race
118261007b31SStefan Hajnoczi          * with each other for the same cluster.  For example, in copy-on-read
118361007b31SStefan Hajnoczi          * it ensures that the CoR read and write operations are atomic and
118461007b31SStefan Hajnoczi          * guest writes cannot interleave between them. */
118561007b31SStefan Hajnoczi         mark_request_serialising(req, bdrv_get_cluster_size(bs));
118661007b31SStefan Hajnoczi     }
118761007b31SStefan Hajnoczi 
118861408b25SFam Zheng     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
118961007b31SStefan Hajnoczi         wait_serialising_requests(req);
119061408b25SFam Zheng     }
119161007b31SStefan Hajnoczi 
119261007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
1193d6a644bbSEric Blake         int64_t pnum;
119461007b31SStefan Hajnoczi 
119588e63df2SEric Blake         ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
119661007b31SStefan Hajnoczi         if (ret < 0) {
119761007b31SStefan Hajnoczi             goto out;
119861007b31SStefan Hajnoczi         }
119961007b31SStefan Hajnoczi 
120088e63df2SEric Blake         if (!ret || pnum != bytes) {
120185c97ca7SKevin Wolf             ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
120261007b31SStefan Hajnoczi             goto out;
120361007b31SStefan Hajnoczi         }
120461007b31SStefan Hajnoczi     }
120561007b31SStefan Hajnoczi 
12061a62d0acSEric Blake     /* Forward the request to the BlockDriver, possibly fragmenting it */
120749c07526SKevin Wolf     total_bytes = bdrv_getlength(bs);
120849c07526SKevin Wolf     if (total_bytes < 0) {
120949c07526SKevin Wolf         ret = total_bytes;
121061007b31SStefan Hajnoczi         goto out;
121161007b31SStefan Hajnoczi     }
121261007b31SStefan Hajnoczi 
121349c07526SKevin Wolf     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
12141a62d0acSEric Blake     if (bytes <= max_bytes && bytes <= max_transfer) {
1215166fe960SKevin Wolf         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
12161a62d0acSEric Blake         goto out;
121761007b31SStefan Hajnoczi     }
121861007b31SStefan Hajnoczi 
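    /* Fragment the request: everything up to max_bytes (the distance to end
     * of file, rounded up to the request alignment) is read from the driver
     * in chunks of at most max_transfer bytes; any remainder lies beyond EOF
     * and is simply zero-filled in the qiov.
     */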
12191a62d0acSEric Blake     while (bytes_remaining) {
12201a62d0acSEric Blake         int num;
12211a62d0acSEric Blake 
12221a62d0acSEric Blake         if (max_bytes) {
12231a62d0acSEric Blake             QEMUIOVector local_qiov;
12241a62d0acSEric Blake 
12251a62d0acSEric Blake             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
12261a62d0acSEric Blake             assert(num);
12271a62d0acSEric Blake             qemu_iovec_init(&local_qiov, qiov->niov);
12281a62d0acSEric Blake             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
12291a62d0acSEric Blake 
12301a62d0acSEric Blake             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
12311a62d0acSEric Blake                                      num, &local_qiov, 0);
12321a62d0acSEric Blake             max_bytes -= num;
12331a62d0acSEric Blake             qemu_iovec_destroy(&local_qiov);
12341a62d0acSEric Blake         } else {
12351a62d0acSEric Blake             num = bytes_remaining;
12361a62d0acSEric Blake             ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
12371a62d0acSEric Blake                                     bytes_remaining);
12381a62d0acSEric Blake         }
12391a62d0acSEric Blake         if (ret < 0) {
12401a62d0acSEric Blake             goto out;
12411a62d0acSEric Blake         }
12421a62d0acSEric Blake         bytes_remaining -= num;
124361007b31SStefan Hajnoczi     }
124461007b31SStefan Hajnoczi 
124561007b31SStefan Hajnoczi out:
12461a62d0acSEric Blake     return ret < 0 ? ret : 0;
124761007b31SStefan Hajnoczi }
124861007b31SStefan Hajnoczi 
124961007b31SStefan Hajnoczi /*
125061007b31SStefan Hajnoczi  * Handle a read request in coroutine context
125161007b31SStefan Hajnoczi  */
1252a03ef88fSKevin Wolf int coroutine_fn bdrv_co_preadv(BdrvChild *child,
125361007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
125461007b31SStefan Hajnoczi     BdrvRequestFlags flags)
125561007b31SStefan Hajnoczi {
1256a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
125761007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
125861007b31SStefan Hajnoczi     BdrvTrackedRequest req;
125961007b31SStefan Hajnoczi 
1260a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
126161007b31SStefan Hajnoczi     uint8_t *head_buf = NULL;
126261007b31SStefan Hajnoczi     uint8_t *tail_buf = NULL;
126361007b31SStefan Hajnoczi     QEMUIOVector local_qiov;
126461007b31SStefan Hajnoczi     bool use_local_qiov = false;
126561007b31SStefan Hajnoczi     int ret;
126661007b31SStefan Hajnoczi 
1267f42cf447SDaniel P. Berrange     trace_bdrv_co_preadv(child->bs, offset, bytes, flags);
1268f42cf447SDaniel P. Berrange 
126961007b31SStefan Hajnoczi     if (!drv) {
127061007b31SStefan Hajnoczi         return -ENOMEDIUM;
127161007b31SStefan Hajnoczi     }
127261007b31SStefan Hajnoczi 
127361007b31SStefan Hajnoczi     ret = bdrv_check_byte_request(bs, offset, bytes);
127461007b31SStefan Hajnoczi     if (ret < 0) {
127561007b31SStefan Hajnoczi         return ret;
127661007b31SStefan Hajnoczi     }
127761007b31SStefan Hajnoczi 
127899723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
127999723548SPaolo Bonzini 
12809568b511SWen Congyang     /* Don't do copy-on-read if we read data before a write operation */
1281d3faa13eSPaolo Bonzini     if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
128261007b31SStefan Hajnoczi         flags |= BDRV_REQ_COPY_ON_READ;
128361007b31SStefan Hajnoczi     }
128461007b31SStefan Hajnoczi 
128561007b31SStefan Hajnoczi     /* Align read if necessary by padding qiov */
128661007b31SStefan Hajnoczi     if (offset & (align - 1)) {
128761007b31SStefan Hajnoczi         head_buf = qemu_blockalign(bs, align);
128861007b31SStefan Hajnoczi         qemu_iovec_init(&local_qiov, qiov->niov + 2);
128961007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
129061007b31SStefan Hajnoczi         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
129161007b31SStefan Hajnoczi         use_local_qiov = true;
129261007b31SStefan Hajnoczi 
129361007b31SStefan Hajnoczi         bytes += offset & (align - 1);
129461007b31SStefan Hajnoczi         offset = offset & ~(align - 1);
129561007b31SStefan Hajnoczi     }
129661007b31SStefan Hajnoczi 
129761007b31SStefan Hajnoczi     if ((offset + bytes) & (align - 1)) {
129861007b31SStefan Hajnoczi         if (!use_local_qiov) {
129961007b31SStefan Hajnoczi             qemu_iovec_init(&local_qiov, qiov->niov + 1);
130061007b31SStefan Hajnoczi             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
130161007b31SStefan Hajnoczi             use_local_qiov = true;
130261007b31SStefan Hajnoczi         }
130361007b31SStefan Hajnoczi         tail_buf = qemu_blockalign(bs, align);
130461007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, tail_buf,
130561007b31SStefan Hajnoczi                        align - ((offset + bytes) & (align - 1)));
130661007b31SStefan Hajnoczi 
130761007b31SStefan Hajnoczi         bytes = ROUND_UP(bytes, align);
130861007b31SStefan Hajnoczi     }
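    /* For example, with align = 512, offset = 1000 and bytes = 100, the head
     * padding turns this into offset = 512, bytes = 588, and the tail padding
     * then rounds bytes up to 1024, so a single fully aligned request covers
     * the 100 bytes the caller asked for.
     */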
130961007b31SStefan Hajnoczi 
1310ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
131185c97ca7SKevin Wolf     ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
131261007b31SStefan Hajnoczi                               use_local_qiov ? &local_qiov : qiov,
131361007b31SStefan Hajnoczi                               flags);
131461007b31SStefan Hajnoczi     tracked_request_end(&req);
131599723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
131661007b31SStefan Hajnoczi 
131761007b31SStefan Hajnoczi     if (use_local_qiov) {
131861007b31SStefan Hajnoczi         qemu_iovec_destroy(&local_qiov);
131961007b31SStefan Hajnoczi         qemu_vfree(head_buf);
132061007b31SStefan Hajnoczi         qemu_vfree(tail_buf);
132161007b31SStefan Hajnoczi     }
132261007b31SStefan Hajnoczi 
132361007b31SStefan Hajnoczi     return ret;
132461007b31SStefan Hajnoczi }
132561007b31SStefan Hajnoczi 
1326adad6496SKevin Wolf static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
132761007b31SStefan Hajnoczi     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
132861007b31SStefan Hajnoczi     BdrvRequestFlags flags)
132961007b31SStefan Hajnoczi {
133061007b31SStefan Hajnoczi     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
133161007b31SStefan Hajnoczi         return -EINVAL;
133261007b31SStefan Hajnoczi     }
133361007b31SStefan Hajnoczi 
1334a03ef88fSKevin Wolf     return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
133561007b31SStefan Hajnoczi                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
133661007b31SStefan Hajnoczi }
133761007b31SStefan Hajnoczi 
133828b04a8fSKevin Wolf int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
133961007b31SStefan Hajnoczi                                int nb_sectors, QEMUIOVector *qiov)
134061007b31SStefan Hajnoczi {
1341adad6496SKevin Wolf     return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
134261007b31SStefan Hajnoczi }
134361007b31SStefan Hajnoczi 
1344d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1345f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags)
134661007b31SStefan Hajnoczi {
134761007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
134861007b31SStefan Hajnoczi     QEMUIOVector qiov;
134961007b31SStefan Hajnoczi     struct iovec iov = {0};
135061007b31SStefan Hajnoczi     int ret = 0;
1351465fe887SEric Blake     bool need_flush = false;
1352443668caSDenis V. Lunev     int head = 0;
1353443668caSDenis V. Lunev     int tail = 0;
135461007b31SStefan Hajnoczi 
1355cf081fcaSEric Blake     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1356a5b8dd2cSEric Blake     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1357a5b8dd2cSEric Blake                         bs->bl.request_alignment);
1358cb2e2878SEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1359cf081fcaSEric Blake 
1360d470ad42SMax Reitz     if (!drv) {
1361d470ad42SMax Reitz         return -ENOMEDIUM;
1362d470ad42SMax Reitz     }
1363d470ad42SMax Reitz 
1364b8d0a980SEric Blake     assert(alignment % bs->bl.request_alignment == 0);
1365b8d0a980SEric Blake     head = offset % alignment;
1366f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % alignment;
1367b8d0a980SEric Blake     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1368b8d0a980SEric Blake     assert(max_write_zeroes >= bs->bl.request_alignment);
136961007b31SStefan Hajnoczi 
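    /* As an example, with alignment = 4096, offset = 5000 and bytes = 20000:
     * head = 904 and tail = 424, so the loop first writes 3192 bytes up to
     * the 8192 boundary, then 16384 aligned bytes, and finally the 424-byte
     * tail (each step further limited by max_write_zeroes and max_transfer).
     */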
1370f5a5ca79SManos Pitsidianakis     while (bytes > 0 && !ret) {
1371f5a5ca79SManos Pitsidianakis         int num = bytes;
137261007b31SStefan Hajnoczi 
137361007b31SStefan Hajnoczi         /* Align the request.  Block drivers can expect that the "bulk" of the
1374443668caSDenis V. Lunev          * request is aligned and that unaligned requests do not cross cluster
1375443668caSDenis V. Lunev          * boundaries.
137661007b31SStefan Hajnoczi          */
1377443668caSDenis V. Lunev         if (head) {
1378b2f95feeSEric Blake             /* Make a small request up to the first aligned sector. For
1379b2f95feeSEric Blake              * convenience, limit this request to max_transfer even if
1380b2f95feeSEric Blake              * we don't need to fall back to writes.  */
1381f5a5ca79SManos Pitsidianakis             num = MIN(MIN(bytes, max_transfer), alignment - head);
1382b2f95feeSEric Blake             head = (head + num) % alignment;
1383b2f95feeSEric Blake             assert(num < max_write_zeroes);
1384d05aa8bbSEric Blake         } else if (tail && num > alignment) {
1385443668caSDenis V. Lunev             /* Shorten the request to the last aligned sector.  */
1386443668caSDenis V. Lunev             num -= tail;
138761007b31SStefan Hajnoczi         }
138861007b31SStefan Hajnoczi 
138961007b31SStefan Hajnoczi         /* limit request size */
139061007b31SStefan Hajnoczi         if (num > max_write_zeroes) {
139161007b31SStefan Hajnoczi             num = max_write_zeroes;
139261007b31SStefan Hajnoczi         }
139361007b31SStefan Hajnoczi 
139461007b31SStefan Hajnoczi         ret = -ENOTSUP;
139561007b31SStefan Hajnoczi         /* First try the efficient write zeroes operation */
1396d05aa8bbSEric Blake         if (drv->bdrv_co_pwrite_zeroes) {
1397d05aa8bbSEric Blake             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1398d05aa8bbSEric Blake                                              flags & bs->supported_zero_flags);
1399d05aa8bbSEric Blake             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1400d05aa8bbSEric Blake                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1401d05aa8bbSEric Blake                 need_flush = true;
1402d05aa8bbSEric Blake             }
1403465fe887SEric Blake         } else {
1404465fe887SEric Blake             assert(!bs->supported_zero_flags);
140561007b31SStefan Hajnoczi         }
140661007b31SStefan Hajnoczi 
140761007b31SStefan Hajnoczi         if (ret == -ENOTSUP) {
140861007b31SStefan Hajnoczi             /* Fall back to bounce buffer if write zeroes is unsupported */
1409465fe887SEric Blake             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1410465fe887SEric Blake 
1411465fe887SEric Blake             if ((flags & BDRV_REQ_FUA) &&
1412465fe887SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1413465fe887SEric Blake                 /* No need for bdrv_driver_pwritev() to do a fallback
1414465fe887SEric Blake                  * flush on each chunk; use just one at the end */
1415465fe887SEric Blake                 write_flags &= ~BDRV_REQ_FUA;
1416465fe887SEric Blake                 need_flush = true;
1417465fe887SEric Blake             }
14185def6b80SEric Blake             num = MIN(num, max_transfer);
1419d05aa8bbSEric Blake             iov.iov_len = num;
142061007b31SStefan Hajnoczi             if (iov.iov_base == NULL) {
1421d05aa8bbSEric Blake                 iov.iov_base = qemu_try_blockalign(bs, num);
142261007b31SStefan Hajnoczi                 if (iov.iov_base == NULL) {
142361007b31SStefan Hajnoczi                     ret = -ENOMEM;
142461007b31SStefan Hajnoczi                     goto fail;
142561007b31SStefan Hajnoczi                 }
1426d05aa8bbSEric Blake                 memset(iov.iov_base, 0, num);
142761007b31SStefan Hajnoczi             }
142861007b31SStefan Hajnoczi             qemu_iovec_init_external(&qiov, &iov, 1);
142961007b31SStefan Hajnoczi 
1430d05aa8bbSEric Blake             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
143161007b31SStefan Hajnoczi 
143261007b31SStefan Hajnoczi             /* Keep the bounce buffer around if it is big enough for all
143361007b31SStefan Hajnoczi              * future requests.
143461007b31SStefan Hajnoczi              */
14355def6b80SEric Blake             if (num < max_transfer) {
143661007b31SStefan Hajnoczi                 qemu_vfree(iov.iov_base);
143761007b31SStefan Hajnoczi                 iov.iov_base = NULL;
143861007b31SStefan Hajnoczi             }
143961007b31SStefan Hajnoczi         }
144061007b31SStefan Hajnoczi 
1441d05aa8bbSEric Blake         offset += num;
1442f5a5ca79SManos Pitsidianakis         bytes -= num;
144361007b31SStefan Hajnoczi     }
144461007b31SStefan Hajnoczi 
144561007b31SStefan Hajnoczi fail:
1446465fe887SEric Blake     if (ret == 0 && need_flush) {
1447465fe887SEric Blake         ret = bdrv_co_flush(bs);
1448465fe887SEric Blake     }
144961007b31SStefan Hajnoczi     qemu_vfree(iov.iov_base);
145061007b31SStefan Hajnoczi     return ret;
145161007b31SStefan Hajnoczi }
145261007b31SStefan Hajnoczi 
145361007b31SStefan Hajnoczi /*
145404ed95f4SEric Blake  * Forwards an already correctly aligned write request to the BlockDriver,
145504ed95f4SEric Blake  * after possibly fragmenting it.
145661007b31SStefan Hajnoczi  */
145785c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
145861007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1459cff86b38SEric Blake     int64_t align, QEMUIOVector *qiov, int flags)
146061007b31SStefan Hajnoczi {
146185c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
146261007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
146361007b31SStefan Hajnoczi     bool waited;
146461007b31SStefan Hajnoczi     int ret;
146561007b31SStefan Hajnoczi 
14669896c876SKevin Wolf     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
146704ed95f4SEric Blake     uint64_t bytes_remaining = bytes;
146804ed95f4SEric Blake     int max_transfer;
146961007b31SStefan Hajnoczi 
1470d470ad42SMax Reitz     if (!drv) {
1471d470ad42SMax Reitz         return -ENOMEDIUM;
1472d470ad42SMax Reitz     }
1473d470ad42SMax Reitz 
1474d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
1475d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
1476d6883bc9SVladimir Sementsov-Ogievskiy     }
1477d6883bc9SVladimir Sementsov-Ogievskiy 
1478cff86b38SEric Blake     assert(is_power_of_2(align));
1479cff86b38SEric Blake     assert((offset & (align - 1)) == 0);
1480cff86b38SEric Blake     assert((bytes & (align - 1)) == 0);
148161007b31SStefan Hajnoczi     assert(!qiov || bytes == qiov->size);
1482abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1483fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
148404ed95f4SEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
148504ed95f4SEric Blake                                    align);
148661007b31SStefan Hajnoczi 
148761007b31SStefan Hajnoczi     waited = wait_serialising_requests(req);
148861007b31SStefan Hajnoczi     assert(!waited || !req->serialising);
148961007b31SStefan Hajnoczi     assert(req->overlap_offset <= offset);
149061007b31SStefan Hajnoczi     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1491362b3786SMax Reitz     assert(child->perm & BLK_PERM_WRITE);
1492362b3786SMax Reitz     assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
149361007b31SStefan Hajnoczi 
149461007b31SStefan Hajnoczi     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
149561007b31SStefan Hajnoczi 
149661007b31SStefan Hajnoczi     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1497c1499a5eSEric Blake         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
149861007b31SStefan Hajnoczi         qemu_iovec_is_zero(qiov)) {
149961007b31SStefan Hajnoczi         flags |= BDRV_REQ_ZERO_WRITE;
150061007b31SStefan Hajnoczi         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
150161007b31SStefan Hajnoczi             flags |= BDRV_REQ_MAY_UNMAP;
150261007b31SStefan Hajnoczi         }
150361007b31SStefan Hajnoczi     }
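    /* Dispatch the write: zero writes go through bdrv_co_do_pwrite_zeroes(),
     * compressed writes through the driver's compressed callback, requests
     * that fit in max_transfer are issued as a single driver call, and larger
     * requests are fragmented, keeping BDRV_REQ_FUA only on the last chunk
     * when FUA would otherwise be emulated by a flush per chunk.
     */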
150461007b31SStefan Hajnoczi 
150561007b31SStefan Hajnoczi     if (ret < 0) {
150661007b31SStefan Hajnoczi         /* Do nothing, write notifier decided to fail this request */
150761007b31SStefan Hajnoczi     } else if (flags & BDRV_REQ_ZERO_WRITE) {
15089a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
15099896c876SKevin Wolf         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
15103ea1a091SPavel Butsykin     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
15113ea1a091SPavel Butsykin         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
151204ed95f4SEric Blake     } else if (bytes <= max_transfer) {
15139a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV);
151478a07294SKevin Wolf         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
151504ed95f4SEric Blake     } else {
151604ed95f4SEric Blake         bdrv_debug_event(bs, BLKDBG_PWRITEV);
151704ed95f4SEric Blake         while (bytes_remaining) {
151804ed95f4SEric Blake             int num = MIN(bytes_remaining, max_transfer);
151904ed95f4SEric Blake             QEMUIOVector local_qiov;
152004ed95f4SEric Blake             int local_flags = flags;
152104ed95f4SEric Blake 
152204ed95f4SEric Blake             assert(num);
152304ed95f4SEric Blake             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
152404ed95f4SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
152504ed95f4SEric Blake                 /* If FUA is going to be emulated by flush, we only
152604ed95f4SEric Blake                  * need to flush on the last iteration */
152704ed95f4SEric Blake                 local_flags &= ~BDRV_REQ_FUA;
152804ed95f4SEric Blake             }
152904ed95f4SEric Blake             qemu_iovec_init(&local_qiov, qiov->niov);
153004ed95f4SEric Blake             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
153104ed95f4SEric Blake 
153204ed95f4SEric Blake             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
153304ed95f4SEric Blake                                       num, &local_qiov, local_flags);
153404ed95f4SEric Blake             qemu_iovec_destroy(&local_qiov);
153504ed95f4SEric Blake             if (ret < 0) {
153604ed95f4SEric Blake                 break;
153704ed95f4SEric Blake             }
153804ed95f4SEric Blake             bytes_remaining -= num;
153904ed95f4SEric Blake         }
154061007b31SStefan Hajnoczi     }
15419a4f4c31SKevin Wolf     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
154261007b31SStefan Hajnoczi 
154347fec599SPaolo Bonzini     atomic_inc(&bs->write_gen);
15440fdf1a4fSEric Blake     bdrv_set_dirty(bs, offset, bytes);
154561007b31SStefan Hajnoczi 
1546f7946da2SPaolo Bonzini     stat64_max(&bs->wr_highest_offset, offset + bytes);
154761007b31SStefan Hajnoczi 
154861007b31SStefan Hajnoczi     if (ret >= 0) {
15499896c876SKevin Wolf         bs->total_sectors = MAX(bs->total_sectors, end_sector);
155004ed95f4SEric Blake         ret = 0;
155161007b31SStefan Hajnoczi     }
155261007b31SStefan Hajnoczi 
155361007b31SStefan Hajnoczi     return ret;
155461007b31SStefan Hajnoczi }
155561007b31SStefan Hajnoczi 
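/*
 * Zero-write helper used by bdrv_co_pwritev() when no qiov is given: the
 * unaligned head and tail of the range are handled as read-modify-write
 * cycles through a bounce buffer, while the aligned middle is passed down
 * as a plain zero write.
 */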
155685c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
15579eeb6dd1SFam Zheng                                                 int64_t offset,
15589eeb6dd1SFam Zheng                                                 unsigned int bytes,
15599eeb6dd1SFam Zheng                                                 BdrvRequestFlags flags,
15609eeb6dd1SFam Zheng                                                 BdrvTrackedRequest *req)
15619eeb6dd1SFam Zheng {
156285c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
15639eeb6dd1SFam Zheng     uint8_t *buf = NULL;
15649eeb6dd1SFam Zheng     QEMUIOVector local_qiov;
15659eeb6dd1SFam Zheng     struct iovec iov;
1566a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
15679eeb6dd1SFam Zheng     unsigned int head_padding_bytes, tail_padding_bytes;
15689eeb6dd1SFam Zheng     int ret = 0;
15699eeb6dd1SFam Zheng 
15709eeb6dd1SFam Zheng     head_padding_bytes = offset & (align - 1);
1571f13ce1beSDenis V. Lunev     tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
15729eeb6dd1SFam Zheng 
15739eeb6dd1SFam Zheng 
15749eeb6dd1SFam Zheng     assert(flags & BDRV_REQ_ZERO_WRITE);
15759eeb6dd1SFam Zheng     if (head_padding_bytes || tail_padding_bytes) {
15769eeb6dd1SFam Zheng         buf = qemu_blockalign(bs, align);
15779eeb6dd1SFam Zheng         iov = (struct iovec) {
15789eeb6dd1SFam Zheng             .iov_base   = buf,
15799eeb6dd1SFam Zheng             .iov_len    = align,
15809eeb6dd1SFam Zheng         };
15819eeb6dd1SFam Zheng         qemu_iovec_init_external(&local_qiov, &iov, 1);
15829eeb6dd1SFam Zheng     }
15839eeb6dd1SFam Zheng     if (head_padding_bytes) {
15849eeb6dd1SFam Zheng         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
15859eeb6dd1SFam Zheng 
15869eeb6dd1SFam Zheng         /* RMW the unaligned part before head. */
15879eeb6dd1SFam Zheng         mark_request_serialising(req, align);
15889eeb6dd1SFam Zheng         wait_serialising_requests(req);
15899a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
159085c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
15919eeb6dd1SFam Zheng                                   align, &local_qiov, 0);
15929eeb6dd1SFam Zheng         if (ret < 0) {
15939eeb6dd1SFam Zheng             goto fail;
15949eeb6dd1SFam Zheng         }
15959a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
15969eeb6dd1SFam Zheng 
15979eeb6dd1SFam Zheng         memset(buf + head_padding_bytes, 0, zero_bytes);
159885c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
1599cff86b38SEric Blake                                    align, &local_qiov,
16009eeb6dd1SFam Zheng                                    flags & ~BDRV_REQ_ZERO_WRITE);
16019eeb6dd1SFam Zheng         if (ret < 0) {
16029eeb6dd1SFam Zheng             goto fail;
16039eeb6dd1SFam Zheng         }
16049eeb6dd1SFam Zheng         offset += zero_bytes;
16059eeb6dd1SFam Zheng         bytes -= zero_bytes;
16069eeb6dd1SFam Zheng     }
16079eeb6dd1SFam Zheng 
16089eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
16099eeb6dd1SFam Zheng     if (bytes >= align) {
16109eeb6dd1SFam Zheng         /* Write the aligned part in the middle. */
16119eeb6dd1SFam Zheng         uint64_t aligned_bytes = bytes & ~(align - 1);
161285c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
16139eeb6dd1SFam Zheng                                    NULL, flags);
16149eeb6dd1SFam Zheng         if (ret < 0) {
16159eeb6dd1SFam Zheng             goto fail;
16169eeb6dd1SFam Zheng         }
16179eeb6dd1SFam Zheng         bytes -= aligned_bytes;
16189eeb6dd1SFam Zheng         offset += aligned_bytes;
16199eeb6dd1SFam Zheng     }
16209eeb6dd1SFam Zheng 
16219eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
16229eeb6dd1SFam Zheng     if (bytes) {
16239eeb6dd1SFam Zheng         assert(align == tail_padding_bytes + bytes);
16249eeb6dd1SFam Zheng         /* RMW the unaligned part after tail. */
16259eeb6dd1SFam Zheng         mark_request_serialising(req, align);
16269eeb6dd1SFam Zheng         wait_serialising_requests(req);
16279a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
162885c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, req, offset, align,
16299eeb6dd1SFam Zheng                                   align, &local_qiov, 0);
16309eeb6dd1SFam Zheng         if (ret < 0) {
16319eeb6dd1SFam Zheng             goto fail;
16329eeb6dd1SFam Zheng         }
16339a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
16349eeb6dd1SFam Zheng 
16359eeb6dd1SFam Zheng         memset(buf, 0, bytes);
163685c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
16379eeb6dd1SFam Zheng                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
16389eeb6dd1SFam Zheng     }
16399eeb6dd1SFam Zheng fail:
16409eeb6dd1SFam Zheng     qemu_vfree(buf);
16419eeb6dd1SFam Zheng     return ret;
16429eeb6dd1SFam Zheng 
16439eeb6dd1SFam Zheng }
16449eeb6dd1SFam Zheng 
164561007b31SStefan Hajnoczi /*
164661007b31SStefan Hajnoczi  * Handle a write request in coroutine context
164761007b31SStefan Hajnoczi  */
1648a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
164961007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
165061007b31SStefan Hajnoczi     BdrvRequestFlags flags)
165161007b31SStefan Hajnoczi {
1652a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
165361007b31SStefan Hajnoczi     BdrvTrackedRequest req;
1654a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
165561007b31SStefan Hajnoczi     uint8_t *head_buf = NULL;
165661007b31SStefan Hajnoczi     uint8_t *tail_buf = NULL;
165761007b31SStefan Hajnoczi     QEMUIOVector local_qiov;
165861007b31SStefan Hajnoczi     bool use_local_qiov = false;
165961007b31SStefan Hajnoczi     int ret;
166061007b31SStefan Hajnoczi 
1661f42cf447SDaniel P. Berrange     trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
1662f42cf447SDaniel P. Berrange 
166361007b31SStefan Hajnoczi     if (!bs->drv) {
166461007b31SStefan Hajnoczi         return -ENOMEDIUM;
166561007b31SStefan Hajnoczi     }
166661007b31SStefan Hajnoczi     if (bs->read_only) {
1667eaf5fe2dSPaolo Bonzini         return -EPERM;
166861007b31SStefan Hajnoczi     }
166904c01a5cSKevin Wolf     assert(!(bs->open_flags & BDRV_O_INACTIVE));
167061007b31SStefan Hajnoczi 
167161007b31SStefan Hajnoczi     ret = bdrv_check_byte_request(bs, offset, bytes);
167261007b31SStefan Hajnoczi     if (ret < 0) {
167361007b31SStefan Hajnoczi         return ret;
167461007b31SStefan Hajnoczi     }
167561007b31SStefan Hajnoczi 
167699723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
167761007b31SStefan Hajnoczi     /*
167861007b31SStefan Hajnoczi      * Align write if necessary by performing a read-modify-write cycle.
167961007b31SStefan Hajnoczi      * Pad qiov with the read parts and be sure to have a tracked request not
168061007b31SStefan Hajnoczi      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
168161007b31SStefan Hajnoczi      */
1682ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
168361007b31SStefan Hajnoczi 
16849eeb6dd1SFam Zheng     if (!qiov) {
168585c97ca7SKevin Wolf         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
16869eeb6dd1SFam Zheng         goto out;
16879eeb6dd1SFam Zheng     }
16889eeb6dd1SFam Zheng 
168961007b31SStefan Hajnoczi     if (offset & (align - 1)) {
169061007b31SStefan Hajnoczi         QEMUIOVector head_qiov;
169161007b31SStefan Hajnoczi         struct iovec head_iov;
169261007b31SStefan Hajnoczi 
169361007b31SStefan Hajnoczi         mark_request_serialising(&req, align);
169461007b31SStefan Hajnoczi         wait_serialising_requests(&req);
169561007b31SStefan Hajnoczi 
169661007b31SStefan Hajnoczi         head_buf = qemu_blockalign(bs, align);
169761007b31SStefan Hajnoczi         head_iov = (struct iovec) {
169861007b31SStefan Hajnoczi             .iov_base   = head_buf,
169961007b31SStefan Hajnoczi             .iov_len    = align,
170061007b31SStefan Hajnoczi         };
170161007b31SStefan Hajnoczi         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
170261007b31SStefan Hajnoczi 
17039a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
170485c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
170561007b31SStefan Hajnoczi                                   align, &head_qiov, 0);
170661007b31SStefan Hajnoczi         if (ret < 0) {
170761007b31SStefan Hajnoczi             goto fail;
170861007b31SStefan Hajnoczi         }
17099a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
171061007b31SStefan Hajnoczi 
171161007b31SStefan Hajnoczi         qemu_iovec_init(&local_qiov, qiov->niov + 2);
171261007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
171361007b31SStefan Hajnoczi         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
171461007b31SStefan Hajnoczi         use_local_qiov = true;
171561007b31SStefan Hajnoczi 
171661007b31SStefan Hajnoczi         bytes += offset & (align - 1);
171761007b31SStefan Hajnoczi         offset = offset & ~(align - 1);
1718117bc3faSPeter Lieven 
1719117bc3faSPeter Lieven         /* We have read the tail already if the request is smaller
1720117bc3faSPeter Lieven          * than one aligned block.
1721117bc3faSPeter Lieven          */
1722117bc3faSPeter Lieven         if (bytes < align) {
1723117bc3faSPeter Lieven             qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1724117bc3faSPeter Lieven             bytes = align;
1725117bc3faSPeter Lieven         }
172661007b31SStefan Hajnoczi     }
172761007b31SStefan Hajnoczi 
172861007b31SStefan Hajnoczi     if ((offset + bytes) & (align - 1)) {
172961007b31SStefan Hajnoczi         QEMUIOVector tail_qiov;
173061007b31SStefan Hajnoczi         struct iovec tail_iov;
173161007b31SStefan Hajnoczi         size_t tail_bytes;
173261007b31SStefan Hajnoczi         bool waited;
173361007b31SStefan Hajnoczi 
173461007b31SStefan Hajnoczi         mark_request_serialising(&req, align);
173561007b31SStefan Hajnoczi         waited = wait_serialising_requests(&req);
173661007b31SStefan Hajnoczi         assert(!waited || !use_local_qiov);
173761007b31SStefan Hajnoczi 
173861007b31SStefan Hajnoczi         tail_buf = qemu_blockalign(bs, align);
173961007b31SStefan Hajnoczi         tail_iov = (struct iovec) {
174061007b31SStefan Hajnoczi             .iov_base   = tail_buf,
174161007b31SStefan Hajnoczi             .iov_len    = align,
174261007b31SStefan Hajnoczi         };
174361007b31SStefan Hajnoczi         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
174461007b31SStefan Hajnoczi 
17459a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
174685c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
174785c97ca7SKevin Wolf                                   align, align, &tail_qiov, 0);
174861007b31SStefan Hajnoczi         if (ret < 0) {
174961007b31SStefan Hajnoczi             goto fail;
175061007b31SStefan Hajnoczi         }
17519a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
175261007b31SStefan Hajnoczi 
175361007b31SStefan Hajnoczi         if (!use_local_qiov) {
175461007b31SStefan Hajnoczi             qemu_iovec_init(&local_qiov, qiov->niov + 1);
175561007b31SStefan Hajnoczi             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
175661007b31SStefan Hajnoczi             use_local_qiov = true;
175761007b31SStefan Hajnoczi         }
175861007b31SStefan Hajnoczi 
175961007b31SStefan Hajnoczi         tail_bytes = (offset + bytes) & (align - 1);
176061007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
176161007b31SStefan Hajnoczi 
176261007b31SStefan Hajnoczi         bytes = ROUND_UP(bytes, align);
176361007b31SStefan Hajnoczi     }
176461007b31SStefan Hajnoczi 
176585c97ca7SKevin Wolf     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
176661007b31SStefan Hajnoczi                                use_local_qiov ? &local_qiov : qiov,
176761007b31SStefan Hajnoczi                                flags);
176861007b31SStefan Hajnoczi 
176961007b31SStefan Hajnoczi fail:
177061007b31SStefan Hajnoczi 
177161007b31SStefan Hajnoczi     if (use_local_qiov) {
177261007b31SStefan Hajnoczi         qemu_iovec_destroy(&local_qiov);
177361007b31SStefan Hajnoczi     }
177461007b31SStefan Hajnoczi     qemu_vfree(head_buf);
177561007b31SStefan Hajnoczi     qemu_vfree(tail_buf);
17769eeb6dd1SFam Zheng out:
17779eeb6dd1SFam Zheng     tracked_request_end(&req);
177899723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
177961007b31SStefan Hajnoczi     return ret;
178061007b31SStefan Hajnoczi }
178161007b31SStefan Hajnoczi 
1782adad6496SKevin Wolf static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
178361007b31SStefan Hajnoczi     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
178461007b31SStefan Hajnoczi     BdrvRequestFlags flags)
178561007b31SStefan Hajnoczi {
178661007b31SStefan Hajnoczi     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
178761007b31SStefan Hajnoczi         return -EINVAL;
178861007b31SStefan Hajnoczi     }
178961007b31SStefan Hajnoczi 
1790a03ef88fSKevin Wolf     return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
179161007b31SStefan Hajnoczi                            nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
179261007b31SStefan Hajnoczi }
179361007b31SStefan Hajnoczi 
179425ec177dSKevin Wolf int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
179561007b31SStefan Hajnoczi     int nb_sectors, QEMUIOVector *qiov)
179661007b31SStefan Hajnoczi {
1797adad6496SKevin Wolf     return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
179861007b31SStefan Hajnoczi }
179961007b31SStefan Hajnoczi 
1800a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1801f5a5ca79SManos Pitsidianakis                                        int bytes, BdrvRequestFlags flags)
180261007b31SStefan Hajnoczi {
1803f5a5ca79SManos Pitsidianakis     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
180461007b31SStefan Hajnoczi 
1805a03ef88fSKevin Wolf     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
180661007b31SStefan Hajnoczi         flags &= ~BDRV_REQ_MAY_UNMAP;
180761007b31SStefan Hajnoczi     }
180861007b31SStefan Hajnoczi 
1809f5a5ca79SManos Pitsidianakis     return bdrv_co_pwritev(child, offset, bytes, NULL,
181061007b31SStefan Hajnoczi                            BDRV_REQ_ZERO_WRITE | flags);
181161007b31SStefan Hajnoczi }
181261007b31SStefan Hajnoczi 
18134085f5c7SJohn Snow /*
18134085f5c7SJohn Snow  * Flush ALL BDSes regardless of whether they are reachable via a BlkBackend.
18154085f5c7SJohn Snow  */
18164085f5c7SJohn Snow int bdrv_flush_all(void)
18174085f5c7SJohn Snow {
18184085f5c7SJohn Snow     BdrvNextIterator it;
18194085f5c7SJohn Snow     BlockDriverState *bs = NULL;
18204085f5c7SJohn Snow     int result = 0;
18214085f5c7SJohn Snow 
18224085f5c7SJohn Snow     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
18234085f5c7SJohn Snow         AioContext *aio_context = bdrv_get_aio_context(bs);
18244085f5c7SJohn Snow         int ret;
18254085f5c7SJohn Snow 
18264085f5c7SJohn Snow         aio_context_acquire(aio_context);
18274085f5c7SJohn Snow         ret = bdrv_flush(bs);
18284085f5c7SJohn Snow         if (ret < 0 && !result) {
18294085f5c7SJohn Snow             result = ret;
18304085f5c7SJohn Snow         }
18314085f5c7SJohn Snow         aio_context_release(aio_context);
18324085f5c7SJohn Snow     }
18334085f5c7SJohn Snow 
18344085f5c7SJohn Snow     return result;
18354085f5c7SJohn Snow }
18364085f5c7SJohn Snow 
18374085f5c7SJohn Snow 
18384bcd936eSEric Blake typedef struct BdrvCoBlockStatusData {
183961007b31SStefan Hajnoczi     BlockDriverState *bs;
184061007b31SStefan Hajnoczi     BlockDriverState *base;
1841c9ce8c4dSEric Blake     bool want_zero;
18424bcd936eSEric Blake     int64_t offset;
18434bcd936eSEric Blake     int64_t bytes;
18444bcd936eSEric Blake     int64_t *pnum;
18454bcd936eSEric Blake     int64_t *map;
1846c9ce8c4dSEric Blake     BlockDriverState **file;
18474bcd936eSEric Blake     int ret;
184861007b31SStefan Hajnoczi     bool done;
18494bcd936eSEric Blake } BdrvCoBlockStatusData;
185061007b31SStefan Hajnoczi 
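/*
 * Helpers that a driver can use as its .bdrv_co_get_block_status callback:
 * they report the whole range as BDRV_BLOCK_RAW with a valid offset into
 * bs->file or bs->backing respectively, which makes the generic block-status
 * code below recurse into that child.
 */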
1851f7cc69b3SManos Pitsidianakis int64_t coroutine_fn bdrv_co_get_block_status_from_file(BlockDriverState *bs,
1852f7cc69b3SManos Pitsidianakis                                                         int64_t sector_num,
1853f7cc69b3SManos Pitsidianakis                                                         int nb_sectors,
1854f7cc69b3SManos Pitsidianakis                                                         int *pnum,
1855f7cc69b3SManos Pitsidianakis                                                         BlockDriverState **file)
1856f7cc69b3SManos Pitsidianakis {
1857f7cc69b3SManos Pitsidianakis     assert(bs->file && bs->file->bs);
1858f7cc69b3SManos Pitsidianakis     *pnum = nb_sectors;
1859f7cc69b3SManos Pitsidianakis     *file = bs->file->bs;
1860f7cc69b3SManos Pitsidianakis     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
1861f7cc69b3SManos Pitsidianakis            (sector_num << BDRV_SECTOR_BITS);
1862f7cc69b3SManos Pitsidianakis }
1863f7cc69b3SManos Pitsidianakis 
1864f7cc69b3SManos Pitsidianakis int64_t coroutine_fn bdrv_co_get_block_status_from_backing(BlockDriverState *bs,
1865f7cc69b3SManos Pitsidianakis                                                            int64_t sector_num,
1866f7cc69b3SManos Pitsidianakis                                                            int nb_sectors,
1867f7cc69b3SManos Pitsidianakis                                                            int *pnum,
1868f7cc69b3SManos Pitsidianakis                                                            BlockDriverState **file)
1869f7cc69b3SManos Pitsidianakis {
1870f7cc69b3SManos Pitsidianakis     assert(bs->backing && bs->backing->bs);
1871f7cc69b3SManos Pitsidianakis     *pnum = nb_sectors;
1872f7cc69b3SManos Pitsidianakis     *file = bs->backing->bs;
1873f7cc69b3SManos Pitsidianakis     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
1874f7cc69b3SManos Pitsidianakis            (sector_num << BDRV_SECTOR_BITS);
1875f7cc69b3SManos Pitsidianakis }
1876f7cc69b3SManos Pitsidianakis 
187761007b31SStefan Hajnoczi /*
187861007b31SStefan Hajnoczi  * Returns the allocation status of the specified range of bytes.
187961007b31SStefan Hajnoczi  * Drivers not implementing the functionality are assumed to not support
188061007b31SStefan Hajnoczi  * backing files, hence all their sectors are reported as allocated.
188161007b31SStefan Hajnoczi  *
1882c9ce8c4dSEric Blake  * If 'want_zero' is true, the caller is querying for mapping purposes,
1883c9ce8c4dSEric Blake  * and the result should include BDRV_BLOCK_OFFSET_VALID and
1884c9ce8c4dSEric Blake  * BDRV_BLOCK_ZERO where possible; otherwise, the result may omit those
1885c9ce8c4dSEric Blake  * bits, particularly if it allows for a larger value in 'pnum'.
1886c9ce8c4dSEric Blake  *
18872e8bc787SEric Blake  * If 'offset' is beyond the end of the disk image the return value is
1888fb0d8654SEric Blake  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
188961007b31SStefan Hajnoczi  *
18902e8bc787SEric Blake  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
1891fb0d8654SEric Blake  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
1892fb0d8654SEric Blake  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
189367a0fd2aSFam Zheng  *
18942e8bc787SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
18952e8bc787SEric Blake  * following the specified offset) that are easily known to be in the
18962e8bc787SEric Blake  * same allocated/unallocated state.  Note that a second call starting
18972e8bc787SEric Blake  * at the original offset plus returned pnum may have the same status.
18982e8bc787SEric Blake  * The returned value is non-zero on success except at end-of-file.
18992e8bc787SEric Blake  *
19002e8bc787SEric Blake  * Returns negative errno on failure.  Otherwise, if the
19012e8bc787SEric Blake  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
19022e8bc787SEric Blake  * set to the host mapping and BDS corresponding to the guest offset.
190361007b31SStefan Hajnoczi  */
19042e8bc787SEric Blake static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
1905c9ce8c4dSEric Blake                                              bool want_zero,
19062e8bc787SEric Blake                                              int64_t offset, int64_t bytes,
19072e8bc787SEric Blake                                              int64_t *pnum, int64_t *map,
190867a0fd2aSFam Zheng                                              BlockDriverState **file)
190961007b31SStefan Hajnoczi {
19102e8bc787SEric Blake     int64_t total_size;
19112e8bc787SEric Blake     int64_t n; /* bytes */
1912efa6e2edSEric Blake     int ret;
19132e8bc787SEric Blake     int64_t local_map = 0;
1914298a1665SEric Blake     BlockDriverState *local_file = NULL;
1915efa6e2edSEric Blake     int64_t aligned_offset, aligned_bytes;
1916efa6e2edSEric Blake     uint32_t align;
191761007b31SStefan Hajnoczi 
1918298a1665SEric Blake     assert(pnum);
1919298a1665SEric Blake     *pnum = 0;
19202e8bc787SEric Blake     total_size = bdrv_getlength(bs);
19212e8bc787SEric Blake     if (total_size < 0) {
19222e8bc787SEric Blake         ret = total_size;
1923298a1665SEric Blake         goto early_out;
192461007b31SStefan Hajnoczi     }
192561007b31SStefan Hajnoczi 
19262e8bc787SEric Blake     if (offset >= total_size) {
1927298a1665SEric Blake         ret = BDRV_BLOCK_EOF;
1928298a1665SEric Blake         goto early_out;
192961007b31SStefan Hajnoczi     }
19302e8bc787SEric Blake     if (!bytes) {
1931298a1665SEric Blake         ret = 0;
1932298a1665SEric Blake         goto early_out;
19339cdcfd9fSEric Blake     }
193461007b31SStefan Hajnoczi 
19352e8bc787SEric Blake     n = total_size - offset;
19362e8bc787SEric Blake     if (n < bytes) {
19372e8bc787SEric Blake         bytes = n;
193861007b31SStefan Hajnoczi     }
193961007b31SStefan Hajnoczi 
1940d470ad42SMax Reitz     /* Must be non-NULL or bdrv_getlength() would have failed */
1941d470ad42SMax Reitz     assert(bs->drv);
194261007b31SStefan Hajnoczi     if (!bs->drv->bdrv_co_get_block_status) {
19432e8bc787SEric Blake         *pnum = bytes;
194461007b31SStefan Hajnoczi         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
19452e8bc787SEric Blake         if (offset + bytes == total_size) {
1946fb0d8654SEric Blake             ret |= BDRV_BLOCK_EOF;
1947fb0d8654SEric Blake         }
194861007b31SStefan Hajnoczi         if (bs->drv->protocol_name) {
19492e8bc787SEric Blake             ret |= BDRV_BLOCK_OFFSET_VALID;
19502e8bc787SEric Blake             local_map = offset;
1951298a1665SEric Blake             local_file = bs;
195261007b31SStefan Hajnoczi         }
1953298a1665SEric Blake         goto early_out;
195461007b31SStefan Hajnoczi     }
195561007b31SStefan Hajnoczi 
195699723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
1957efa6e2edSEric Blake 
1958efa6e2edSEric Blake     /* Round out to request_alignment boundaries */
1959efa6e2edSEric Blake     /* TODO: until we have a byte-based driver callback, we also have to
1960efa6e2edSEric Blake      * round out to sectors, even if that is bigger than request_alignment */
1961efa6e2edSEric Blake     align = MAX(bs->bl.request_alignment, BDRV_SECTOR_SIZE);
1962efa6e2edSEric Blake     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
1963efa6e2edSEric Blake     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
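    /* For example, with align = 512, offset = 1000 and bytes = 100 the driver
     * is queried for the range [512, 1536); the result is clamped back to the
     * original request further down by trimming the leading 488 bytes from
     * *pnum and limiting it to the requested 100 bytes.
     */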
1964efa6e2edSEric Blake 
1965efa6e2edSEric Blake     {
1966efa6e2edSEric Blake         int count; /* sectors */
1967efa6e2edSEric Blake         int64_t longret;
1968efa6e2edSEric Blake 
1969efa6e2edSEric Blake         assert(QEMU_IS_ALIGNED(aligned_offset | aligned_bytes,
1970efa6e2edSEric Blake                                BDRV_SECTOR_SIZE));
19712e8bc787SEric Blake         /*
19722e8bc787SEric Blake          * The contract allows us to return pnum smaller than bytes, even
19732e8bc787SEric Blake          * if the next query would see the same status; we truncate the
19742e8bc787SEric Blake          * request to avoid overflowing the driver's 32-bit interface.
19752e8bc787SEric Blake          */
1976efa6e2edSEric Blake         longret = bs->drv->bdrv_co_get_block_status(
1977efa6e2edSEric Blake             bs, aligned_offset >> BDRV_SECTOR_BITS,
1978efa6e2edSEric Blake             MIN(INT_MAX, aligned_bytes) >> BDRV_SECTOR_BITS, &count,
1979298a1665SEric Blake             &local_file);
1980efa6e2edSEric Blake         if (longret < 0) {
1981efa6e2edSEric Blake             assert(INT_MIN <= longret);
1982efa6e2edSEric Blake             ret = longret;
198399723548SPaolo Bonzini             goto out;
198461007b31SStefan Hajnoczi         }
1985efa6e2edSEric Blake         if (longret & BDRV_BLOCK_OFFSET_VALID) {
1986efa6e2edSEric Blake             local_map = longret & BDRV_BLOCK_OFFSET_MASK;
19872e8bc787SEric Blake         }
1988efa6e2edSEric Blake         ret = longret & ~BDRV_BLOCK_OFFSET_MASK;
19892e8bc787SEric Blake         *pnum = count * BDRV_SECTOR_SIZE;
1990efa6e2edSEric Blake     }
1991efa6e2edSEric Blake 
1992efa6e2edSEric Blake     /*
1993efa6e2edSEric Blake      * The driver's result must be a multiple of request_alignment.
1994efa6e2edSEric Blake      * Clamp pnum and adjust map to original request.
1995efa6e2edSEric Blake      */
1996efa6e2edSEric Blake     assert(QEMU_IS_ALIGNED(*pnum, align) && align > offset - aligned_offset);
1997efa6e2edSEric Blake     *pnum -= offset - aligned_offset;
1998efa6e2edSEric Blake     if (*pnum > bytes) {
1999efa6e2edSEric Blake         *pnum = bytes;
2000efa6e2edSEric Blake     }
2001efa6e2edSEric Blake     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2002efa6e2edSEric Blake         local_map += offset - aligned_offset;
2003efa6e2edSEric Blake     }
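    /*
     * Continuing the example above: had the driver reported the whole
     * 2560-byte aligned window as a single extent, *pnum would now be
     * 2560 - 488 = 2072, clamped to the 2000 bytes originally requested,
     * and a valid local_map would have been advanced by the same 488 bytes.
     */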
200461007b31SStefan Hajnoczi 
200561007b31SStefan Hajnoczi     if (ret & BDRV_BLOCK_RAW) {
2006298a1665SEric Blake         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
20072e8bc787SEric Blake         ret = bdrv_co_block_status(local_file, want_zero, local_map,
20082e8bc787SEric Blake                                    *pnum, pnum, &local_map, &local_file);
200999723548SPaolo Bonzini         goto out;
201061007b31SStefan Hajnoczi     }
201161007b31SStefan Hajnoczi 
201261007b31SStefan Hajnoczi     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
201361007b31SStefan Hajnoczi         ret |= BDRV_BLOCK_ALLOCATED;
2014c9ce8c4dSEric Blake     } else if (want_zero) {
201561007b31SStefan Hajnoczi         if (bdrv_unallocated_blocks_are_zero(bs)) {
201661007b31SStefan Hajnoczi             ret |= BDRV_BLOCK_ZERO;
2017760e0063SKevin Wolf         } else if (bs->backing) {
2018760e0063SKevin Wolf             BlockDriverState *bs2 = bs->backing->bs;
20192e8bc787SEric Blake             int64_t size2 = bdrv_getlength(bs2);
2020c9ce8c4dSEric Blake 
20212e8bc787SEric Blake             if (size2 >= 0 && offset >= size2) {
202261007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
202361007b31SStefan Hajnoczi             }
202461007b31SStefan Hajnoczi         }
202561007b31SStefan Hajnoczi     }
202661007b31SStefan Hajnoczi 
2027c9ce8c4dSEric Blake     if (want_zero && local_file && local_file != bs &&
202861007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
202961007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_OFFSET_VALID)) {
20302e8bc787SEric Blake         int64_t file_pnum;
20312e8bc787SEric Blake         int ret2;
203261007b31SStefan Hajnoczi 
20332e8bc787SEric Blake         ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
20342e8bc787SEric Blake                                     *pnum, &file_pnum, NULL, NULL);
203561007b31SStefan Hajnoczi         if (ret2 >= 0) {
203661007b31SStefan Hajnoczi             /* Ignore errors.  This just provides extra information; it
203761007b31SStefan Hajnoczi              * is useful but not necessary.
203861007b31SStefan Hajnoczi              */
2039c61e684eSEric Blake             if (ret2 & BDRV_BLOCK_EOF &&
2040c61e684eSEric Blake                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2041c61e684eSEric Blake                 /*
2042c61e684eSEric Blake                  * It is valid for the format block driver to read
2043c61e684eSEric Blake                  * beyond the end of the underlying file's current
2044c61e684eSEric Blake                  * size; such areas read as zero.
2045c61e684eSEric Blake                  */
204661007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
204761007b31SStefan Hajnoczi             } else {
204861007b31SStefan Hajnoczi                 /* Limit request to the range reported by the protocol driver */
204961007b31SStefan Hajnoczi                 *pnum = file_pnum;
205061007b31SStefan Hajnoczi                 ret |= (ret2 & BDRV_BLOCK_ZERO);
205161007b31SStefan Hajnoczi             }
205261007b31SStefan Hajnoczi         }
205361007b31SStefan Hajnoczi     }
205461007b31SStefan Hajnoczi 
205599723548SPaolo Bonzini out:
205699723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
20572e8bc787SEric Blake     if (ret >= 0 && offset + *pnum == total_size) {
2058fb0d8654SEric Blake         ret |= BDRV_BLOCK_EOF;
2059fb0d8654SEric Blake     }
2060298a1665SEric Blake early_out:
2061298a1665SEric Blake     if (file) {
2062298a1665SEric Blake         *file = local_file;
2063298a1665SEric Blake     }
20642e8bc787SEric Blake     if (map) {
20652e8bc787SEric Blake         *map = local_map;
20662e8bc787SEric Blake     }
206761007b31SStefan Hajnoczi     return ret;
206861007b31SStefan Hajnoczi }
206961007b31SStefan Hajnoczi 
20705b648c67SEric Blake static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2071ba3f0e25SFam Zheng                                                    BlockDriverState *base,
2072c9ce8c4dSEric Blake                                                    bool want_zero,
20735b648c67SEric Blake                                                    int64_t offset,
20745b648c67SEric Blake                                                    int64_t bytes,
20755b648c67SEric Blake                                                    int64_t *pnum,
20765b648c67SEric Blake                                                    int64_t *map,
207767a0fd2aSFam Zheng                                                    BlockDriverState **file)
2078ba3f0e25SFam Zheng {
2079ba3f0e25SFam Zheng     BlockDriverState *p;
20805b648c67SEric Blake     int ret = 0;
2081c61e684eSEric Blake     bool first = true;
2082ba3f0e25SFam Zheng 
2083ba3f0e25SFam Zheng     assert(bs != base);
2084760e0063SKevin Wolf     for (p = bs; p != base; p = backing_bs(p)) {
20855b648c67SEric Blake         ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
20865b648c67SEric Blake                                    file);
2087c61e684eSEric Blake         if (ret < 0) {
2088c61e684eSEric Blake             break;
2089c61e684eSEric Blake         }
2090c61e684eSEric Blake         if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2091c61e684eSEric Blake             /*
2092c61e684eSEric Blake              * Reading beyond the end of the file continues to read
2093c61e684eSEric Blake              * zeroes, but we can only widen the result to the
2094c61e684eSEric Blake              * unallocated length we learned from an earlier
2095c61e684eSEric Blake              * iteration.
2096c61e684eSEric Blake              */
20975b648c67SEric Blake             *pnum = bytes;
2098c61e684eSEric Blake         }
2099c61e684eSEric Blake         if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
2100ba3f0e25SFam Zheng             break;
2101ba3f0e25SFam Zheng         }
21025b648c67SEric Blake         /* [offset, pnum] unallocated on this layer, which could be only
21035b648c67SEric Blake          * the first part of [offset, bytes].  */
21045b648c67SEric Blake         bytes = MIN(bytes, *pnum);
2105c61e684eSEric Blake         first = false;
2106ba3f0e25SFam Zheng     }
2107ba3f0e25SFam Zheng     return ret;
2108ba3f0e25SFam Zheng }
2109ba3f0e25SFam Zheng 
211031826642SEric Blake /* Coroutine wrapper for bdrv_block_status_above() */
21115b648c67SEric Blake static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
211261007b31SStefan Hajnoczi {
21134bcd936eSEric Blake     BdrvCoBlockStatusData *data = opaque;
211461007b31SStefan Hajnoczi 
21155b648c67SEric Blake     data->ret = bdrv_co_block_status_above(data->bs, data->base,
2116c9ce8c4dSEric Blake                                            data->want_zero,
21175b648c67SEric Blake                                            data->offset, data->bytes,
21185b648c67SEric Blake                                            data->pnum, data->map, data->file);
211961007b31SStefan Hajnoczi     data->done = true;
212061007b31SStefan Hajnoczi }
212161007b31SStefan Hajnoczi 
212261007b31SStefan Hajnoczi /*
21235b648c67SEric Blake  * Synchronous wrapper around bdrv_co_block_status_above().
212461007b31SStefan Hajnoczi  *
21255b648c67SEric Blake  * See bdrv_co_block_status_above() for details.
212661007b31SStefan Hajnoczi  */
21277ddb99b9SEric Blake static int bdrv_common_block_status_above(BlockDriverState *bs,
2128ba3f0e25SFam Zheng                                           BlockDriverState *base,
21297ddb99b9SEric Blake                                           bool want_zero, int64_t offset,
21307ddb99b9SEric Blake                                           int64_t bytes, int64_t *pnum,
21317ddb99b9SEric Blake                                           int64_t *map,
213267a0fd2aSFam Zheng                                           BlockDriverState **file)
213361007b31SStefan Hajnoczi {
213461007b31SStefan Hajnoczi     Coroutine *co;
21354bcd936eSEric Blake     BdrvCoBlockStatusData data = {
213661007b31SStefan Hajnoczi         .bs = bs,
2137ba3f0e25SFam Zheng         .base = base,
2138c9ce8c4dSEric Blake         .want_zero = want_zero,
21397ddb99b9SEric Blake         .offset = offset,
21407ddb99b9SEric Blake         .bytes = bytes,
21417ddb99b9SEric Blake         .pnum = pnum,
21427ddb99b9SEric Blake         .map = map,
2143c9ce8c4dSEric Blake         .file = file,
214461007b31SStefan Hajnoczi         .done = false,
214561007b31SStefan Hajnoczi     };
214661007b31SStefan Hajnoczi 
214761007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
214861007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
21495b648c67SEric Blake         bdrv_block_status_above_co_entry(&data);
215061007b31SStefan Hajnoczi     } else {
21515b648c67SEric Blake         co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
2152e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
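        /*
         * The coroutine may yield while waiting on driver I/O, so keep
         * processing events in bs's AioContext until the entry function
         * has set data.done.
         */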
215388b062c2SPaolo Bonzini         BDRV_POLL_WHILE(bs, !data.done);
215461007b31SStefan Hajnoczi     }
215561007b31SStefan Hajnoczi     return data.ret;
215661007b31SStefan Hajnoczi }
215761007b31SStefan Hajnoczi 
215831826642SEric Blake int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
215931826642SEric Blake                             int64_t offset, int64_t bytes, int64_t *pnum,
216031826642SEric Blake                             int64_t *map, BlockDriverState **file)
2161c9ce8c4dSEric Blake {
216231826642SEric Blake     return bdrv_common_block_status_above(bs, base, true, offset, bytes,
216331826642SEric Blake                                           pnum, map, file);
2164c9ce8c4dSEric Blake }
2165c9ce8c4dSEric Blake 
2166237d78f8SEric Blake int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2167237d78f8SEric Blake                       int64_t *pnum, int64_t *map, BlockDriverState **file)
2168ba3f0e25SFam Zheng {
216931826642SEric Blake     return bdrv_block_status_above(bs, backing_bs(bs),
217031826642SEric Blake                                    offset, bytes, pnum, map, file);
2171ba3f0e25SFam Zheng }
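
/*
 * A minimal usage sketch (hypothetical helper, not referenced anywhere in
 * this file): walk one layer with bdrv_block_status() and count how many
 * bytes currently read as zeroes.  Each iteration advances by *pnum, which
 * is only 0 once the end of the file has been reached.
 */
static int64_t example_count_zero_bytes(BlockDriverState *bs)
{
    int64_t offset = 0;
    int64_t zeroes = 0;
    int64_t total = bdrv_getlength(bs);

    while (total > 0 && offset < total) {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_block_status(bs, offset, total - offset,
                                    &pnum, &map, &file);

        if (ret < 0 || pnum == 0) {
            break;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            zeroes += pnum;
        }
        offset += pnum;
    }
    return zeroes;
}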
2172ba3f0e25SFam Zheng 
2173d6a644bbSEric Blake int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2174d6a644bbSEric Blake                                    int64_t bytes, int64_t *pnum)
217561007b31SStefan Hajnoczi {
21767ddb99b9SEric Blake     int ret;
21777ddb99b9SEric Blake     int64_t dummy;
2178d6a644bbSEric Blake 
21797ddb99b9SEric Blake     ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
21807ddb99b9SEric Blake                                          bytes, pnum ? pnum : &dummy, NULL,
2181298a1665SEric Blake                                          NULL);
218261007b31SStefan Hajnoczi     if (ret < 0) {
218361007b31SStefan Hajnoczi         return ret;
218461007b31SStefan Hajnoczi     }
218561007b31SStefan Hajnoczi     return !!(ret & BDRV_BLOCK_ALLOCATED);
218661007b31SStefan Hajnoczi }
218761007b31SStefan Hajnoczi 
218861007b31SStefan Hajnoczi /*
218961007b31SStefan Hajnoczi  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
219061007b31SStefan Hajnoczi  *
219151b0a488SEric Blake  * Return 1 if (a prefix of) the given range is allocated in any image
219251b0a488SEric Blake  * between BASE and TOP (inclusive).  BASE can be NULL to check if the given
219351b0a488SEric Blake  * offset is allocated in any image of the chain.  Return 0 otherwise, or
2194d6a644bbSEric Blake  * a negative errno on failure.
219561007b31SStefan Hajnoczi  *
219651b0a488SEric Blake  * 'pnum' is set to the number of bytes (including and immediately
219751b0a488SEric Blake  * following the specified offset) that are known to be in the same
219851b0a488SEric Blake  * allocated/unallocated state.  Note that a subsequent call starting
219951b0a488SEric Blake  * at 'offset + *pnum' may return the same allocation status (in other
220051b0a488SEric Blake  * words, the result is not necessarily the maximum possible range);
220151b0a488SEric Blake  * but 'pnum' will only be 0 when end of file is reached.
220261007b31SStefan Hajnoczi  *
220361007b31SStefan Hajnoczi  */
220461007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top,
220561007b31SStefan Hajnoczi                             BlockDriverState *base,
220651b0a488SEric Blake                             int64_t offset, int64_t bytes, int64_t *pnum)
220761007b31SStefan Hajnoczi {
220861007b31SStefan Hajnoczi     BlockDriverState *intermediate;
220951b0a488SEric Blake     int ret;
221051b0a488SEric Blake     int64_t n = bytes;
221161007b31SStefan Hajnoczi 
221261007b31SStefan Hajnoczi     intermediate = top;
221361007b31SStefan Hajnoczi     while (intermediate && intermediate != base) {
2214d6a644bbSEric Blake         int64_t pnum_inter;
2215c00716beSEric Blake         int64_t size_inter;
2216d6a644bbSEric Blake 
221751b0a488SEric Blake         ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
221861007b31SStefan Hajnoczi         if (ret < 0) {
221961007b31SStefan Hajnoczi             return ret;
2220d6a644bbSEric Blake         }
2221d6a644bbSEric Blake         if (ret) {
222251b0a488SEric Blake             *pnum = pnum_inter;
222361007b31SStefan Hajnoczi             return 1;
222461007b31SStefan Hajnoczi         }
222561007b31SStefan Hajnoczi 
222651b0a488SEric Blake         size_inter = bdrv_getlength(intermediate);
2227c00716beSEric Blake         if (size_inter < 0) {
2228c00716beSEric Blake             return size_inter;
2229c00716beSEric Blake         }
223051b0a488SEric Blake         if (n > pnum_inter &&
223151b0a488SEric Blake             (intermediate == top || offset + pnum_inter < size_inter)) {
223251b0a488SEric Blake             n = pnum_inter;
223361007b31SStefan Hajnoczi         }
223461007b31SStefan Hajnoczi 
2235760e0063SKevin Wolf         intermediate = backing_bs(intermediate);
223661007b31SStefan Hajnoczi     }
223761007b31SStefan Hajnoczi 
223861007b31SStefan Hajnoczi     *pnum = n;
223961007b31SStefan Hajnoczi     return 0;
224061007b31SStefan Hajnoczi }
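
/*
 * A minimal usage sketch (hypothetical helper): process only the ranges of
 * [0, end) that are allocated somewhere in the chain above 'base', e.g. to
 * skip holes when copying data between layers.
 */
static int example_visit_allocated_above(BlockDriverState *top,
                                         BlockDriverState *base, int64_t end)
{
    int64_t offset = 0;

    while (offset < end) {
        int64_t pnum;
        int ret = bdrv_is_allocated_above(top, base, offset, end - offset,
                                          &pnum);

        if (ret < 0) {
            return ret;
        }
        if (pnum == 0) {
            break; /* end of file reached */
        }
        if (ret) {
            /* [offset, offset + pnum) is allocated above 'base'; a real
             * caller would copy or otherwise process it here. */
        }
        offset += pnum;
    }
    return 0;
}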
224161007b31SStefan Hajnoczi 
22421a8ae822SKevin Wolf typedef struct BdrvVmstateCo {
22431a8ae822SKevin Wolf     BlockDriverState    *bs;
22441a8ae822SKevin Wolf     QEMUIOVector        *qiov;
22451a8ae822SKevin Wolf     int64_t             pos;
22461a8ae822SKevin Wolf     bool                is_read;
22471a8ae822SKevin Wolf     int                 ret;
22481a8ae822SKevin Wolf } BdrvVmstateCo;
22491a8ae822SKevin Wolf 
22501a8ae822SKevin Wolf static int coroutine_fn
22511a8ae822SKevin Wolf bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
22521a8ae822SKevin Wolf                    bool is_read)
22531a8ae822SKevin Wolf {
22541a8ae822SKevin Wolf     BlockDriver *drv = bs->drv;
2255dc88a467SStefan Hajnoczi     int ret = -ENOTSUP;
2256dc88a467SStefan Hajnoczi 
2257dc88a467SStefan Hajnoczi     bdrv_inc_in_flight(bs);
22581a8ae822SKevin Wolf 
22591a8ae822SKevin Wolf     if (!drv) {
2260dc88a467SStefan Hajnoczi         ret = -ENOMEDIUM;
22611a8ae822SKevin Wolf     } else if (drv->bdrv_load_vmstate) {
2262dc88a467SStefan Hajnoczi         if (is_read) {
2263dc88a467SStefan Hajnoczi             ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2264dc88a467SStefan Hajnoczi         } else {
2265dc88a467SStefan Hajnoczi             ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2266dc88a467SStefan Hajnoczi         }
22671a8ae822SKevin Wolf     } else if (bs->file) {
2268dc88a467SStefan Hajnoczi         ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
22691a8ae822SKevin Wolf     }
22701a8ae822SKevin Wolf 
2271dc88a467SStefan Hajnoczi     bdrv_dec_in_flight(bs);
2272dc88a467SStefan Hajnoczi     return ret;
22731a8ae822SKevin Wolf }
22741a8ae822SKevin Wolf 
22751a8ae822SKevin Wolf static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
22761a8ae822SKevin Wolf {
22771a8ae822SKevin Wolf     BdrvVmstateCo *co = opaque;
22781a8ae822SKevin Wolf     co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
22791a8ae822SKevin Wolf }
22801a8ae822SKevin Wolf 
22811a8ae822SKevin Wolf static inline int
22821a8ae822SKevin Wolf bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
22831a8ae822SKevin Wolf                 bool is_read)
22841a8ae822SKevin Wolf {
22851a8ae822SKevin Wolf     if (qemu_in_coroutine()) {
22861a8ae822SKevin Wolf         return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
22871a8ae822SKevin Wolf     } else {
22881a8ae822SKevin Wolf         BdrvVmstateCo data = {
22891a8ae822SKevin Wolf             .bs         = bs,
22901a8ae822SKevin Wolf             .qiov       = qiov,
22911a8ae822SKevin Wolf             .pos        = pos,
22921a8ae822SKevin Wolf             .is_read    = is_read,
22931a8ae822SKevin Wolf             .ret        = -EINPROGRESS,
22941a8ae822SKevin Wolf         };
22950b8b8753SPaolo Bonzini         Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
22961a8ae822SKevin Wolf 
2297e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
2298ea17c9d2SStefan Hajnoczi         BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
22991a8ae822SKevin Wolf         return data.ret;
23001a8ae822SKevin Wolf     }
23011a8ae822SKevin Wolf }
23021a8ae822SKevin Wolf 
230361007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
230461007b31SStefan Hajnoczi                       int64_t pos, int size)
230561007b31SStefan Hajnoczi {
230661007b31SStefan Hajnoczi     QEMUIOVector qiov;
230761007b31SStefan Hajnoczi     struct iovec iov = {
230861007b31SStefan Hajnoczi         .iov_base   = (void *) buf,
230961007b31SStefan Hajnoczi         .iov_len    = size,
231061007b31SStefan Hajnoczi     };
2311b433d942SKevin Wolf     int ret;
231261007b31SStefan Hajnoczi 
231361007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
2314b433d942SKevin Wolf 
2315b433d942SKevin Wolf     ret = bdrv_writev_vmstate(bs, &qiov, pos);
2316b433d942SKevin Wolf     if (ret < 0) {
2317b433d942SKevin Wolf         return ret;
2318b433d942SKevin Wolf     }
2319b433d942SKevin Wolf 
2320b433d942SKevin Wolf     return size;
232161007b31SStefan Hajnoczi }
232261007b31SStefan Hajnoczi 
232361007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
232461007b31SStefan Hajnoczi {
23251a8ae822SKevin Wolf     return bdrv_rw_vmstate(bs, qiov, pos, false);
232661007b31SStefan Hajnoczi }
232761007b31SStefan Hajnoczi 
232861007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
232961007b31SStefan Hajnoczi                       int64_t pos, int size)
233061007b31SStefan Hajnoczi {
23315ddda0b8SKevin Wolf     QEMUIOVector qiov;
23325ddda0b8SKevin Wolf     struct iovec iov = {
23335ddda0b8SKevin Wolf         .iov_base   = buf,
23345ddda0b8SKevin Wolf         .iov_len    = size,
23355ddda0b8SKevin Wolf     };
2336b433d942SKevin Wolf     int ret;
23375ddda0b8SKevin Wolf 
23385ddda0b8SKevin Wolf     qemu_iovec_init_external(&qiov, &iov, 1);
2339b433d942SKevin Wolf     ret = bdrv_readv_vmstate(bs, &qiov, pos);
2340b433d942SKevin Wolf     if (ret < 0) {
2341b433d942SKevin Wolf         return ret;
2342b433d942SKevin Wolf     }
2343b433d942SKevin Wolf 
2344b433d942SKevin Wolf     return size;
23455ddda0b8SKevin Wolf }
23465ddda0b8SKevin Wolf 
23475ddda0b8SKevin Wolf int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
23485ddda0b8SKevin Wolf {
23491a8ae822SKevin Wolf     return bdrv_rw_vmstate(bs, qiov, pos, true);
235061007b31SStefan Hajnoczi }
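
/*
 * A minimal usage sketch with hypothetical values: store a small blob in
 * the image's vmstate area and read it back.  Both wrappers return the
 * number of bytes transferred on success or a negative errno on failure.
 */
static int example_vmstate_roundtrip(BlockDriverState *bs)
{
    uint8_t out[16] = "vmstate example";
    uint8_t in[16];
    int ret;

    ret = bdrv_save_vmstate(bs, out, 0, sizeof(out));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_load_vmstate(bs, in, 0, sizeof(in));
    return ret < 0 ? ret : 0;
}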
235161007b31SStefan Hajnoczi 
235261007b31SStefan Hajnoczi /**************************************************************/
235361007b31SStefan Hajnoczi /* async I/Os */
235461007b31SStefan Hajnoczi 
235561007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb)
235661007b31SStefan Hajnoczi {
235761007b31SStefan Hajnoczi     qemu_aio_ref(acb);
235861007b31SStefan Hajnoczi     bdrv_aio_cancel_async(acb);
235961007b31SStefan Hajnoczi     while (acb->refcnt > 1) {
236061007b31SStefan Hajnoczi         if (acb->aiocb_info->get_aio_context) {
236161007b31SStefan Hajnoczi             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
236261007b31SStefan Hajnoczi         } else if (acb->bs) {
23632f47da5fSPaolo Bonzini             /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
23642f47da5fSPaolo Bonzini              * assert that we're not using an I/O thread.  Thread-safe
23652f47da5fSPaolo Bonzini              * code should use bdrv_aio_cancel_async exclusively.
23662f47da5fSPaolo Bonzini              */
23672f47da5fSPaolo Bonzini             assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
236861007b31SStefan Hajnoczi             aio_poll(bdrv_get_aio_context(acb->bs), true);
236961007b31SStefan Hajnoczi         } else {
237061007b31SStefan Hajnoczi             abort();
237161007b31SStefan Hajnoczi         }
237261007b31SStefan Hajnoczi     }
237361007b31SStefan Hajnoczi     qemu_aio_unref(acb);
237461007b31SStefan Hajnoczi }
237561007b31SStefan Hajnoczi 
237661007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
237761007b31SStefan Hajnoczi  * cancel_async; otherwise we do nothing and let the request complete normally.
237861007b31SStefan Hajnoczi  * In either case the completion callback must be called. */
237961007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
238061007b31SStefan Hajnoczi {
238161007b31SStefan Hajnoczi     if (acb->aiocb_info->cancel_async) {
238261007b31SStefan Hajnoczi         acb->aiocb_info->cancel_async(acb);
238361007b31SStefan Hajnoczi     }
238461007b31SStefan Hajnoczi }
238561007b31SStefan Hajnoczi 
238661007b31SStefan Hajnoczi /**************************************************************/
238761007b31SStefan Hajnoczi /* Coroutine block device emulation */
238861007b31SStefan Hajnoczi 
2389e293b7a3SKevin Wolf typedef struct FlushCo {
2390e293b7a3SKevin Wolf     BlockDriverState *bs;
2391e293b7a3SKevin Wolf     int ret;
2392e293b7a3SKevin Wolf } FlushCo;
2393e293b7a3SKevin Wolf 
2394e293b7a3SKevin Wolf 
239561007b31SStefan Hajnoczi static void coroutine_fn bdrv_flush_co_entry(void *opaque)
239661007b31SStefan Hajnoczi {
2397e293b7a3SKevin Wolf     FlushCo *rwco = opaque;
239861007b31SStefan Hajnoczi 
239961007b31SStefan Hajnoczi     rwco->ret = bdrv_co_flush(rwco->bs);
240061007b31SStefan Hajnoczi }
240161007b31SStefan Hajnoczi 
240261007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
240361007b31SStefan Hajnoczi {
240449ca6259SFam Zheng     int current_gen;
240549ca6259SFam Zheng     int ret = 0;
240661007b31SStefan Hajnoczi 
240799723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2408c32b82afSPavel Dovgalyuk 
2409e914404eSFam Zheng     if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
241049ca6259SFam Zheng         bdrv_is_sg(bs)) {
241149ca6259SFam Zheng         goto early_exit;
241249ca6259SFam Zheng     }
241349ca6259SFam Zheng 
24143783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
241547fec599SPaolo Bonzini     current_gen = atomic_read(&bs->write_gen);
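    /*
     * bs->write_gen is bumped whenever a request that modifies the image
     * completes (see e.g. the end of bdrv_co_pdiscard() below), while
     * bs->flushed_gen records the last generation that made it through a
     * flush.  Comparing the two further down lets us skip this node's
     * flush-to-disk when nothing has been written since the previous flush.
     */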
24163ff2f67aSEvgeny Yakovlev 
24173ff2f67aSEvgeny Yakovlev     /* Wait until any previous flushes are completed */
241899723548SPaolo Bonzini     while (bs->active_flush_req) {
24193783fa3dSPaolo Bonzini         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
24203ff2f67aSEvgeny Yakovlev     }
24213ff2f67aSEvgeny Yakovlev 
24223783fa3dSPaolo Bonzini     /* Flushes reach this point in nondecreasing current_gen order.  */
242399723548SPaolo Bonzini     bs->active_flush_req = true;
24243783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
24253ff2f67aSEvgeny Yakovlev 
2426c32b82afSPavel Dovgalyuk     /* Write back all layers by calling one driver function */
2427c32b82afSPavel Dovgalyuk     if (bs->drv->bdrv_co_flush) {
2428c32b82afSPavel Dovgalyuk         ret = bs->drv->bdrv_co_flush(bs);
2429c32b82afSPavel Dovgalyuk         goto out;
2430c32b82afSPavel Dovgalyuk     }
2431c32b82afSPavel Dovgalyuk 
243261007b31SStefan Hajnoczi     /* Write back cached data to the OS even with cache=unsafe */
243361007b31SStefan Hajnoczi     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
243461007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_os) {
243561007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_os(bs);
243661007b31SStefan Hajnoczi         if (ret < 0) {
2437cdb5e315SFam Zheng             goto out;
243861007b31SStefan Hajnoczi         }
243961007b31SStefan Hajnoczi     }
244061007b31SStefan Hajnoczi 
244161007b31SStefan Hajnoczi     /* But don't actually force it to the disk with cache=unsafe */
244261007b31SStefan Hajnoczi     if (bs->open_flags & BDRV_O_NO_FLUSH) {
244361007b31SStefan Hajnoczi         goto flush_parent;
244461007b31SStefan Hajnoczi     }
244561007b31SStefan Hajnoczi 
24463ff2f67aSEvgeny Yakovlev     /* Check if we really need to flush anything */
24473ff2f67aSEvgeny Yakovlev     if (bs->flushed_gen == current_gen) {
24483ff2f67aSEvgeny Yakovlev         goto flush_parent;
24493ff2f67aSEvgeny Yakovlev     }
24503ff2f67aSEvgeny Yakovlev 
245161007b31SStefan Hajnoczi     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2452d470ad42SMax Reitz     if (!bs->drv) {
2453d470ad42SMax Reitz         /* bs->drv->bdrv_co_flush() might have ejected the BDS
2454d470ad42SMax Reitz          * (even in case of apparent success) */
2455d470ad42SMax Reitz         ret = -ENOMEDIUM;
2456d470ad42SMax Reitz         goto out;
2457d470ad42SMax Reitz     }
245861007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_disk) {
245961007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_disk(bs);
246061007b31SStefan Hajnoczi     } else if (bs->drv->bdrv_aio_flush) {
246161007b31SStefan Hajnoczi         BlockAIOCB *acb;
246261007b31SStefan Hajnoczi         CoroutineIOCompletion co = {
246361007b31SStefan Hajnoczi             .coroutine = qemu_coroutine_self(),
246461007b31SStefan Hajnoczi         };
246561007b31SStefan Hajnoczi 
246661007b31SStefan Hajnoczi         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
246761007b31SStefan Hajnoczi         if (acb == NULL) {
246861007b31SStefan Hajnoczi             ret = -EIO;
246961007b31SStefan Hajnoczi         } else {
247061007b31SStefan Hajnoczi             qemu_coroutine_yield();
247161007b31SStefan Hajnoczi             ret = co.ret;
247261007b31SStefan Hajnoczi         }
247361007b31SStefan Hajnoczi     } else {
247461007b31SStefan Hajnoczi         /*
247561007b31SStefan Hajnoczi          * Some block drivers always operate in either writethrough or unsafe
247661007b31SStefan Hajnoczi          * mode and therefore don't support bdrv_flush. Usually QEMU doesn't
247761007b31SStefan Hajnoczi          * know how the server works (because the behaviour is hardcoded or
247861007b31SStefan Hajnoczi          * depends on server-side configuration), so we can't ensure that
247961007b31SStefan Hajnoczi          * everything is safe on disk. Returning an error doesn't work because
248061007b31SStefan Hajnoczi          * that would break guests even if the server operates in writethrough
248161007b31SStefan Hajnoczi          * mode.
248261007b31SStefan Hajnoczi          *
248361007b31SStefan Hajnoczi          * Let's hope the user knows what they're doing.
248461007b31SStefan Hajnoczi          */
248561007b31SStefan Hajnoczi         ret = 0;
248661007b31SStefan Hajnoczi     }
24873ff2f67aSEvgeny Yakovlev 
248861007b31SStefan Hajnoczi     if (ret < 0) {
2489cdb5e315SFam Zheng         goto out;
249061007b31SStefan Hajnoczi     }
249161007b31SStefan Hajnoczi 
249261007b31SStefan Hajnoczi     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
249361007b31SStefan Hajnoczi      * in the case of cache=unsafe, so there are no useless flushes.
249461007b31SStefan Hajnoczi      */
249561007b31SStefan Hajnoczi flush_parent:
2496cdb5e315SFam Zheng     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2497cdb5e315SFam Zheng out:
24983ff2f67aSEvgeny Yakovlev     /* Notify any pending flushes that we have completed */
2499e6af1e08SKevin Wolf     if (ret == 0) {
25003ff2f67aSEvgeny Yakovlev         bs->flushed_gen = current_gen;
2501e6af1e08SKevin Wolf     }
25023783fa3dSPaolo Bonzini 
25033783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
250499723548SPaolo Bonzini     bs->active_flush_req = false;
2505156af3acSDenis V. Lunev     /* Return value is ignored; it's OK if the wait queue is empty */
2506156af3acSDenis V. Lunev     qemu_co_queue_next(&bs->flush_queue);
25073783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
25083ff2f67aSEvgeny Yakovlev 
250949ca6259SFam Zheng early_exit:
251099723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2511cdb5e315SFam Zheng     return ret;
251261007b31SStefan Hajnoczi }
251361007b31SStefan Hajnoczi 
251461007b31SStefan Hajnoczi int bdrv_flush(BlockDriverState *bs)
251561007b31SStefan Hajnoczi {
251661007b31SStefan Hajnoczi     Coroutine *co;
2517e293b7a3SKevin Wolf     FlushCo flush_co = {
251861007b31SStefan Hajnoczi         .bs = bs,
251961007b31SStefan Hajnoczi         .ret = NOT_DONE,
252061007b31SStefan Hajnoczi     };
252161007b31SStefan Hajnoczi 
252261007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
252361007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
2524e293b7a3SKevin Wolf         bdrv_flush_co_entry(&flush_co);
252561007b31SStefan Hajnoczi     } else {
25260b8b8753SPaolo Bonzini         co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2527e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
252888b062c2SPaolo Bonzini         BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
252961007b31SStefan Hajnoczi     }
253061007b31SStefan Hajnoczi 
2531e293b7a3SKevin Wolf     return flush_co.ret;
253261007b31SStefan Hajnoczi }
253361007b31SStefan Hajnoczi 
253461007b31SStefan Hajnoczi typedef struct DiscardCo {
253561007b31SStefan Hajnoczi     BlockDriverState *bs;
25360c51a893SEric Blake     int64_t offset;
2537f5a5ca79SManos Pitsidianakis     int bytes;
253861007b31SStefan Hajnoczi     int ret;
253961007b31SStefan Hajnoczi } DiscardCo;
25400c51a893SEric Blake static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
254161007b31SStefan Hajnoczi {
254261007b31SStefan Hajnoczi     DiscardCo *rwco = opaque;
254361007b31SStefan Hajnoczi 
2544f5a5ca79SManos Pitsidianakis     rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
254561007b31SStefan Hajnoczi }
254661007b31SStefan Hajnoczi 
25479f1963b3SEric Blake int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
2548f5a5ca79SManos Pitsidianakis                                   int bytes)
254961007b31SStefan Hajnoczi {
2550b1066c87SFam Zheng     BdrvTrackedRequest req;
25519f1963b3SEric Blake     int max_pdiscard, ret;
25523482b9bcSEric Blake     int head, tail, align;
255361007b31SStefan Hajnoczi 
255461007b31SStefan Hajnoczi     if (!bs->drv) {
255561007b31SStefan Hajnoczi         return -ENOMEDIUM;
255661007b31SStefan Hajnoczi     }
255761007b31SStefan Hajnoczi 
2558d6883bc9SVladimir Sementsov-Ogievskiy     if (bdrv_has_readonly_bitmaps(bs)) {
2559d6883bc9SVladimir Sementsov-Ogievskiy         return -EPERM;
2560d6883bc9SVladimir Sementsov-Ogievskiy     }
2561d6883bc9SVladimir Sementsov-Ogievskiy 
2562f5a5ca79SManos Pitsidianakis     ret = bdrv_check_byte_request(bs, offset, bytes);
256361007b31SStefan Hajnoczi     if (ret < 0) {
256461007b31SStefan Hajnoczi         return ret;
256561007b31SStefan Hajnoczi     } else if (bs->read_only) {
2566eaf5fe2dSPaolo Bonzini         return -EPERM;
256761007b31SStefan Hajnoczi     }
256804c01a5cSKevin Wolf     assert(!(bs->open_flags & BDRV_O_INACTIVE));
256961007b31SStefan Hajnoczi 
257061007b31SStefan Hajnoczi     /* Do nothing if disabled.  */
257161007b31SStefan Hajnoczi     if (!(bs->open_flags & BDRV_O_UNMAP)) {
257261007b31SStefan Hajnoczi         return 0;
257361007b31SStefan Hajnoczi     }
257461007b31SStefan Hajnoczi 
257502aefe43SEric Blake     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
257661007b31SStefan Hajnoczi         return 0;
257761007b31SStefan Hajnoczi     }
257861007b31SStefan Hajnoczi 
25793482b9bcSEric Blake     /* Discard is advisory, but some devices track and coalesce
25803482b9bcSEric Blake      * unaligned requests, so we must pass everything down rather than
25813482b9bcSEric Blake      * round here.  Still, most devices will just silently ignore
25823482b9bcSEric Blake      * unaligned requests (by returning -ENOTSUP), so we must fragment
25833482b9bcSEric Blake      * the request accordingly.  */
258402aefe43SEric Blake     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2585b8d0a980SEric Blake     assert(align % bs->bl.request_alignment == 0);
2586b8d0a980SEric Blake     head = offset % align;
2587f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % align;
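    /*
     * Worked example with illustrative numbers: for align = 65536,
     * offset = 10240 and bytes = 204800 we get head = 10240 and
     * tail = 18432.  The loop below then issues one small request up to
     * the first 64k boundary, aligned chunks of at most max_pdiscard in
     * the middle, and finally the 18432 byte tail.
     */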
25889f1963b3SEric Blake 
258999723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2590f5a5ca79SManos Pitsidianakis     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
259150824995SFam Zheng 
2592ec050f77SDenis V. Lunev     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2593ec050f77SDenis V. Lunev     if (ret < 0) {
2594ec050f77SDenis V. Lunev         goto out;
2595ec050f77SDenis V. Lunev     }
2596ec050f77SDenis V. Lunev 
25979f1963b3SEric Blake     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
25989f1963b3SEric Blake                                    align);
25993482b9bcSEric Blake     assert(max_pdiscard >= bs->bl.request_alignment);
26009f1963b3SEric Blake 
2601f5a5ca79SManos Pitsidianakis     while (bytes > 0) {
2602f5a5ca79SManos Pitsidianakis         int num = bytes;
26033482b9bcSEric Blake 
26043482b9bcSEric Blake         if (head) {
26053482b9bcSEric Blake             /* Make small requests to get to alignment boundaries. */
2606f5a5ca79SManos Pitsidianakis             num = MIN(bytes, align - head);
26073482b9bcSEric Blake             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
26083482b9bcSEric Blake                 num %= bs->bl.request_alignment;
26093482b9bcSEric Blake             }
26103482b9bcSEric Blake             head = (head + num) % align;
26113482b9bcSEric Blake             assert(num < max_pdiscard);
26123482b9bcSEric Blake         } else if (tail) {
26133482b9bcSEric Blake             if (num > align) {
26143482b9bcSEric Blake                 /* Shorten the request to the last aligned cluster.  */
26153482b9bcSEric Blake                 num -= tail;
26163482b9bcSEric Blake             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
26173482b9bcSEric Blake                        tail > bs->bl.request_alignment) {
26183482b9bcSEric Blake                 tail %= bs->bl.request_alignment;
26193482b9bcSEric Blake                 num -= tail;
26203482b9bcSEric Blake             }
26213482b9bcSEric Blake         }
26223482b9bcSEric Blake         /* limit request size */
26233482b9bcSEric Blake         if (num > max_pdiscard) {
26243482b9bcSEric Blake             num = max_pdiscard;
26253482b9bcSEric Blake         }
262661007b31SStefan Hajnoczi 
2627d470ad42SMax Reitz         if (!bs->drv) {
2628d470ad42SMax Reitz             ret = -ENOMEDIUM;
2629d470ad42SMax Reitz             goto out;
2630d470ad42SMax Reitz         }
263147a5486dSEric Blake         if (bs->drv->bdrv_co_pdiscard) {
263247a5486dSEric Blake             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
263361007b31SStefan Hajnoczi         } else {
263461007b31SStefan Hajnoczi             BlockAIOCB *acb;
263561007b31SStefan Hajnoczi             CoroutineIOCompletion co = {
263661007b31SStefan Hajnoczi                 .coroutine = qemu_coroutine_self(),
263761007b31SStefan Hajnoczi             };
263861007b31SStefan Hajnoczi 
26394da444a0SEric Blake             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
264061007b31SStefan Hajnoczi                                              bdrv_co_io_em_complete, &co);
264161007b31SStefan Hajnoczi             if (acb == NULL) {
2642b1066c87SFam Zheng                 ret = -EIO;
2643b1066c87SFam Zheng                 goto out;
264461007b31SStefan Hajnoczi             } else {
264561007b31SStefan Hajnoczi                 qemu_coroutine_yield();
264661007b31SStefan Hajnoczi                 ret = co.ret;
264761007b31SStefan Hajnoczi             }
264861007b31SStefan Hajnoczi         }
264961007b31SStefan Hajnoczi         if (ret && ret != -ENOTSUP) {
2650b1066c87SFam Zheng             goto out;
265161007b31SStefan Hajnoczi         }
265261007b31SStefan Hajnoczi 
26539f1963b3SEric Blake         offset += num;
2654f5a5ca79SManos Pitsidianakis         bytes -= num;
265561007b31SStefan Hajnoczi     }
2656b1066c87SFam Zheng     ret = 0;
2657b1066c87SFam Zheng out:
265847fec599SPaolo Bonzini     atomic_inc(&bs->write_gen);
26590fdf1a4fSEric Blake     bdrv_set_dirty(bs, req.offset, req.bytes);
2660b1066c87SFam Zheng     tracked_request_end(&req);
266199723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2662b1066c87SFam Zheng     return ret;
266361007b31SStefan Hajnoczi }
266461007b31SStefan Hajnoczi 
2665f5a5ca79SManos Pitsidianakis int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
266661007b31SStefan Hajnoczi {
266761007b31SStefan Hajnoczi     Coroutine *co;
266861007b31SStefan Hajnoczi     DiscardCo rwco = {
266961007b31SStefan Hajnoczi         .bs = bs,
26700c51a893SEric Blake         .offset = offset,
2671f5a5ca79SManos Pitsidianakis         .bytes = bytes,
267261007b31SStefan Hajnoczi         .ret = NOT_DONE,
267361007b31SStefan Hajnoczi     };
267461007b31SStefan Hajnoczi 
267561007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
267661007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
26770c51a893SEric Blake         bdrv_pdiscard_co_entry(&rwco);
267861007b31SStefan Hajnoczi     } else {
26790c51a893SEric Blake         co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2680e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
268188b062c2SPaolo Bonzini         BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
268261007b31SStefan Hajnoczi     }
268361007b31SStefan Hajnoczi 
268461007b31SStefan Hajnoczi     return rwco.ret;
268561007b31SStefan Hajnoczi }
268661007b31SStefan Hajnoczi 
268748af776aSKevin Wolf int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
268861007b31SStefan Hajnoczi {
268961007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
26905c5ae76aSFam Zheng     CoroutineIOCompletion co = {
26915c5ae76aSFam Zheng         .coroutine = qemu_coroutine_self(),
26925c5ae76aSFam Zheng     };
26935c5ae76aSFam Zheng     BlockAIOCB *acb;
269461007b31SStefan Hajnoczi 
269599723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
269616a389dcSKevin Wolf     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
26975c5ae76aSFam Zheng         co.ret = -ENOTSUP;
26985c5ae76aSFam Zheng         goto out;
26995c5ae76aSFam Zheng     }
27005c5ae76aSFam Zheng 
270116a389dcSKevin Wolf     if (drv->bdrv_co_ioctl) {
270216a389dcSKevin Wolf         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
270316a389dcSKevin Wolf     } else {
27045c5ae76aSFam Zheng         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
27055c5ae76aSFam Zheng         if (!acb) {
2706c8a9fd80SFam Zheng             co.ret = -ENOTSUP;
2707c8a9fd80SFam Zheng             goto out;
27085c5ae76aSFam Zheng         }
27095c5ae76aSFam Zheng         qemu_coroutine_yield();
271016a389dcSKevin Wolf     }
27115c5ae76aSFam Zheng out:
271299723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
27135c5ae76aSFam Zheng     return co.ret;
27145c5ae76aSFam Zheng }
27155c5ae76aSFam Zheng 
271661007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size)
271761007b31SStefan Hajnoczi {
271861007b31SStefan Hajnoczi     return qemu_memalign(bdrv_opt_mem_align(bs), size);
271961007b31SStefan Hajnoczi }
272061007b31SStefan Hajnoczi 
272161007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size)
272261007b31SStefan Hajnoczi {
272361007b31SStefan Hajnoczi     return memset(qemu_blockalign(bs, size), 0, size);
272461007b31SStefan Hajnoczi }
272561007b31SStefan Hajnoczi 
272661007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
272761007b31SStefan Hajnoczi {
272861007b31SStefan Hajnoczi     size_t align = bdrv_opt_mem_align(bs);
272961007b31SStefan Hajnoczi 
273061007b31SStefan Hajnoczi     /* Ensure that NULL is never returned on success */
273161007b31SStefan Hajnoczi     assert(align > 0);
273261007b31SStefan Hajnoczi     if (size == 0) {
273361007b31SStefan Hajnoczi         size = align;
273461007b31SStefan Hajnoczi     }
273561007b31SStefan Hajnoczi 
273661007b31SStefan Hajnoczi     return qemu_try_memalign(align, size);
273761007b31SStefan Hajnoczi }
273861007b31SStefan Hajnoczi 
273961007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
274061007b31SStefan Hajnoczi {
274161007b31SStefan Hajnoczi     void *mem = qemu_try_blockalign(bs, size);
274261007b31SStefan Hajnoczi 
274361007b31SStefan Hajnoczi     if (mem) {
274461007b31SStefan Hajnoczi         memset(mem, 0, size);
274561007b31SStefan Hajnoczi     }
274661007b31SStefan Hajnoczi 
274761007b31SStefan Hajnoczi     return mem;
274861007b31SStefan Hajnoczi }
274961007b31SStefan Hajnoczi 
275061007b31SStefan Hajnoczi /*
275161007b31SStefan Hajnoczi  * Check if all memory in this vector is sector aligned.
275261007b31SStefan Hajnoczi  * Check if all memory in this vector satisfies the driver's minimum memory alignment.
275361007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
275461007b31SStefan Hajnoczi {
275561007b31SStefan Hajnoczi     int i;
27564196d2f0SDenis V. Lunev     size_t alignment = bdrv_min_mem_align(bs);
275761007b31SStefan Hajnoczi 
275861007b31SStefan Hajnoczi     for (i = 0; i < qiov->niov; i++) {
275961007b31SStefan Hajnoczi         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
276061007b31SStefan Hajnoczi             return false;
276161007b31SStefan Hajnoczi         }
276261007b31SStefan Hajnoczi         if (qiov->iov[i].iov_len % alignment) {
276361007b31SStefan Hajnoczi             return false;
276461007b31SStefan Hajnoczi         }
276561007b31SStefan Hajnoczi     }
276661007b31SStefan Hajnoczi 
276761007b31SStefan Hajnoczi     return true;
276861007b31SStefan Hajnoczi }
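
/*
 * A minimal usage sketch (hypothetical helper): build a single-element
 * I/O vector from a qemu_blockalign() buffer.  As long as 'len' itself is
 * a multiple of the driver's minimum memory alignment, the resulting
 * vector should pass bdrv_qiov_is_aligned().
 */
static bool example_build_aligned_qiov(BlockDriverState *bs, size_t len,
                                       QEMUIOVector *qiov, struct iovec *iov)
{
    iov->iov_base = qemu_blockalign(bs, len);
    iov->iov_len = len;
    qemu_iovec_init_external(qiov, iov, 1);

    return bdrv_qiov_is_aligned(bs, qiov);
}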
276961007b31SStefan Hajnoczi 
277061007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs,
277161007b31SStefan Hajnoczi                                     NotifierWithReturn *notifier)
277261007b31SStefan Hajnoczi {
277361007b31SStefan Hajnoczi     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
277461007b31SStefan Hajnoczi }
277561007b31SStefan Hajnoczi 
277661007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs)
277761007b31SStefan Hajnoczi {
27786b98bd64SPaolo Bonzini     BdrvChild *child;
27796b98bd64SPaolo Bonzini 
27806b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
27816b98bd64SPaolo Bonzini         bdrv_io_plug(child->bs);
27826b98bd64SPaolo Bonzini     }
27836b98bd64SPaolo Bonzini 
2784850d54a2SPaolo Bonzini     if (atomic_fetch_inc(&bs->io_plugged) == 0) {
278561007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
278661007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_plug) {
278761007b31SStefan Hajnoczi             drv->bdrv_io_plug(bs);
27886b98bd64SPaolo Bonzini         }
278961007b31SStefan Hajnoczi     }
279061007b31SStefan Hajnoczi }
279161007b31SStefan Hajnoczi 
279261007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs)
279361007b31SStefan Hajnoczi {
27946b98bd64SPaolo Bonzini     BdrvChild *child;
27956b98bd64SPaolo Bonzini 
27966b98bd64SPaolo Bonzini     assert(bs->io_plugged);
2797850d54a2SPaolo Bonzini     if (atomic_fetch_dec(&bs->io_plugged) == 1) {
279861007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
279961007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_unplug) {
280061007b31SStefan Hajnoczi             drv->bdrv_io_unplug(bs);
280161007b31SStefan Hajnoczi         }
280261007b31SStefan Hajnoczi     }
280361007b31SStefan Hajnoczi 
28046b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
28056b98bd64SPaolo Bonzini         bdrv_io_unplug(child->bs);
28066b98bd64SPaolo Bonzini     }
28076b98bd64SPaolo Bonzini }
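
/*
 * A minimal usage sketch (hypothetical helper): bracket a batch of
 * asynchronous submissions with plug/unplug so that drivers implementing
 * bdrv_io_plug/bdrv_io_unplug (e.g. linux-aio) can submit them in one go.
 */
static void example_plugged_submission(BlockDriverState *bs)
{
    bdrv_io_plug(bs);
    /*
     * ... queue several asynchronous requests here; in practice this is
     * driven from the BlockBackend layer, e.g. by virtio-blk ...
     */
    bdrv_io_unplug(bs); /* anything the driver queued is submitted now */
}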
2808