xref: /qemu/block/io.c (revision c61e684e44272f2acb2bef34cf2aa234582a73a9)
161007b31SStefan Hajnoczi /*
261007b31SStefan Hajnoczi  * Block layer I/O functions
361007b31SStefan Hajnoczi  *
461007b31SStefan Hajnoczi  * Copyright (c) 2003 Fabrice Bellard
561007b31SStefan Hajnoczi  *
661007b31SStefan Hajnoczi  * Permission is hereby granted, free of charge, to any person obtaining a copy
761007b31SStefan Hajnoczi  * of this software and associated documentation files (the "Software"), to deal
861007b31SStefan Hajnoczi  * in the Software without restriction, including without limitation the rights
961007b31SStefan Hajnoczi  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1061007b31SStefan Hajnoczi  * copies of the Software, and to permit persons to whom the Software is
1161007b31SStefan Hajnoczi  * furnished to do so, subject to the following conditions:
1261007b31SStefan Hajnoczi  *
1361007b31SStefan Hajnoczi  * The above copyright notice and this permission notice shall be included in
1461007b31SStefan Hajnoczi  * all copies or substantial portions of the Software.
1561007b31SStefan Hajnoczi  *
1661007b31SStefan Hajnoczi  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1761007b31SStefan Hajnoczi  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1861007b31SStefan Hajnoczi  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1961007b31SStefan Hajnoczi  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2061007b31SStefan Hajnoczi  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
2161007b31SStefan Hajnoczi  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
2261007b31SStefan Hajnoczi  * THE SOFTWARE.
2361007b31SStefan Hajnoczi  */
2461007b31SStefan Hajnoczi 
2580c71a24SPeter Maydell #include "qemu/osdep.h"
2661007b31SStefan Hajnoczi #include "trace.h"
277f0e9da6SMax Reitz #include "sysemu/block-backend.h"
2861007b31SStefan Hajnoczi #include "block/blockjob.h"
29f321dcb5SPaolo Bonzini #include "block/blockjob_int.h"
3061007b31SStefan Hajnoczi #include "block/block_int.h"
31f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
32da34e65cSMarkus Armbruster #include "qapi/error.h"
33d49b6836SMarkus Armbruster #include "qemu/error-report.h"
3461007b31SStefan Hajnoczi 
3561007b31SStefan Hajnoczi #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
3661007b31SStefan Hajnoczi 
37d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
38f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags);
3961007b31SStefan Hajnoczi 
4014e9559fSFam Zheng void bdrv_parent_drained_begin(BlockDriverState *bs)
4161007b31SStefan Hajnoczi {
42c2066af0SKevin Wolf     BdrvChild *c;
4327ccdd52SKevin Wolf 
44c2066af0SKevin Wolf     QLIST_FOREACH(c, &bs->parents, next_parent) {
45c2066af0SKevin Wolf         if (c->role->drained_begin) {
46c2066af0SKevin Wolf             c->role->drained_begin(c);
47c2066af0SKevin Wolf         }
48ce0f1412SPaolo Bonzini     }
49ce0f1412SPaolo Bonzini }
50ce0f1412SPaolo Bonzini 
5114e9559fSFam Zheng void bdrv_parent_drained_end(BlockDriverState *bs)
52ce0f1412SPaolo Bonzini {
53c2066af0SKevin Wolf     BdrvChild *c;
5427ccdd52SKevin Wolf 
55c2066af0SKevin Wolf     QLIST_FOREACH(c, &bs->parents, next_parent) {
56c2066af0SKevin Wolf         if (c->role->drained_end) {
57c2066af0SKevin Wolf             c->role->drained_end(c);
5827ccdd52SKevin Wolf         }
59c2066af0SKevin Wolf     }
6061007b31SStefan Hajnoczi }
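
/*
 * The two helpers above notify all parents (through their BdrvChildRole
 * callbacks) that the child is entering or leaving a drained section, so
 * that parents stop or resume submitting new requests.  They are invoked
 * from bdrv_drained_begin()/bdrv_drained_end() and bdrv_drain_all_*()
 * below and are expected to be used in matching begin/end pairs.
 */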
6161007b31SStefan Hajnoczi 
62d9e0dfa2SEric Blake static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
63d9e0dfa2SEric Blake {
64d9e0dfa2SEric Blake     dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
65d9e0dfa2SEric Blake     dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
66d9e0dfa2SEric Blake     dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
67d9e0dfa2SEric Blake                                  src->opt_mem_alignment);
68d9e0dfa2SEric Blake     dst->min_mem_alignment = MAX(dst->min_mem_alignment,
69d9e0dfa2SEric Blake                                  src->min_mem_alignment);
70d9e0dfa2SEric Blake     dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
71d9e0dfa2SEric Blake }
72d9e0dfa2SEric Blake 
7361007b31SStefan Hajnoczi void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
7461007b31SStefan Hajnoczi {
7561007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
7661007b31SStefan Hajnoczi     Error *local_err = NULL;
7761007b31SStefan Hajnoczi 
7861007b31SStefan Hajnoczi     memset(&bs->bl, 0, sizeof(bs->bl));
7961007b31SStefan Hajnoczi 
8061007b31SStefan Hajnoczi     if (!drv) {
8161007b31SStefan Hajnoczi         return;
8261007b31SStefan Hajnoczi     }
8361007b31SStefan Hajnoczi 
8479ba8c98SEric Blake     /* Default alignment based on whether driver has byte interface */
85a5b8dd2cSEric Blake     bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;
8679ba8c98SEric Blake 
8761007b31SStefan Hajnoczi     /* Take some limits from the children as a default */
8861007b31SStefan Hajnoczi     if (bs->file) {
899a4f4c31SKevin Wolf         bdrv_refresh_limits(bs->file->bs, &local_err);
9061007b31SStefan Hajnoczi         if (local_err) {
9161007b31SStefan Hajnoczi             error_propagate(errp, local_err);
9261007b31SStefan Hajnoczi             return;
9361007b31SStefan Hajnoczi         }
94d9e0dfa2SEric Blake         bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
9561007b31SStefan Hajnoczi     } else {
964196d2f0SDenis V. Lunev         bs->bl.min_mem_alignment = 512;
97459b4e66SDenis V. Lunev         bs->bl.opt_mem_alignment = getpagesize();
98bd44feb7SStefan Hajnoczi 
99bd44feb7SStefan Hajnoczi         /* Safe default since most protocols use readv()/writev()/etc */
100bd44feb7SStefan Hajnoczi         bs->bl.max_iov = IOV_MAX;
10161007b31SStefan Hajnoczi     }
10261007b31SStefan Hajnoczi 
103760e0063SKevin Wolf     if (bs->backing) {
104760e0063SKevin Wolf         bdrv_refresh_limits(bs->backing->bs, &local_err);
10561007b31SStefan Hajnoczi         if (local_err) {
10661007b31SStefan Hajnoczi             error_propagate(errp, local_err);
10761007b31SStefan Hajnoczi             return;
10861007b31SStefan Hajnoczi         }
109d9e0dfa2SEric Blake         bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
11061007b31SStefan Hajnoczi     }
11161007b31SStefan Hajnoczi 
11261007b31SStefan Hajnoczi     /* Then let the driver override it */
11361007b31SStefan Hajnoczi     if (drv->bdrv_refresh_limits) {
11461007b31SStefan Hajnoczi         drv->bdrv_refresh_limits(bs, errp);
11561007b31SStefan Hajnoczi     }
11661007b31SStefan Hajnoczi }
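
/*
 * Illustrative merge (the values are only an example): if bs->file reports
 * max_transfer = 512 KiB and the backing file reports 0 (unlimited),
 * MIN_NON_ZERO() keeps 512 KiB; opt_transfer and the alignments take the
 * maximum of the children's values, and the driver's own
 * .bdrv_refresh_limits() may still override the result afterwards.
 */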
11761007b31SStefan Hajnoczi 
11861007b31SStefan Hajnoczi /**
11961007b31SStefan Hajnoczi  * The copy-on-read flag is actually a reference count so multiple users may
12061007b31SStefan Hajnoczi  * use the feature without worrying about clobbering its previous state.
12161007b31SStefan Hajnoczi  * Copy-on-read stays enabled until all users have called to disable it.
12261007b31SStefan Hajnoczi  * Copy-on-read stays enabled until every user has disabled it again.
12361007b31SStefan Hajnoczi void bdrv_enable_copy_on_read(BlockDriverState *bs)
12461007b31SStefan Hajnoczi {
125d3faa13eSPaolo Bonzini     atomic_inc(&bs->copy_on_read);
12661007b31SStefan Hajnoczi }
12761007b31SStefan Hajnoczi 
12861007b31SStefan Hajnoczi void bdrv_disable_copy_on_read(BlockDriverState *bs)
12961007b31SStefan Hajnoczi {
130d3faa13eSPaolo Bonzini     int old = atomic_fetch_dec(&bs->copy_on_read);
131d3faa13eSPaolo Bonzini     assert(old >= 1);
13261007b31SStefan Hajnoczi }
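
/*
 * Usage sketch: enable/disable calls must be balanced.  A user such as a
 * block job would typically do
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... submit reads; copy-on-read is applied while the counter is > 0 ...
 *     bdrv_disable_copy_on_read(bs);
 */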
13361007b31SStefan Hajnoczi 
13461007b31SStefan Hajnoczi /* Check if any requests are in-flight (including throttled requests) */
135439db28cSKevin Wolf bool bdrv_requests_pending(BlockDriverState *bs)
13661007b31SStefan Hajnoczi {
13737a639a7SKevin Wolf     BdrvChild *child;
13837a639a7SKevin Wolf 
13999723548SPaolo Bonzini     if (atomic_read(&bs->in_flight)) {
14061007b31SStefan Hajnoczi         return true;
14161007b31SStefan Hajnoczi     }
14237a639a7SKevin Wolf 
14337a639a7SKevin Wolf     QLIST_FOREACH(child, &bs->children, next) {
14437a639a7SKevin Wolf         if (bdrv_requests_pending(child->bs)) {
14561007b31SStefan Hajnoczi             return true;
14661007b31SStefan Hajnoczi         }
14761007b31SStefan Hajnoczi     }
14837a639a7SKevin Wolf 
14961007b31SStefan Hajnoczi     return false;
15061007b31SStefan Hajnoczi }
15161007b31SStefan Hajnoczi 
152d42cf288SPaolo Bonzini static bool bdrv_drain_recurse(BlockDriverState *bs)
15367da1dc5SFam Zheng {
154178bd438SFam Zheng     BdrvChild *child, *tmp;
155d42cf288SPaolo Bonzini     bool waited;
156d42cf288SPaolo Bonzini 
15788b062c2SPaolo Bonzini     waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
15867da1dc5SFam Zheng 
15967da1dc5SFam Zheng     if (bs->drv && bs->drv->bdrv_drain) {
16067da1dc5SFam Zheng         bs->drv->bdrv_drain(bs);
16167da1dc5SFam Zheng     }
162d42cf288SPaolo Bonzini 
163178bd438SFam Zheng     QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
164178bd438SFam Zheng         BlockDriverState *bs = child->bs;
165178bd438SFam Zheng         bool in_main_loop =
166178bd438SFam Zheng             qemu_get_current_aio_context() == qemu_get_aio_context();
167178bd438SFam Zheng         assert(bs->refcnt > 0);
168178bd438SFam Zheng         if (in_main_loop) {
169178bd438SFam Zheng             /* In case the recursive bdrv_drain_recurse processes a
170178bd438SFam Zheng              * block_job_defer_to_main_loop BH and modifies the graph,
171178bd438SFam Zheng              * let's hold a reference to bs until we are done.
172178bd438SFam Zheng              *
173178bd438SFam Zheng              * IOThread doesn't have such a BH, and it is not safe to call
174178bd438SFam Zheng              * bdrv_unref without BQL, so skip doing it there.
175178bd438SFam Zheng              */
176178bd438SFam Zheng             bdrv_ref(bs);
177178bd438SFam Zheng         }
178178bd438SFam Zheng         waited |= bdrv_drain_recurse(bs);
179178bd438SFam Zheng         if (in_main_loop) {
180178bd438SFam Zheng             bdrv_unref(bs);
181178bd438SFam Zheng         }
18267da1dc5SFam Zheng     }
183d42cf288SPaolo Bonzini 
184d42cf288SPaolo Bonzini     return waited;
18567da1dc5SFam Zheng }
18667da1dc5SFam Zheng 
187a77fd4bbSFam Zheng typedef struct {
188a77fd4bbSFam Zheng     Coroutine *co;
189a77fd4bbSFam Zheng     BlockDriverState *bs;
190a77fd4bbSFam Zheng     bool done;
191a77fd4bbSFam Zheng } BdrvCoDrainData;
192a77fd4bbSFam Zheng 
193a77fd4bbSFam Zheng static void bdrv_co_drain_bh_cb(void *opaque)
194a77fd4bbSFam Zheng {
195a77fd4bbSFam Zheng     BdrvCoDrainData *data = opaque;
196a77fd4bbSFam Zheng     Coroutine *co = data->co;
19799723548SPaolo Bonzini     BlockDriverState *bs = data->bs;
198a77fd4bbSFam Zheng 
19999723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
200d42cf288SPaolo Bonzini     bdrv_drained_begin(bs);
201a77fd4bbSFam Zheng     data->done = true;
2021919631eSPaolo Bonzini     aio_co_wake(co);
203a77fd4bbSFam Zheng }
204a77fd4bbSFam Zheng 
205b6e84c97SPaolo Bonzini static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
206a77fd4bbSFam Zheng {
207a77fd4bbSFam Zheng     BdrvCoDrainData data;
208a77fd4bbSFam Zheng 
209a77fd4bbSFam Zheng     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
210a77fd4bbSFam Zheng      * other coroutines run if they were queued from
211a77fd4bbSFam Zheng      * qemu_co_queue_run_restart(). */
212a77fd4bbSFam Zheng 
213a77fd4bbSFam Zheng     assert(qemu_in_coroutine());
214a77fd4bbSFam Zheng     data = (BdrvCoDrainData) {
215a77fd4bbSFam Zheng         .co = qemu_coroutine_self(),
216a77fd4bbSFam Zheng         .bs = bs,
217a77fd4bbSFam Zheng         .done = false,
218a77fd4bbSFam Zheng     };
21999723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
220fffb6e12SPaolo Bonzini     aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
221fffb6e12SPaolo Bonzini                             bdrv_co_drain_bh_cb, &data);
222a77fd4bbSFam Zheng 
223a77fd4bbSFam Zheng     qemu_coroutine_yield();
224a77fd4bbSFam Zheng     /* If we are resumed from some other event (such as an aio completion or a
225a77fd4bbSFam Zheng      * timer callback), it is a bug in the caller that should be fixed. */
226a77fd4bbSFam Zheng     assert(data.done);
227a77fd4bbSFam Zheng }
228a77fd4bbSFam Zheng 
2296820643fSKevin Wolf void bdrv_drained_begin(BlockDriverState *bs)
2306820643fSKevin Wolf {
231d42cf288SPaolo Bonzini     if (qemu_in_coroutine()) {
232d42cf288SPaolo Bonzini         bdrv_co_yield_to_drain(bs);
233d42cf288SPaolo Bonzini         return;
234d42cf288SPaolo Bonzini     }
235d42cf288SPaolo Bonzini 
236414c2ec3SPaolo Bonzini     if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
2376820643fSKevin Wolf         aio_disable_external(bdrv_get_aio_context(bs));
2386820643fSKevin Wolf         bdrv_parent_drained_begin(bs);
2396820643fSKevin Wolf     }
2406820643fSKevin Wolf 
2416820643fSKevin Wolf     bdrv_drain_recurse(bs);
2426820643fSKevin Wolf }
2436820643fSKevin Wolf 
2446820643fSKevin Wolf void bdrv_drained_end(BlockDriverState *bs)
2456820643fSKevin Wolf {
2466820643fSKevin Wolf     assert(bs->quiesce_counter > 0);
247414c2ec3SPaolo Bonzini     if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
2486820643fSKevin Wolf         return;
2496820643fSKevin Wolf     }
2506820643fSKevin Wolf 
2516820643fSKevin Wolf     bdrv_parent_drained_end(bs);
2526820643fSKevin Wolf     aio_enable_external(bdrv_get_aio_context(bs));
2536820643fSKevin Wolf }
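
/*
 * Typical drained-section usage (sketch; the caller must hold the
 * BlockDriverState's AioContext):
 *
 *     bdrv_drained_begin(bs);
 *     ... graph manipulation or other work that must not race with I/O ...
 *     bdrv_drained_end(bs);
 */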
2546820643fSKevin Wolf 
25561007b31SStefan Hajnoczi /*
25667da1dc5SFam Zheng  * Wait for pending requests to complete on a single BlockDriverState subtree,
25767da1dc5SFam Zheng  * and suspend the block driver's internal I/O until the next request arrives.
25861007b31SStefan Hajnoczi  *
25961007b31SStefan Hajnoczi  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
26061007b31SStefan Hajnoczi  * AioContext.
2617a63f3cdSStefan Hajnoczi  *
2627a63f3cdSStefan Hajnoczi  * Only this BlockDriverState's AioContext is run, so in-flight requests must
2637a63f3cdSStefan Hajnoczi  * not depend on events in other AioContexts.  In that case, use
2647a63f3cdSStefan Hajnoczi  * bdrv_drain_all() instead.
26561007b31SStefan Hajnoczi  */
266b6e84c97SPaolo Bonzini void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
267b6e84c97SPaolo Bonzini {
2686820643fSKevin Wolf     assert(qemu_in_coroutine());
2696820643fSKevin Wolf     bdrv_drained_begin(bs);
2706820643fSKevin Wolf     bdrv_drained_end(bs);
271b6e84c97SPaolo Bonzini }
272b6e84c97SPaolo Bonzini 
27361007b31SStefan Hajnoczi void bdrv_drain(BlockDriverState *bs)
27461007b31SStefan Hajnoczi {
2756820643fSKevin Wolf     bdrv_drained_begin(bs);
2766820643fSKevin Wolf     bdrv_drained_end(bs);
27761007b31SStefan Hajnoczi }
27861007b31SStefan Hajnoczi 
27961007b31SStefan Hajnoczi /*
28061007b31SStefan Hajnoczi  * Wait for pending requests to complete across all BlockDriverStates
28161007b31SStefan Hajnoczi  *
28261007b31SStefan Hajnoczi  * This function does not flush data to disk; use bdrv_flush_all() for that
28361007b31SStefan Hajnoczi  * after calling this function.
284c0778f66SAlberto Garcia  *
285c0778f66SAlberto Garcia  * This pauses all block jobs and disables external clients. It must
286c0778f66SAlberto Garcia  * be paired with bdrv_drain_all_end().
287c0778f66SAlberto Garcia  *
288c0778f66SAlberto Garcia  * NOTE: no new block jobs or BlockDriverStates can be created between
289c0778f66SAlberto Garcia  * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
29061007b31SStefan Hajnoczi  */
291c0778f66SAlberto Garcia void bdrv_drain_all_begin(void)
29261007b31SStefan Hajnoczi {
29361007b31SStefan Hajnoczi     /* Always run first iteration so any pending completion BHs run */
29499723548SPaolo Bonzini     bool waited = true;
2957c8eece4SKevin Wolf     BlockDriverState *bs;
29688be7b4bSKevin Wolf     BdrvNextIterator it;
297f406c03cSAlexander Yarygin     GSList *aio_ctxs = NULL, *ctx;
29861007b31SStefan Hajnoczi 
299f321dcb5SPaolo Bonzini     block_job_pause_all();
300eb1364ceSAlberto Garcia 
30188be7b4bSKevin Wolf     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
30261007b31SStefan Hajnoczi         AioContext *aio_context = bdrv_get_aio_context(bs);
30361007b31SStefan Hajnoczi 
30461007b31SStefan Hajnoczi         aio_context_acquire(aio_context);
305c2066af0SKevin Wolf         bdrv_parent_drained_begin(bs);
306c0778f66SAlberto Garcia         aio_disable_external(aio_context);
30761007b31SStefan Hajnoczi         aio_context_release(aio_context);
308f406c03cSAlexander Yarygin 
309764ba3aeSAlberto Garcia         if (!g_slist_find(aio_ctxs, aio_context)) {
310f406c03cSAlexander Yarygin             aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
311f406c03cSAlexander Yarygin         }
31261007b31SStefan Hajnoczi     }
31361007b31SStefan Hajnoczi 
3147a63f3cdSStefan Hajnoczi     /* Note that completion of an asynchronous I/O operation can trigger any
3157a63f3cdSStefan Hajnoczi      * number of other I/O operations on other devices---for example a
3167a63f3cdSStefan Hajnoczi      * coroutine can submit an I/O request to another device in response to
3177a63f3cdSStefan Hajnoczi      * request completion.  Therefore we must keep looping until there was no
3187a63f3cdSStefan Hajnoczi      * more activity rather than simply draining each device independently.
3197a63f3cdSStefan Hajnoczi      */
32099723548SPaolo Bonzini     while (waited) {
32199723548SPaolo Bonzini         waited = false;
322f406c03cSAlexander Yarygin 
323f406c03cSAlexander Yarygin         for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
324f406c03cSAlexander Yarygin             AioContext *aio_context = ctx->data;
32561007b31SStefan Hajnoczi 
32661007b31SStefan Hajnoczi             aio_context_acquire(aio_context);
32788be7b4bSKevin Wolf             for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
328f406c03cSAlexander Yarygin                 if (aio_context == bdrv_get_aio_context(bs)) {
329d42cf288SPaolo Bonzini                     waited |= bdrv_drain_recurse(bs);
330f406c03cSAlexander Yarygin                 }
331f406c03cSAlexander Yarygin             }
33261007b31SStefan Hajnoczi             aio_context_release(aio_context);
33361007b31SStefan Hajnoczi         }
33461007b31SStefan Hajnoczi     }
33561007b31SStefan Hajnoczi 
336c0778f66SAlberto Garcia     g_slist_free(aio_ctxs);
337c0778f66SAlberto Garcia }
338c0778f66SAlberto Garcia 
339c0778f66SAlberto Garcia void bdrv_drain_all_end(void)
340c0778f66SAlberto Garcia {
341c0778f66SAlberto Garcia     BlockDriverState *bs;
342c0778f66SAlberto Garcia     BdrvNextIterator it;
343c0778f66SAlberto Garcia 
34488be7b4bSKevin Wolf     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
34561007b31SStefan Hajnoczi         AioContext *aio_context = bdrv_get_aio_context(bs);
34661007b31SStefan Hajnoczi 
34761007b31SStefan Hajnoczi         aio_context_acquire(aio_context);
348c0778f66SAlberto Garcia         aio_enable_external(aio_context);
349c2066af0SKevin Wolf         bdrv_parent_drained_end(bs);
35061007b31SStefan Hajnoczi         aio_context_release(aio_context);
35161007b31SStefan Hajnoczi     }
352eb1364ceSAlberto Garcia 
353f321dcb5SPaolo Bonzini     block_job_resume_all();
35461007b31SStefan Hajnoczi }
35561007b31SStefan Hajnoczi 
356c0778f66SAlberto Garcia void bdrv_drain_all(void)
357c0778f66SAlberto Garcia {
358c0778f66SAlberto Garcia     bdrv_drain_all_begin();
359c0778f66SAlberto Garcia     bdrv_drain_all_end();
360c0778f66SAlberto Garcia }
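
/*
 * Pairing sketch for the begin/end variants above; the convenience wrapper
 * bdrv_drain_all() simply pairs the two with nothing in between:
 *
 *     bdrv_drain_all_begin();
 *     ... work on the quiesced BlockDriverStates ...
 *     bdrv_drain_all_end();
 */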
361c0778f66SAlberto Garcia 
36261007b31SStefan Hajnoczi /**
36361007b31SStefan Hajnoczi  * Remove an active request from the tracked requests list
36461007b31SStefan Hajnoczi  *
36561007b31SStefan Hajnoczi  * This function should be called when a tracked request is completing.
36661007b31SStefan Hajnoczi  */
36761007b31SStefan Hajnoczi static void tracked_request_end(BdrvTrackedRequest *req)
36861007b31SStefan Hajnoczi {
36961007b31SStefan Hajnoczi     if (req->serialising) {
37020fc71b2SPaolo Bonzini         atomic_dec(&req->bs->serialising_in_flight);
37161007b31SStefan Hajnoczi     }
37261007b31SStefan Hajnoczi 
3733783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&req->bs->reqs_lock);
37461007b31SStefan Hajnoczi     QLIST_REMOVE(req, list);
37561007b31SStefan Hajnoczi     qemu_co_queue_restart_all(&req->wait_queue);
3763783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&req->bs->reqs_lock);
37761007b31SStefan Hajnoczi }
37861007b31SStefan Hajnoczi 
37961007b31SStefan Hajnoczi /**
38061007b31SStefan Hajnoczi  * Add an active request to the tracked requests list
38161007b31SStefan Hajnoczi  */
38261007b31SStefan Hajnoczi static void tracked_request_begin(BdrvTrackedRequest *req,
38361007b31SStefan Hajnoczi                                   BlockDriverState *bs,
38461007b31SStefan Hajnoczi                                   int64_t offset,
385ebde595cSFam Zheng                                   unsigned int bytes,
386ebde595cSFam Zheng                                   enum BdrvTrackedRequestType type)
38761007b31SStefan Hajnoczi {
38861007b31SStefan Hajnoczi     *req = (BdrvTrackedRequest){
38961007b31SStefan Hajnoczi         .bs = bs,
39061007b31SStefan Hajnoczi         .offset         = offset,
39161007b31SStefan Hajnoczi         .bytes          = bytes,
392ebde595cSFam Zheng         .type           = type,
39361007b31SStefan Hajnoczi         .co             = qemu_coroutine_self(),
39461007b31SStefan Hajnoczi         .serialising    = false,
39561007b31SStefan Hajnoczi         .overlap_offset = offset,
39661007b31SStefan Hajnoczi         .overlap_bytes  = bytes,
39761007b31SStefan Hajnoczi     };
39861007b31SStefan Hajnoczi 
39961007b31SStefan Hajnoczi     qemu_co_queue_init(&req->wait_queue);
40061007b31SStefan Hajnoczi 
4013783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
40261007b31SStefan Hajnoczi     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
4033783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
40461007b31SStefan Hajnoczi }
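
/*
 * Typical request-tracking pattern as used by the aligned read/write paths
 * later in this file (sketch, alignment handling omitted):
 *
 *     BdrvTrackedRequest req;
 *
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
 *     ret = bdrv_aligned_preadv(child, &req, offset, bytes, align, qiov, flags);
 *     tracked_request_end(&req);
 */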
40561007b31SStefan Hajnoczi 
40661007b31SStefan Hajnoczi static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
40761007b31SStefan Hajnoczi {
40861007b31SStefan Hajnoczi     int64_t overlap_offset = req->offset & ~(align - 1);
40961007b31SStefan Hajnoczi     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
41061007b31SStefan Hajnoczi                                - overlap_offset;
41161007b31SStefan Hajnoczi 
41261007b31SStefan Hajnoczi     if (!req->serialising) {
41320fc71b2SPaolo Bonzini         atomic_inc(&req->bs->serialising_in_flight);
41461007b31SStefan Hajnoczi         req->serialising = true;
41561007b31SStefan Hajnoczi     }
41661007b31SStefan Hajnoczi 
41761007b31SStefan Hajnoczi     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
41861007b31SStefan Hajnoczi     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
41961007b31SStefan Hajnoczi }
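
/*
 * Worked example: for a request with offset 4196 and bytes 200, serialised
 * at a 4096-byte alignment, overlap_offset becomes 4096 and overlap_bytes
 * 4096, so any other request touching that cluster counts as overlapping.
 */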
42061007b31SStefan Hajnoczi 
42161007b31SStefan Hajnoczi /**
422244483e6SKevin Wolf  * Round a region to cluster boundaries (sector-based)
42361007b31SStefan Hajnoczi  */
424244483e6SKevin Wolf void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
42561007b31SStefan Hajnoczi                                     int64_t sector_num, int nb_sectors,
42661007b31SStefan Hajnoczi                                     int64_t *cluster_sector_num,
42761007b31SStefan Hajnoczi                                     int *cluster_nb_sectors)
42861007b31SStefan Hajnoczi {
42961007b31SStefan Hajnoczi     BlockDriverInfo bdi;
43061007b31SStefan Hajnoczi 
43161007b31SStefan Hajnoczi     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
43261007b31SStefan Hajnoczi         *cluster_sector_num = sector_num;
43361007b31SStefan Hajnoczi         *cluster_nb_sectors = nb_sectors;
43461007b31SStefan Hajnoczi     } else {
43561007b31SStefan Hajnoczi         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
43661007b31SStefan Hajnoczi         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
43761007b31SStefan Hajnoczi         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
43861007b31SStefan Hajnoczi                                             nb_sectors, c);
43961007b31SStefan Hajnoczi     }
44061007b31SStefan Hajnoczi }
44161007b31SStefan Hajnoczi 
442244483e6SKevin Wolf /**
443244483e6SKevin Wolf  * Round a region to cluster boundaries
444244483e6SKevin Wolf  */
445244483e6SKevin Wolf void bdrv_round_to_clusters(BlockDriverState *bs,
446244483e6SKevin Wolf                             int64_t offset, unsigned int bytes,
447244483e6SKevin Wolf                             int64_t *cluster_offset,
448244483e6SKevin Wolf                             unsigned int *cluster_bytes)
449244483e6SKevin Wolf {
450244483e6SKevin Wolf     BlockDriverInfo bdi;
451244483e6SKevin Wolf 
452244483e6SKevin Wolf     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
453244483e6SKevin Wolf         *cluster_offset = offset;
454244483e6SKevin Wolf         *cluster_bytes = bytes;
455244483e6SKevin Wolf     } else {
456244483e6SKevin Wolf         int64_t c = bdi.cluster_size;
457244483e6SKevin Wolf         *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
458244483e6SKevin Wolf         *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
459244483e6SKevin Wolf     }
460244483e6SKevin Wolf }
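
/*
 * Example: with a 64 KiB cluster size, offset 65540 and bytes 8 are rounded
 * out to cluster_offset 65536 and cluster_bytes 65536, i.e. the region is
 * widened to cover the whole cluster it touches.
 */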
461244483e6SKevin Wolf 
46261007b31SStefan Hajnoczi static int bdrv_get_cluster_size(BlockDriverState *bs)
46361007b31SStefan Hajnoczi {
46461007b31SStefan Hajnoczi     BlockDriverInfo bdi;
46561007b31SStefan Hajnoczi     int ret;
46661007b31SStefan Hajnoczi 
46761007b31SStefan Hajnoczi     ret = bdrv_get_info(bs, &bdi);
46861007b31SStefan Hajnoczi     if (ret < 0 || bdi.cluster_size == 0) {
469a5b8dd2cSEric Blake         return bs->bl.request_alignment;
47061007b31SStefan Hajnoczi     } else {
47161007b31SStefan Hajnoczi         return bdi.cluster_size;
47261007b31SStefan Hajnoczi     }
47361007b31SStefan Hajnoczi }
47461007b31SStefan Hajnoczi 
47561007b31SStefan Hajnoczi static bool tracked_request_overlaps(BdrvTrackedRequest *req,
47661007b31SStefan Hajnoczi                                      int64_t offset, unsigned int bytes)
47761007b31SStefan Hajnoczi {
47861007b31SStefan Hajnoczi     /*        aaaa   bbbb */
47961007b31SStefan Hajnoczi     if (offset >= req->overlap_offset + req->overlap_bytes) {
48061007b31SStefan Hajnoczi         return false;
48161007b31SStefan Hajnoczi     }
48261007b31SStefan Hajnoczi     /* bbbb   aaaa        */
48361007b31SStefan Hajnoczi     if (req->overlap_offset >= offset + bytes) {
48461007b31SStefan Hajnoczi         return false;
48561007b31SStefan Hajnoczi     }
48661007b31SStefan Hajnoczi     return true;
48761007b31SStefan Hajnoczi }
48861007b31SStefan Hajnoczi 
48999723548SPaolo Bonzini void bdrv_inc_in_flight(BlockDriverState *bs)
49099723548SPaolo Bonzini {
49199723548SPaolo Bonzini     atomic_inc(&bs->in_flight);
49299723548SPaolo Bonzini }
49399723548SPaolo Bonzini 
494c9d1a561SPaolo Bonzini static void dummy_bh_cb(void *opaque)
495c9d1a561SPaolo Bonzini {
496c9d1a561SPaolo Bonzini }
497c9d1a561SPaolo Bonzini 
498c9d1a561SPaolo Bonzini void bdrv_wakeup(BlockDriverState *bs)
499c9d1a561SPaolo Bonzini {
500e2a6ae7fSPaolo Bonzini     /* The barrier (or an atomic op) is in the caller.  */
501e2a6ae7fSPaolo Bonzini     if (atomic_read(&bs->wakeup)) {
502c9d1a561SPaolo Bonzini         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
503c9d1a561SPaolo Bonzini     }
504c9d1a561SPaolo Bonzini }
505c9d1a561SPaolo Bonzini 
50699723548SPaolo Bonzini void bdrv_dec_in_flight(BlockDriverState *bs)
50799723548SPaolo Bonzini {
50899723548SPaolo Bonzini     atomic_dec(&bs->in_flight);
509c9d1a561SPaolo Bonzini     bdrv_wakeup(bs);
51099723548SPaolo Bonzini }
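
/*
 * bs->in_flight counts requests for the benefit of drain and of
 * BDRV_POLL_WHILE(); bdrv_wakeup() kicks a main-loop thread that is
 * polling on bs->wakeup so that it re-evaluates its condition once the
 * counter has been decremented.
 */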
51199723548SPaolo Bonzini 
51261007b31SStefan Hajnoczi static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
51361007b31SStefan Hajnoczi {
51461007b31SStefan Hajnoczi     BlockDriverState *bs = self->bs;
51561007b31SStefan Hajnoczi     BdrvTrackedRequest *req;
51661007b31SStefan Hajnoczi     bool retry;
51761007b31SStefan Hajnoczi     bool waited = false;
51861007b31SStefan Hajnoczi 
51920fc71b2SPaolo Bonzini     if (!atomic_read(&bs->serialising_in_flight)) {
52061007b31SStefan Hajnoczi         return false;
52161007b31SStefan Hajnoczi     }
52261007b31SStefan Hajnoczi 
52361007b31SStefan Hajnoczi     do {
52461007b31SStefan Hajnoczi         retry = false;
5253783fa3dSPaolo Bonzini         qemu_co_mutex_lock(&bs->reqs_lock);
52661007b31SStefan Hajnoczi         QLIST_FOREACH(req, &bs->tracked_requests, list) {
52761007b31SStefan Hajnoczi             if (req == self || (!req->serialising && !self->serialising)) {
52861007b31SStefan Hajnoczi                 continue;
52961007b31SStefan Hajnoczi             }
53061007b31SStefan Hajnoczi             if (tracked_request_overlaps(req, self->overlap_offset,
53161007b31SStefan Hajnoczi                                          self->overlap_bytes))
53261007b31SStefan Hajnoczi             {
53361007b31SStefan Hajnoczi                 /* Hitting this means there was a reentrant request, for
53461007b31SStefan Hajnoczi                  * example, a block driver issuing nested requests.  This must
53561007b31SStefan Hajnoczi                  * never happen since it means deadlock.
53661007b31SStefan Hajnoczi                  */
53761007b31SStefan Hajnoczi                 assert(qemu_coroutine_self() != req->co);
53861007b31SStefan Hajnoczi 
53961007b31SStefan Hajnoczi                 /* If the request is already (indirectly) waiting for us, or
54061007b31SStefan Hajnoczi                  * will wait for us as soon as it wakes up, then just go on
54161007b31SStefan Hajnoczi                  * (instead of producing a deadlock in the former case). */
54261007b31SStefan Hajnoczi                 if (!req->waiting_for) {
54361007b31SStefan Hajnoczi                     self->waiting_for = req;
5443783fa3dSPaolo Bonzini                     qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
54561007b31SStefan Hajnoczi                     self->waiting_for = NULL;
54661007b31SStefan Hajnoczi                     retry = true;
54761007b31SStefan Hajnoczi                     waited = true;
54861007b31SStefan Hajnoczi                     break;
54961007b31SStefan Hajnoczi                 }
55061007b31SStefan Hajnoczi             }
55161007b31SStefan Hajnoczi         }
5523783fa3dSPaolo Bonzini         qemu_co_mutex_unlock(&bs->reqs_lock);
55361007b31SStefan Hajnoczi     } while (retry);
55461007b31SStefan Hajnoczi 
55561007b31SStefan Hajnoczi     return waited;
55661007b31SStefan Hajnoczi }
55761007b31SStefan Hajnoczi 
55861007b31SStefan Hajnoczi static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
55961007b31SStefan Hajnoczi                                    size_t size)
56061007b31SStefan Hajnoczi {
56161007b31SStefan Hajnoczi     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
56261007b31SStefan Hajnoczi         return -EIO;
56361007b31SStefan Hajnoczi     }
56461007b31SStefan Hajnoczi 
56561007b31SStefan Hajnoczi     if (!bdrv_is_inserted(bs)) {
56661007b31SStefan Hajnoczi         return -ENOMEDIUM;
56761007b31SStefan Hajnoczi     }
56861007b31SStefan Hajnoczi 
56961007b31SStefan Hajnoczi     if (offset < 0) {
57061007b31SStefan Hajnoczi         return -EIO;
57161007b31SStefan Hajnoczi     }
57261007b31SStefan Hajnoczi 
57361007b31SStefan Hajnoczi     return 0;
57461007b31SStefan Hajnoczi }
57561007b31SStefan Hajnoczi 
57661007b31SStefan Hajnoczi typedef struct RwCo {
577e293b7a3SKevin Wolf     BdrvChild *child;
57861007b31SStefan Hajnoczi     int64_t offset;
57961007b31SStefan Hajnoczi     QEMUIOVector *qiov;
58061007b31SStefan Hajnoczi     bool is_write;
58161007b31SStefan Hajnoczi     int ret;
58261007b31SStefan Hajnoczi     BdrvRequestFlags flags;
58361007b31SStefan Hajnoczi } RwCo;
58461007b31SStefan Hajnoczi 
58561007b31SStefan Hajnoczi static void coroutine_fn bdrv_rw_co_entry(void *opaque)
58661007b31SStefan Hajnoczi {
58761007b31SStefan Hajnoczi     RwCo *rwco = opaque;
58861007b31SStefan Hajnoczi 
58961007b31SStefan Hajnoczi     if (!rwco->is_write) {
590a03ef88fSKevin Wolf         rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
59161007b31SStefan Hajnoczi                                    rwco->qiov->size, rwco->qiov,
59261007b31SStefan Hajnoczi                                    rwco->flags);
59361007b31SStefan Hajnoczi     } else {
594a03ef88fSKevin Wolf         rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
59561007b31SStefan Hajnoczi                                     rwco->qiov->size, rwco->qiov,
59661007b31SStefan Hajnoczi                                     rwco->flags);
59761007b31SStefan Hajnoczi     }
59861007b31SStefan Hajnoczi }
59961007b31SStefan Hajnoczi 
60061007b31SStefan Hajnoczi /*
60161007b31SStefan Hajnoczi  * Process a vectored synchronous request using coroutines
60261007b31SStefan Hajnoczi  */
603e293b7a3SKevin Wolf static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
60461007b31SStefan Hajnoczi                         QEMUIOVector *qiov, bool is_write,
60561007b31SStefan Hajnoczi                         BdrvRequestFlags flags)
60661007b31SStefan Hajnoczi {
60761007b31SStefan Hajnoczi     Coroutine *co;
60861007b31SStefan Hajnoczi     RwCo rwco = {
609e293b7a3SKevin Wolf         .child = child,
61061007b31SStefan Hajnoczi         .offset = offset,
61161007b31SStefan Hajnoczi         .qiov = qiov,
61261007b31SStefan Hajnoczi         .is_write = is_write,
61361007b31SStefan Hajnoczi         .ret = NOT_DONE,
61461007b31SStefan Hajnoczi         .flags = flags,
61561007b31SStefan Hajnoczi     };
61661007b31SStefan Hajnoczi 
61761007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
61861007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
61961007b31SStefan Hajnoczi         bdrv_rw_co_entry(&rwco);
62061007b31SStefan Hajnoczi     } else {
6210b8b8753SPaolo Bonzini         co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
622e92f0e19SFam Zheng         bdrv_coroutine_enter(child->bs, co);
62388b062c2SPaolo Bonzini         BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
62461007b31SStefan Hajnoczi     }
62561007b31SStefan Hajnoczi     return rwco.ret;
62661007b31SStefan Hajnoczi }
62761007b31SStefan Hajnoczi 
62861007b31SStefan Hajnoczi /*
62961007b31SStefan Hajnoczi  * Process a synchronous request using coroutines
63061007b31SStefan Hajnoczi  */
631e293b7a3SKevin Wolf static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
63261007b31SStefan Hajnoczi                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
63361007b31SStefan Hajnoczi {
63461007b31SStefan Hajnoczi     QEMUIOVector qiov;
63561007b31SStefan Hajnoczi     struct iovec iov = {
63661007b31SStefan Hajnoczi         .iov_base = (void *)buf,
63761007b31SStefan Hajnoczi         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
63861007b31SStefan Hajnoczi     };
63961007b31SStefan Hajnoczi 
64061007b31SStefan Hajnoczi     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
64161007b31SStefan Hajnoczi         return -EINVAL;
64261007b31SStefan Hajnoczi     }
64361007b31SStefan Hajnoczi 
64461007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
645e293b7a3SKevin Wolf     return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
64661007b31SStefan Hajnoczi                         &qiov, is_write, flags);
64761007b31SStefan Hajnoczi }
64861007b31SStefan Hajnoczi 
64961007b31SStefan Hajnoczi /* return < 0 if error. See bdrv_write() for the return codes */
650fbcbbf4eSKevin Wolf int bdrv_read(BdrvChild *child, int64_t sector_num,
65161007b31SStefan Hajnoczi               uint8_t *buf, int nb_sectors)
65261007b31SStefan Hajnoczi {
653e293b7a3SKevin Wolf     return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
65461007b31SStefan Hajnoczi }
65561007b31SStefan Hajnoczi 
65661007b31SStefan Hajnoczi /* Return < 0 if error. Important errors are:
65761007b31SStefan Hajnoczi   -EIO         generic I/O error (may happen for all errors)
65861007b31SStefan Hajnoczi   -ENOMEDIUM   No media inserted.
65961007b31SStefan Hajnoczi   -EINVAL      Invalid sector number or nb_sectors
66061007b31SStefan Hajnoczi   -EACCES      Trying to write a read-only device
66161007b31SStefan Hajnoczi */
66218d51c4bSKevin Wolf int bdrv_write(BdrvChild *child, int64_t sector_num,
66361007b31SStefan Hajnoczi                const uint8_t *buf, int nb_sectors)
66461007b31SStefan Hajnoczi {
665e293b7a3SKevin Wolf     return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
66661007b31SStefan Hajnoczi }
66761007b31SStefan Hajnoczi 
668720ff280SKevin Wolf int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
669f5a5ca79SManos Pitsidianakis                        int bytes, BdrvRequestFlags flags)
67061007b31SStefan Hajnoczi {
67174021bc4SEric Blake     QEMUIOVector qiov;
67274021bc4SEric Blake     struct iovec iov = {
67374021bc4SEric Blake         .iov_base = NULL,
674f5a5ca79SManos Pitsidianakis         .iov_len = bytes,
67574021bc4SEric Blake     };
67674021bc4SEric Blake 
67774021bc4SEric Blake     qemu_iovec_init_external(&qiov, &iov, 1);
678e293b7a3SKevin Wolf     return bdrv_prwv_co(child, offset, &qiov, true,
67961007b31SStefan Hajnoczi                         BDRV_REQ_ZERO_WRITE | flags);
68061007b31SStefan Hajnoczi }
68161007b31SStefan Hajnoczi 
68261007b31SStefan Hajnoczi /*
68374021bc4SEric Blake  * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
68461007b31SStefan Hajnoczi  * The operation is sped up by checking the block status and only writing
68561007b31SStefan Hajnoczi  * zeroes to the device if they currently do not return zeroes. Optional
68674021bc4SEric Blake  * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
687465fe887SEric Blake  * BDRV_REQ_FUA).
68861007b31SStefan Hajnoczi  *
68961007b31SStefan Hajnoczi  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
69061007b31SStefan Hajnoczi  */
691720ff280SKevin Wolf int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
69261007b31SStefan Hajnoczi {
69361007b31SStefan Hajnoczi     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
694720ff280SKevin Wolf     BlockDriverState *bs = child->bs;
69567a0fd2aSFam Zheng     BlockDriverState *file;
69661007b31SStefan Hajnoczi     int n;
69761007b31SStefan Hajnoczi 
69861007b31SStefan Hajnoczi     target_sectors = bdrv_nb_sectors(bs);
69961007b31SStefan Hajnoczi     if (target_sectors < 0) {
70061007b31SStefan Hajnoczi         return target_sectors;
70161007b31SStefan Hajnoczi     }
70261007b31SStefan Hajnoczi 
70361007b31SStefan Hajnoczi     for (;;) {
70461007b31SStefan Hajnoczi         nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
70561007b31SStefan Hajnoczi         if (nb_sectors <= 0) {
70661007b31SStefan Hajnoczi             return 0;
70761007b31SStefan Hajnoczi         }
70867a0fd2aSFam Zheng         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
70961007b31SStefan Hajnoczi         if (ret < 0) {
71061007b31SStefan Hajnoczi             error_report("error getting block status at sector %" PRId64 ": %s",
71161007b31SStefan Hajnoczi                          sector_num, strerror(-ret));
71261007b31SStefan Hajnoczi             return ret;
71361007b31SStefan Hajnoczi         }
71461007b31SStefan Hajnoczi         if (ret & BDRV_BLOCK_ZERO) {
71561007b31SStefan Hajnoczi             sector_num += n;
71661007b31SStefan Hajnoczi             continue;
71761007b31SStefan Hajnoczi         }
718720ff280SKevin Wolf         ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
71974021bc4SEric Blake                                  n << BDRV_SECTOR_BITS, flags);
72061007b31SStefan Hajnoczi         if (ret < 0) {
72161007b31SStefan Hajnoczi             error_report("error writing zeroes at sector %" PRId64 ": %s",
72261007b31SStefan Hajnoczi                          sector_num, strerror(-ret));
72361007b31SStefan Hajnoczi             return ret;
72461007b31SStefan Hajnoczi         }
72561007b31SStefan Hajnoczi         sector_num += n;
72661007b31SStefan Hajnoczi     }
72761007b31SStefan Hajnoczi }
72861007b31SStefan Hajnoczi 
729cf2ab8fcSKevin Wolf int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
730f1e84741SKevin Wolf {
731f1e84741SKevin Wolf     int ret;
732f1e84741SKevin Wolf 
733e293b7a3SKevin Wolf     ret = bdrv_prwv_co(child, offset, qiov, false, 0);
734f1e84741SKevin Wolf     if (ret < 0) {
735f1e84741SKevin Wolf         return ret;
736f1e84741SKevin Wolf     }
737f1e84741SKevin Wolf 
738f1e84741SKevin Wolf     return qiov->size;
739f1e84741SKevin Wolf }
740f1e84741SKevin Wolf 
741cf2ab8fcSKevin Wolf int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
74261007b31SStefan Hajnoczi {
74361007b31SStefan Hajnoczi     QEMUIOVector qiov;
74461007b31SStefan Hajnoczi     struct iovec iov = {
74561007b31SStefan Hajnoczi         .iov_base = (void *)buf,
74661007b31SStefan Hajnoczi         .iov_len = bytes,
74761007b31SStefan Hajnoczi     };
74861007b31SStefan Hajnoczi 
74961007b31SStefan Hajnoczi     if (bytes < 0) {
75061007b31SStefan Hajnoczi         return -EINVAL;
75161007b31SStefan Hajnoczi     }
75261007b31SStefan Hajnoczi 
75361007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
754cf2ab8fcSKevin Wolf     return bdrv_preadv(child, offset, &qiov);
75561007b31SStefan Hajnoczi }
75661007b31SStefan Hajnoczi 
757d9ca2ea2SKevin Wolf int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
75861007b31SStefan Hajnoczi {
75961007b31SStefan Hajnoczi     int ret;
76061007b31SStefan Hajnoczi 
761e293b7a3SKevin Wolf     ret = bdrv_prwv_co(child, offset, qiov, true, 0);
76261007b31SStefan Hajnoczi     if (ret < 0) {
76361007b31SStefan Hajnoczi         return ret;
76461007b31SStefan Hajnoczi     }
76561007b31SStefan Hajnoczi 
76661007b31SStefan Hajnoczi     return qiov->size;
76761007b31SStefan Hajnoczi }
76861007b31SStefan Hajnoczi 
769d9ca2ea2SKevin Wolf int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
77061007b31SStefan Hajnoczi {
77161007b31SStefan Hajnoczi     QEMUIOVector qiov;
77261007b31SStefan Hajnoczi     struct iovec iov = {
77361007b31SStefan Hajnoczi         .iov_base   = (void *) buf,
77461007b31SStefan Hajnoczi         .iov_len    = bytes,
77561007b31SStefan Hajnoczi     };
77661007b31SStefan Hajnoczi 
77761007b31SStefan Hajnoczi     if (bytes < 0) {
77861007b31SStefan Hajnoczi         return -EINVAL;
77961007b31SStefan Hajnoczi     }
78061007b31SStefan Hajnoczi 
78161007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
782d9ca2ea2SKevin Wolf     return bdrv_pwritev(child, offset, &qiov);
78361007b31SStefan Hajnoczi }
78461007b31SStefan Hajnoczi 
78561007b31SStefan Hajnoczi /*
78661007b31SStefan Hajnoczi  * Writes to the file and ensures that no writes are reordered across this
78761007b31SStefan Hajnoczi  * request (acts as a barrier)
78861007b31SStefan Hajnoczi  *
78961007b31SStefan Hajnoczi  * Returns 0 on success, -errno in error cases.
79061007b31SStefan Hajnoczi  */
791d9ca2ea2SKevin Wolf int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
79261007b31SStefan Hajnoczi                      const void *buf, int count)
79361007b31SStefan Hajnoczi {
79461007b31SStefan Hajnoczi     int ret;
79561007b31SStefan Hajnoczi 
796d9ca2ea2SKevin Wolf     ret = bdrv_pwrite(child, offset, buf, count);
79761007b31SStefan Hajnoczi     if (ret < 0) {
79861007b31SStefan Hajnoczi         return ret;
79961007b31SStefan Hajnoczi     }
80061007b31SStefan Hajnoczi 
801d9ca2ea2SKevin Wolf     ret = bdrv_flush(child->bs);
802855a6a93SKevin Wolf     if (ret < 0) {
803855a6a93SKevin Wolf         return ret;
80461007b31SStefan Hajnoczi     }
80561007b31SStefan Hajnoczi 
80661007b31SStefan Hajnoczi     return 0;
80761007b31SStefan Hajnoczi }
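
/*
 * Usage sketch (illustrative only; "header_offset" and "header" are
 * placeholders): format drivers use this for metadata updates that must
 * reach the disk before any later write is submitted:
 *
 *     ret = bdrv_pwrite_sync(bs->file, header_offset, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */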
80861007b31SStefan Hajnoczi 
80908844473SKevin Wolf typedef struct CoroutineIOCompletion {
81008844473SKevin Wolf     Coroutine *coroutine;
81108844473SKevin Wolf     int ret;
81208844473SKevin Wolf } CoroutineIOCompletion;
81308844473SKevin Wolf 
81408844473SKevin Wolf static void bdrv_co_io_em_complete(void *opaque, int ret)
81508844473SKevin Wolf {
81608844473SKevin Wolf     CoroutineIOCompletion *co = opaque;
81708844473SKevin Wolf 
81808844473SKevin Wolf     co->ret = ret;
819b9e413ddSPaolo Bonzini     aio_co_wake(co->coroutine);
82008844473SKevin Wolf }
82108844473SKevin Wolf 
822166fe960SKevin Wolf static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
823166fe960SKevin Wolf                                            uint64_t offset, uint64_t bytes,
824166fe960SKevin Wolf                                            QEMUIOVector *qiov, int flags)
825166fe960SKevin Wolf {
826166fe960SKevin Wolf     BlockDriver *drv = bs->drv;
8273fb06697SKevin Wolf     int64_t sector_num;
8283fb06697SKevin Wolf     unsigned int nb_sectors;
8293fb06697SKevin Wolf 
830fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
831fa166538SEric Blake 
8323fb06697SKevin Wolf     if (drv->bdrv_co_preadv) {
8333fb06697SKevin Wolf         return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
8343fb06697SKevin Wolf     }
8353fb06697SKevin Wolf 
8363fb06697SKevin Wolf     sector_num = offset >> BDRV_SECTOR_BITS;
8373fb06697SKevin Wolf     nb_sectors = bytes >> BDRV_SECTOR_BITS;
838166fe960SKevin Wolf 
839166fe960SKevin Wolf     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
840166fe960SKevin Wolf     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
841166fe960SKevin Wolf     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
842166fe960SKevin Wolf 
84308844473SKevin Wolf     if (drv->bdrv_co_readv) {
844166fe960SKevin Wolf         return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
84508844473SKevin Wolf     } else {
84608844473SKevin Wolf         BlockAIOCB *acb;
84708844473SKevin Wolf         CoroutineIOCompletion co = {
84808844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
84908844473SKevin Wolf         };
85008844473SKevin Wolf 
85108844473SKevin Wolf         acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
85208844473SKevin Wolf                                       bdrv_co_io_em_complete, &co);
85308844473SKevin Wolf         if (acb == NULL) {
85408844473SKevin Wolf             return -EIO;
85508844473SKevin Wolf         } else {
85608844473SKevin Wolf             qemu_coroutine_yield();
85708844473SKevin Wolf             return co.ret;
85808844473SKevin Wolf         }
85908844473SKevin Wolf     }
860166fe960SKevin Wolf }
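
/*
 * Both bdrv_driver_preadv() above and bdrv_driver_pwritev() below prefer
 * the byte-based .bdrv_co_preadv/.bdrv_co_pwritev driver interface, then
 * fall back to the sector-based coroutine callbacks, and finally emulate
 * the request on top of the driver's AIO interface.
 */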
861166fe960SKevin Wolf 
86278a07294SKevin Wolf static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
86378a07294SKevin Wolf                                             uint64_t offset, uint64_t bytes,
86478a07294SKevin Wolf                                             QEMUIOVector *qiov, int flags)
86578a07294SKevin Wolf {
86678a07294SKevin Wolf     BlockDriver *drv = bs->drv;
8673fb06697SKevin Wolf     int64_t sector_num;
8683fb06697SKevin Wolf     unsigned int nb_sectors;
86978a07294SKevin Wolf     int ret;
87078a07294SKevin Wolf 
871fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
872fa166538SEric Blake 
8733fb06697SKevin Wolf     if (drv->bdrv_co_pwritev) {
874515c2f43SKevin Wolf         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
875515c2f43SKevin Wolf                                    flags & bs->supported_write_flags);
876515c2f43SKevin Wolf         flags &= ~bs->supported_write_flags;
8773fb06697SKevin Wolf         goto emulate_flags;
8783fb06697SKevin Wolf     }
8793fb06697SKevin Wolf 
8803fb06697SKevin Wolf     sector_num = offset >> BDRV_SECTOR_BITS;
8813fb06697SKevin Wolf     nb_sectors = bytes >> BDRV_SECTOR_BITS;
8823fb06697SKevin Wolf 
88378a07294SKevin Wolf     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
88478a07294SKevin Wolf     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
88578a07294SKevin Wolf     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
88678a07294SKevin Wolf 
88778a07294SKevin Wolf     if (drv->bdrv_co_writev_flags) {
88878a07294SKevin Wolf         ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
8894df863f3SEric Blake                                         flags & bs->supported_write_flags);
8904df863f3SEric Blake         flags &= ~bs->supported_write_flags;
89108844473SKevin Wolf     } else if (drv->bdrv_co_writev) {
8924df863f3SEric Blake         assert(!bs->supported_write_flags);
89378a07294SKevin Wolf         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
89408844473SKevin Wolf     } else {
89508844473SKevin Wolf         BlockAIOCB *acb;
89608844473SKevin Wolf         CoroutineIOCompletion co = {
89708844473SKevin Wolf             .coroutine = qemu_coroutine_self(),
89808844473SKevin Wolf         };
89908844473SKevin Wolf 
90008844473SKevin Wolf         acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
90108844473SKevin Wolf                                        bdrv_co_io_em_complete, &co);
90208844473SKevin Wolf         if (acb == NULL) {
9033fb06697SKevin Wolf             ret = -EIO;
90408844473SKevin Wolf         } else {
90508844473SKevin Wolf             qemu_coroutine_yield();
9063fb06697SKevin Wolf             ret = co.ret;
90708844473SKevin Wolf         }
90878a07294SKevin Wolf     }
90978a07294SKevin Wolf 
9103fb06697SKevin Wolf emulate_flags:
9114df863f3SEric Blake     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
91278a07294SKevin Wolf         ret = bdrv_co_flush(bs);
91378a07294SKevin Wolf     }
91478a07294SKevin Wolf 
91578a07294SKevin Wolf     return ret;
91678a07294SKevin Wolf }
91778a07294SKevin Wolf 
91829a298afSPavel Butsykin static int coroutine_fn
91929a298afSPavel Butsykin bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
92029a298afSPavel Butsykin                                uint64_t bytes, QEMUIOVector *qiov)
92129a298afSPavel Butsykin {
92229a298afSPavel Butsykin     BlockDriver *drv = bs->drv;
92329a298afSPavel Butsykin 
92429a298afSPavel Butsykin     if (!drv->bdrv_co_pwritev_compressed) {
92529a298afSPavel Butsykin         return -ENOTSUP;
92629a298afSPavel Butsykin     }
92729a298afSPavel Butsykin 
92829a298afSPavel Butsykin     return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
92929a298afSPavel Butsykin }
93029a298afSPavel Butsykin 
93185c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
932244483e6SKevin Wolf         int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
93361007b31SStefan Hajnoczi {
93485c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
93585c97ca7SKevin Wolf 
93661007b31SStefan Hajnoczi     /* Perform I/O through a temporary buffer so that users who scribble over
93761007b31SStefan Hajnoczi      * their read buffer while the operation is in progress do not end up
93861007b31SStefan Hajnoczi      * modifying the image file.  This is critical for zero-copy guest I/O
93961007b31SStefan Hajnoczi      * where anything might happen inside guest memory.
94061007b31SStefan Hajnoczi      */
94161007b31SStefan Hajnoczi     void *bounce_buffer;
94261007b31SStefan Hajnoczi 
94361007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
94461007b31SStefan Hajnoczi     struct iovec iov;
94561007b31SStefan Hajnoczi     QEMUIOVector bounce_qiov;
946244483e6SKevin Wolf     int64_t cluster_offset;
947244483e6SKevin Wolf     unsigned int cluster_bytes;
94861007b31SStefan Hajnoczi     size_t skip_bytes;
94961007b31SStefan Hajnoczi     int ret;
95061007b31SStefan Hajnoczi 
9511bf03e66SKevin Wolf     /* FIXME We cannot require callers to have write permissions when all they
9521bf03e66SKevin Wolf      * are doing is a read request. If we did things right, write permissions
9531bf03e66SKevin Wolf      * would be obtained anyway, but internally by the copy-on-read code. As
9541bf03e66SKevin Wolf      * long as it is implemented here rather than in a separat filter driver,
9551bf03e66SKevin Wolf      * the copy-on-read code doesn't have its own BdrvChild, however, for which
9561bf03e66SKevin Wolf      * it could request permissions. Therefore we have to bypass the permission
9571bf03e66SKevin Wolf      * system for the moment. */
9581bf03e66SKevin Wolf     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
959afa4b293SKevin Wolf 
96061007b31SStefan Hajnoczi     /* Cover the entire cluster so no additional backing file I/O is required
96161007b31SStefan Hajnoczi      * when allocating a cluster in the image file.
96261007b31SStefan Hajnoczi      */
963244483e6SKevin Wolf     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
96461007b31SStefan Hajnoczi 
965244483e6SKevin Wolf     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
966244483e6SKevin Wolf                                    cluster_offset, cluster_bytes);
96761007b31SStefan Hajnoczi 
968244483e6SKevin Wolf     iov.iov_len = cluster_bytes;
96961007b31SStefan Hajnoczi     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
97061007b31SStefan Hajnoczi     if (bounce_buffer == NULL) {
97161007b31SStefan Hajnoczi         ret = -ENOMEM;
97261007b31SStefan Hajnoczi         goto err;
97361007b31SStefan Hajnoczi     }
97461007b31SStefan Hajnoczi 
97561007b31SStefan Hajnoczi     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
97661007b31SStefan Hajnoczi 
977244483e6SKevin Wolf     ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
978166fe960SKevin Wolf                              &bounce_qiov, 0);
97961007b31SStefan Hajnoczi     if (ret < 0) {
98061007b31SStefan Hajnoczi         goto err;
98161007b31SStefan Hajnoczi     }
98261007b31SStefan Hajnoczi 
983c1499a5eSEric Blake     if (drv->bdrv_co_pwrite_zeroes &&
98461007b31SStefan Hajnoczi         buffer_is_zero(bounce_buffer, iov.iov_len)) {
985a604fa2bSEric Blake         /* FIXME: Should we (perhaps conditionally) be setting
986a604fa2bSEric Blake          * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
987a604fa2bSEric Blake          * that still correctly reads as zero? */
988244483e6SKevin Wolf         ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
98961007b31SStefan Hajnoczi     } else {
99061007b31SStefan Hajnoczi         /* This does not change the data on the disk, it is not necessary
99161007b31SStefan Hajnoczi          * to flush even in cache=writethrough mode.
99261007b31SStefan Hajnoczi          */
993244483e6SKevin Wolf         ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
99478a07294SKevin Wolf                                   &bounce_qiov, 0);
99561007b31SStefan Hajnoczi     }
99661007b31SStefan Hajnoczi 
99761007b31SStefan Hajnoczi     if (ret < 0) {
99861007b31SStefan Hajnoczi         /* It might be okay to ignore write errors for guest requests.  If this
99961007b31SStefan Hajnoczi          * is a deliberate copy-on-read then we don't want to ignore the error.
100061007b31SStefan Hajnoczi          * Simply report it in all cases.
100161007b31SStefan Hajnoczi          */
100261007b31SStefan Hajnoczi         goto err;
100361007b31SStefan Hajnoczi     }
100461007b31SStefan Hajnoczi 
1005244483e6SKevin Wolf     skip_bytes = offset - cluster_offset;
1006244483e6SKevin Wolf     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);
100761007b31SStefan Hajnoczi 
100861007b31SStefan Hajnoczi err:
100961007b31SStefan Hajnoczi     qemu_vfree(bounce_buffer);
101061007b31SStefan Hajnoczi     return ret;
101161007b31SStefan Hajnoczi }
101261007b31SStefan Hajnoczi 
101361007b31SStefan Hajnoczi /*
101461007b31SStefan Hajnoczi  * Forwards an already correctly aligned request to the BlockDriver. This
10151a62d0acSEric Blake  * handles copy on read, zeroing after EOF, and fragmentation of large
10161a62d0acSEric Blake  * reads; any other features must be implemented by the caller.
101761007b31SStefan Hajnoczi  */
101885c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
101961007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
102061007b31SStefan Hajnoczi     int64_t align, QEMUIOVector *qiov, int flags)
102161007b31SStefan Hajnoczi {
102285c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
1023c9d20029SKevin Wolf     int64_t total_bytes, max_bytes;
10241a62d0acSEric Blake     int ret = 0;
10251a62d0acSEric Blake     uint64_t bytes_remaining = bytes;
10261a62d0acSEric Blake     int max_transfer;
102761007b31SStefan Hajnoczi 
102849c07526SKevin Wolf     assert(is_power_of_2(align));
102949c07526SKevin Wolf     assert((offset & (align - 1)) == 0);
103049c07526SKevin Wolf     assert((bytes & (align - 1)) == 0);
103161007b31SStefan Hajnoczi     assert(!qiov || bytes == qiov->size);
1032abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
10331a62d0acSEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
10341a62d0acSEric Blake                                    align);
1035a604fa2bSEric Blake 
1036a604fa2bSEric Blake     /* TODO: We would need a per-BDS .supported_read_flags and
1037a604fa2bSEric Blake      * potential fallback support, if we ever implement any read flags
1038a604fa2bSEric Blake      * to pass through to drivers.  For now, there aren't any
1039a604fa2bSEric Blake      * passthrough flags.  */
1040a604fa2bSEric Blake     assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
104161007b31SStefan Hajnoczi 
104261007b31SStefan Hajnoczi     /* Handle Copy on Read and associated serialisation */
104361007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
104461007b31SStefan Hajnoczi         /* If we touch the same cluster it counts as an overlap.  This
104561007b31SStefan Hajnoczi          * guarantees that allocating writes will be serialized and not race
104661007b31SStefan Hajnoczi          * with each other for the same cluster.  For example, in copy-on-read
104761007b31SStefan Hajnoczi          * it ensures that the CoR read and write operations are atomic and
104861007b31SStefan Hajnoczi          * guest writes cannot interleave between them. */
104961007b31SStefan Hajnoczi         mark_request_serialising(req, bdrv_get_cluster_size(bs));
105061007b31SStefan Hajnoczi     }
105161007b31SStefan Hajnoczi 
105261408b25SFam Zheng     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
105361007b31SStefan Hajnoczi         wait_serialising_requests(req);
105461408b25SFam Zheng     }
105561007b31SStefan Hajnoczi 
105661007b31SStefan Hajnoczi     if (flags & BDRV_REQ_COPY_ON_READ) {
105749c07526SKevin Wolf         int64_t start_sector = offset >> BDRV_SECTOR_BITS;
105849c07526SKevin Wolf         int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
105949c07526SKevin Wolf         unsigned int nb_sectors = end_sector - start_sector;
106061007b31SStefan Hajnoczi         int pnum;
106161007b31SStefan Hajnoczi 
106249c07526SKevin Wolf         ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
106361007b31SStefan Hajnoczi         if (ret < 0) {
106461007b31SStefan Hajnoczi             goto out;
106561007b31SStefan Hajnoczi         }
106661007b31SStefan Hajnoczi 
106761007b31SStefan Hajnoczi         if (!ret || pnum != nb_sectors) {
106885c97ca7SKevin Wolf             ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
106961007b31SStefan Hajnoczi             goto out;
107061007b31SStefan Hajnoczi         }
107161007b31SStefan Hajnoczi     }
107261007b31SStefan Hajnoczi 
10731a62d0acSEric Blake     /* Forward the request to the BlockDriver, possibly fragmenting it */
107449c07526SKevin Wolf     total_bytes = bdrv_getlength(bs);
107549c07526SKevin Wolf     if (total_bytes < 0) {
107649c07526SKevin Wolf         ret = total_bytes;
107761007b31SStefan Hajnoczi         goto out;
107861007b31SStefan Hajnoczi     }
107961007b31SStefan Hajnoczi 
108049c07526SKevin Wolf     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
10811a62d0acSEric Blake     if (bytes <= max_bytes && bytes <= max_transfer) {
1082166fe960SKevin Wolf         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
10831a62d0acSEric Blake         goto out;
108461007b31SStefan Hajnoczi     }
108561007b31SStefan Hajnoczi 
10861a62d0acSEric Blake     while (bytes_remaining) {
10871a62d0acSEric Blake         int num;
10881a62d0acSEric Blake 
10891a62d0acSEric Blake         if (max_bytes) {
10901a62d0acSEric Blake             QEMUIOVector local_qiov;
10911a62d0acSEric Blake 
10921a62d0acSEric Blake             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
10931a62d0acSEric Blake             assert(num);
10941a62d0acSEric Blake             qemu_iovec_init(&local_qiov, qiov->niov);
10951a62d0acSEric Blake             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
10961a62d0acSEric Blake 
10971a62d0acSEric Blake             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
10981a62d0acSEric Blake                                      num, &local_qiov, 0);
10991a62d0acSEric Blake             max_bytes -= num;
11001a62d0acSEric Blake             qemu_iovec_destroy(&local_qiov);
11011a62d0acSEric Blake         } else {
11021a62d0acSEric Blake             num = bytes_remaining;
11031a62d0acSEric Blake             ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
11041a62d0acSEric Blake                                     bytes_remaining);
11051a62d0acSEric Blake         }
11061a62d0acSEric Blake         if (ret < 0) {
11071a62d0acSEric Blake             goto out;
11081a62d0acSEric Blake         }
11091a62d0acSEric Blake         bytes_remaining -= num;
111061007b31SStefan Hajnoczi     }
111161007b31SStefan Hajnoczi 
111261007b31SStefan Hajnoczi out:
11131a62d0acSEric Blake     return ret < 0 ? ret : 0;
111461007b31SStefan Hajnoczi }
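
/*
 * Illustrative sketch (not part of the original source): fragmentation in
 * bdrv_aligned_preadv() above.  Assuming max_transfer = 1 MiB and a
 * 2.5 MiB read of which only 2 MiB lie before EOF (max_bytes = 2 MiB),
 * the loop issues two driver reads and zero-fills the rest:
 *
 *     num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
 *     iteration 1: num = 1 MiB read,  max_bytes -> 1 MiB
 *     iteration 2: num = 1 MiB read,  max_bytes -> 0
 *     iteration 3: max_bytes == 0, so the final 0.5 MiB of the qiov is
 *                  filled with zeroes (reads beyond EOF read as zero)
 */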
111561007b31SStefan Hajnoczi 
111661007b31SStefan Hajnoczi /*
111761007b31SStefan Hajnoczi  * Handle a read request in coroutine context
111861007b31SStefan Hajnoczi  */
1119a03ef88fSKevin Wolf int coroutine_fn bdrv_co_preadv(BdrvChild *child,
112061007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
112161007b31SStefan Hajnoczi     BdrvRequestFlags flags)
112261007b31SStefan Hajnoczi {
1123a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
112461007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
112561007b31SStefan Hajnoczi     BdrvTrackedRequest req;
112661007b31SStefan Hajnoczi 
1127a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
112861007b31SStefan Hajnoczi     uint8_t *head_buf = NULL;
112961007b31SStefan Hajnoczi     uint8_t *tail_buf = NULL;
113061007b31SStefan Hajnoczi     QEMUIOVector local_qiov;
113161007b31SStefan Hajnoczi     bool use_local_qiov = false;
113261007b31SStefan Hajnoczi     int ret;
113361007b31SStefan Hajnoczi 
113461007b31SStefan Hajnoczi     if (!drv) {
113561007b31SStefan Hajnoczi         return -ENOMEDIUM;
113661007b31SStefan Hajnoczi     }
113761007b31SStefan Hajnoczi 
113861007b31SStefan Hajnoczi     ret = bdrv_check_byte_request(bs, offset, bytes);
113961007b31SStefan Hajnoczi     if (ret < 0) {
114061007b31SStefan Hajnoczi         return ret;
114161007b31SStefan Hajnoczi     }
114261007b31SStefan Hajnoczi 
114399723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
114499723548SPaolo Bonzini 
11459568b511SWen Congyang     /* Don't do copy-on-read if we read data before write operation */
1146d3faa13eSPaolo Bonzini     if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
114761007b31SStefan Hajnoczi         flags |= BDRV_REQ_COPY_ON_READ;
114861007b31SStefan Hajnoczi     }
114961007b31SStefan Hajnoczi 
115061007b31SStefan Hajnoczi     /* Align read if necessary by padding qiov */
115161007b31SStefan Hajnoczi     if (offset & (align - 1)) {
115261007b31SStefan Hajnoczi         head_buf = qemu_blockalign(bs, align);
115361007b31SStefan Hajnoczi         qemu_iovec_init(&local_qiov, qiov->niov + 2);
115461007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
115561007b31SStefan Hajnoczi         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
115661007b31SStefan Hajnoczi         use_local_qiov = true;
115761007b31SStefan Hajnoczi 
115861007b31SStefan Hajnoczi         bytes += offset & (align - 1);
115961007b31SStefan Hajnoczi         offset = offset & ~(align - 1);
116061007b31SStefan Hajnoczi     }
116161007b31SStefan Hajnoczi 
116261007b31SStefan Hajnoczi     if ((offset + bytes) & (align - 1)) {
116361007b31SStefan Hajnoczi         if (!use_local_qiov) {
116461007b31SStefan Hajnoczi             qemu_iovec_init(&local_qiov, qiov->niov + 1);
116561007b31SStefan Hajnoczi             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
116661007b31SStefan Hajnoczi             use_local_qiov = true;
116761007b31SStefan Hajnoczi         }
116861007b31SStefan Hajnoczi         tail_buf = qemu_blockalign(bs, align);
116961007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, tail_buf,
117061007b31SStefan Hajnoczi                        align - ((offset + bytes) & (align - 1)));
117161007b31SStefan Hajnoczi 
117261007b31SStefan Hajnoczi         bytes = ROUND_UP(bytes, align);
117361007b31SStefan Hajnoczi     }
117461007b31SStefan Hajnoczi 
1175ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
117685c97ca7SKevin Wolf     ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
117761007b31SStefan Hajnoczi                               use_local_qiov ? &local_qiov : qiov,
117861007b31SStefan Hajnoczi                               flags);
117961007b31SStefan Hajnoczi     tracked_request_end(&req);
118099723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
118161007b31SStefan Hajnoczi 
118261007b31SStefan Hajnoczi     if (use_local_qiov) {
118361007b31SStefan Hajnoczi         qemu_iovec_destroy(&local_qiov);
118461007b31SStefan Hajnoczi         qemu_vfree(head_buf);
118561007b31SStefan Hajnoczi         qemu_vfree(tail_buf);
118661007b31SStefan Hajnoczi     }
118761007b31SStefan Hajnoczi 
118861007b31SStefan Hajnoczi     return ret;
118961007b31SStefan Hajnoczi }
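
/*
 * Illustrative sketch (not part of the original source): read padding in
 * bdrv_co_preadv() above, assuming request_alignment = 512.  A guest read
 * of bytes [100, 700) is widened to the aligned range [0, 1024) by
 * building a local qiov of
 *
 *     head_buf (100 bytes) + guest qiov (600 bytes) + tail_buf (324 bytes)
 *
 * so the driver sees a single aligned request; head_buf and tail_buf are
 * freed afterwards and the guest only ever sees its own 600 bytes.
 */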
119061007b31SStefan Hajnoczi 
1191adad6496SKevin Wolf static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
119261007b31SStefan Hajnoczi     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
119361007b31SStefan Hajnoczi     BdrvRequestFlags flags)
119461007b31SStefan Hajnoczi {
119561007b31SStefan Hajnoczi     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
119661007b31SStefan Hajnoczi         return -EINVAL;
119761007b31SStefan Hajnoczi     }
119861007b31SStefan Hajnoczi 
1199a03ef88fSKevin Wolf     return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
120061007b31SStefan Hajnoczi                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
120161007b31SStefan Hajnoczi }
120261007b31SStefan Hajnoczi 
120328b04a8fSKevin Wolf int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
120461007b31SStefan Hajnoczi                                int nb_sectors, QEMUIOVector *qiov)
120561007b31SStefan Hajnoczi {
120628b04a8fSKevin Wolf     trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);
120761007b31SStefan Hajnoczi 
1208adad6496SKevin Wolf     return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
120961007b31SStefan Hajnoczi }
121061007b31SStefan Hajnoczi 
12115def6b80SEric Blake /* Maximum buffer for write zeroes fallback, in bytes */
12125def6b80SEric Blake #define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
121361007b31SStefan Hajnoczi 
1214d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1215f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags)
121661007b31SStefan Hajnoczi {
121761007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
121861007b31SStefan Hajnoczi     QEMUIOVector qiov;
121961007b31SStefan Hajnoczi     struct iovec iov = {0};
122061007b31SStefan Hajnoczi     int ret = 0;
1221465fe887SEric Blake     bool need_flush = false;
1222443668caSDenis V. Lunev     int head = 0;
1223443668caSDenis V. Lunev     int tail = 0;
122461007b31SStefan Hajnoczi 
1225cf081fcaSEric Blake     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1226a5b8dd2cSEric Blake     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1227a5b8dd2cSEric Blake                         bs->bl.request_alignment);
1228b2f95feeSEric Blake     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1229b2f95feeSEric Blake                                     MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1230cf081fcaSEric Blake 
1231b8d0a980SEric Blake     assert(alignment % bs->bl.request_alignment == 0);
1232b8d0a980SEric Blake     head = offset % alignment;
1233f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % alignment;
1234b8d0a980SEric Blake     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1235b8d0a980SEric Blake     assert(max_write_zeroes >= bs->bl.request_alignment);
123661007b31SStefan Hajnoczi 
1237f5a5ca79SManos Pitsidianakis     while (bytes > 0 && !ret) {
1238f5a5ca79SManos Pitsidianakis         int num = bytes;
123961007b31SStefan Hajnoczi 
124061007b31SStefan Hajnoczi         /* Align request.  Block drivers can expect the "bulk" of the request
1241443668caSDenis V. Lunev          * to be aligned, and that unaligned requests do not cross cluster
1242443668caSDenis V. Lunev          * boundaries.
124361007b31SStefan Hajnoczi          */
1244443668caSDenis V. Lunev         if (head) {
1245b2f95feeSEric Blake             /* Make a small request up to the first aligned sector. For
1246b2f95feeSEric Blake              * convenience, limit this request to max_transfer even if
1247b2f95feeSEric Blake              * we don't need to fall back to writes.  */
1248f5a5ca79SManos Pitsidianakis             num = MIN(MIN(bytes, max_transfer), alignment - head);
1249b2f95feeSEric Blake             head = (head + num) % alignment;
1250b2f95feeSEric Blake             assert(num < max_write_zeroes);
1251d05aa8bbSEric Blake         } else if (tail && num > alignment) {
1252443668caSDenis V. Lunev             /* Shorten the request to the last aligned sector.  */
1253443668caSDenis V. Lunev             num -= tail;
125461007b31SStefan Hajnoczi         }
125561007b31SStefan Hajnoczi 
125661007b31SStefan Hajnoczi         /* limit request size */
125761007b31SStefan Hajnoczi         if (num > max_write_zeroes) {
125861007b31SStefan Hajnoczi             num = max_write_zeroes;
125961007b31SStefan Hajnoczi         }
126061007b31SStefan Hajnoczi 
126161007b31SStefan Hajnoczi         ret = -ENOTSUP;
126261007b31SStefan Hajnoczi         /* First try the efficient write zeroes operation */
1263d05aa8bbSEric Blake         if (drv->bdrv_co_pwrite_zeroes) {
1264d05aa8bbSEric Blake             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1265d05aa8bbSEric Blake                                              flags & bs->supported_zero_flags);
1266d05aa8bbSEric Blake             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1267d05aa8bbSEric Blake                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1268d05aa8bbSEric Blake                 need_flush = true;
1269d05aa8bbSEric Blake             }
1270465fe887SEric Blake         } else {
1271465fe887SEric Blake             assert(!bs->supported_zero_flags);
127261007b31SStefan Hajnoczi         }
127361007b31SStefan Hajnoczi 
127461007b31SStefan Hajnoczi         if (ret == -ENOTSUP) {
127561007b31SStefan Hajnoczi             /* Fall back to bounce buffer if write zeroes is unsupported */
1276465fe887SEric Blake             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1277465fe887SEric Blake 
1278465fe887SEric Blake             if ((flags & BDRV_REQ_FUA) &&
1279465fe887SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1280465fe887SEric Blake                 /* No need for bdrv_driver_pwritev() to do a fallback
1281465fe887SEric Blake                  * flush on each chunk; use just one at the end */
1282465fe887SEric Blake                 write_flags &= ~BDRV_REQ_FUA;
1283465fe887SEric Blake                 need_flush = true;
1284465fe887SEric Blake             }
12855def6b80SEric Blake             num = MIN(num, max_transfer);
1286d05aa8bbSEric Blake             iov.iov_len = num;
128761007b31SStefan Hajnoczi             if (iov.iov_base == NULL) {
1288d05aa8bbSEric Blake                 iov.iov_base = qemu_try_blockalign(bs, num);
128961007b31SStefan Hajnoczi                 if (iov.iov_base == NULL) {
129061007b31SStefan Hajnoczi                     ret = -ENOMEM;
129161007b31SStefan Hajnoczi                     goto fail;
129261007b31SStefan Hajnoczi                 }
1293d05aa8bbSEric Blake                 memset(iov.iov_base, 0, num);
129461007b31SStefan Hajnoczi             }
129561007b31SStefan Hajnoczi             qemu_iovec_init_external(&qiov, &iov, 1);
129661007b31SStefan Hajnoczi 
1297d05aa8bbSEric Blake             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
129861007b31SStefan Hajnoczi 
129961007b31SStefan Hajnoczi             /* Keep the bounce buffer around if it is big enough for
130061007b31SStefan Hajnoczi              * all future requests.
130161007b31SStefan Hajnoczi              */
13025def6b80SEric Blake             if (num < max_transfer) {
130361007b31SStefan Hajnoczi                 qemu_vfree(iov.iov_base);
130461007b31SStefan Hajnoczi                 iov.iov_base = NULL;
130561007b31SStefan Hajnoczi             }
130661007b31SStefan Hajnoczi         }
130761007b31SStefan Hajnoczi 
1308d05aa8bbSEric Blake         offset += num;
1309f5a5ca79SManos Pitsidianakis         bytes -= num;
131061007b31SStefan Hajnoczi     }
131161007b31SStefan Hajnoczi 
131261007b31SStefan Hajnoczi fail:
1313465fe887SEric Blake     if (ret == 0 && need_flush) {
1314465fe887SEric Blake         ret = bdrv_co_flush(bs);
1315465fe887SEric Blake     }
131661007b31SStefan Hajnoczi     qemu_vfree(iov.iov_base);
131761007b31SStefan Hajnoczi     return ret;
131861007b31SStefan Hajnoczi }
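
/*
 * Illustrative sketch (not part of the original source): how the loop
 * above splits an unaligned zeroing request.  Assuming an effective
 * alignment of 4 KiB, zeroing the byte range [3 KiB, 13 KiB) is issued
 * as three pieces:
 *
 *     head:   [ 3 KiB,  4 KiB) - 1 KiB, unaligned, may use the
 *                                bounce-buffer write fallback
 *     middle: [ 4 KiB, 12 KiB) - aligned, preferably handled by
 *                                drv->bdrv_co_pwrite_zeroes()
 *     tail:   [12 KiB, 13 KiB) - 1 KiB, unaligned again
 */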
131961007b31SStefan Hajnoczi 
132061007b31SStefan Hajnoczi /*
132104ed95f4SEric Blake  * Forwards an already correctly aligned write request to the BlockDriver,
132204ed95f4SEric Blake  * after possibly fragmenting it.
132361007b31SStefan Hajnoczi  */
132485c97ca7SKevin Wolf static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
132561007b31SStefan Hajnoczi     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1326cff86b38SEric Blake     int64_t align, QEMUIOVector *qiov, int flags)
132761007b31SStefan Hajnoczi {
132885c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
132961007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
133061007b31SStefan Hajnoczi     bool waited;
133161007b31SStefan Hajnoczi     int ret;
133261007b31SStefan Hajnoczi 
13339896c876SKevin Wolf     int64_t start_sector = offset >> BDRV_SECTOR_BITS;
13349896c876SKevin Wolf     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
133504ed95f4SEric Blake     uint64_t bytes_remaining = bytes;
133604ed95f4SEric Blake     int max_transfer;
133761007b31SStefan Hajnoczi 
1338cff86b38SEric Blake     assert(is_power_of_2(align));
1339cff86b38SEric Blake     assert((offset & (align - 1)) == 0);
1340cff86b38SEric Blake     assert((bytes & (align - 1)) == 0);
134161007b31SStefan Hajnoczi     assert(!qiov || bytes == qiov->size);
1342abb06c5aSDaniel P. Berrange     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1343fa166538SEric Blake     assert(!(flags & ~BDRV_REQ_MASK));
134404ed95f4SEric Blake     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
134504ed95f4SEric Blake                                    align);
134661007b31SStefan Hajnoczi 
134761007b31SStefan Hajnoczi     waited = wait_serialising_requests(req);
134861007b31SStefan Hajnoczi     assert(!waited || !req->serialising);
134961007b31SStefan Hajnoczi     assert(req->overlap_offset <= offset);
135061007b31SStefan Hajnoczi     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1351362b3786SMax Reitz     assert(child->perm & BLK_PERM_WRITE);
1352362b3786SMax Reitz     assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
135361007b31SStefan Hajnoczi 
135461007b31SStefan Hajnoczi     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
135561007b31SStefan Hajnoczi 
135661007b31SStefan Hajnoczi     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1357c1499a5eSEric Blake         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
135861007b31SStefan Hajnoczi         qemu_iovec_is_zero(qiov)) {
135961007b31SStefan Hajnoczi         flags |= BDRV_REQ_ZERO_WRITE;
136061007b31SStefan Hajnoczi         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
136161007b31SStefan Hajnoczi             flags |= BDRV_REQ_MAY_UNMAP;
136261007b31SStefan Hajnoczi         }
136361007b31SStefan Hajnoczi     }
136461007b31SStefan Hajnoczi 
136561007b31SStefan Hajnoczi     if (ret < 0) {
136661007b31SStefan Hajnoczi         /* Do nothing, write notifier decided to fail this request */
136761007b31SStefan Hajnoczi     } else if (flags & BDRV_REQ_ZERO_WRITE) {
13689a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
13699896c876SKevin Wolf         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
13703ea1a091SPavel Butsykin     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
13713ea1a091SPavel Butsykin         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
137204ed95f4SEric Blake     } else if (bytes <= max_transfer) {
13739a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV);
137478a07294SKevin Wolf         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
137504ed95f4SEric Blake     } else {
137604ed95f4SEric Blake         bdrv_debug_event(bs, BLKDBG_PWRITEV);
137704ed95f4SEric Blake         while (bytes_remaining) {
137804ed95f4SEric Blake             int num = MIN(bytes_remaining, max_transfer);
137904ed95f4SEric Blake             QEMUIOVector local_qiov;
138004ed95f4SEric Blake             int local_flags = flags;
138104ed95f4SEric Blake 
138204ed95f4SEric Blake             assert(num);
138304ed95f4SEric Blake             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
138404ed95f4SEric Blake                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
138504ed95f4SEric Blake                 /* If FUA is going to be emulated by flush, we only
138604ed95f4SEric Blake                  * need to flush on the last iteration */
138704ed95f4SEric Blake                 local_flags &= ~BDRV_REQ_FUA;
138804ed95f4SEric Blake             }
138904ed95f4SEric Blake             qemu_iovec_init(&local_qiov, qiov->niov);
139004ed95f4SEric Blake             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
139104ed95f4SEric Blake 
139204ed95f4SEric Blake             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
139304ed95f4SEric Blake                                       num, &local_qiov, local_flags);
139404ed95f4SEric Blake             qemu_iovec_destroy(&local_qiov);
139504ed95f4SEric Blake             if (ret < 0) {
139604ed95f4SEric Blake                 break;
139704ed95f4SEric Blake             }
139804ed95f4SEric Blake             bytes_remaining -= num;
139904ed95f4SEric Blake         }
140061007b31SStefan Hajnoczi     }
14019a4f4c31SKevin Wolf     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
140261007b31SStefan Hajnoczi 
140347fec599SPaolo Bonzini     atomic_inc(&bs->write_gen);
14049896c876SKevin Wolf     bdrv_set_dirty(bs, start_sector, end_sector - start_sector);
140561007b31SStefan Hajnoczi 
1406f7946da2SPaolo Bonzini     stat64_max(&bs->wr_highest_offset, offset + bytes);
140761007b31SStefan Hajnoczi 
140861007b31SStefan Hajnoczi     if (ret >= 0) {
14099896c876SKevin Wolf         bs->total_sectors = MAX(bs->total_sectors, end_sector);
141004ed95f4SEric Blake         ret = 0;
141161007b31SStefan Hajnoczi     }
141261007b31SStefan Hajnoczi 
141361007b31SStefan Hajnoczi     return ret;
141461007b31SStefan Hajnoczi }
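
/*
 * Illustrative sketch (not part of the original source): FUA handling
 * when a large write is fragmented above.  Assuming the driver does not
 * advertise BDRV_REQ_FUA in supported_write_flags, only the last chunk
 * keeps the flag, so the FUA emulation flushes once at the end rather
 * than after every chunk:
 *
 *     chunks 1 .. n-1: local_flags = flags & ~BDRV_REQ_FUA
 *     chunk  n:        local_flags = flags   (flush after this write)
 */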
141561007b31SStefan Hajnoczi 
141685c97ca7SKevin Wolf static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
14179eeb6dd1SFam Zheng                                                 int64_t offset,
14189eeb6dd1SFam Zheng                                                 unsigned int bytes,
14199eeb6dd1SFam Zheng                                                 BdrvRequestFlags flags,
14209eeb6dd1SFam Zheng                                                 BdrvTrackedRequest *req)
14219eeb6dd1SFam Zheng {
142285c97ca7SKevin Wolf     BlockDriverState *bs = child->bs;
14239eeb6dd1SFam Zheng     uint8_t *buf = NULL;
14249eeb6dd1SFam Zheng     QEMUIOVector local_qiov;
14259eeb6dd1SFam Zheng     struct iovec iov;
1426a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
14279eeb6dd1SFam Zheng     unsigned int head_padding_bytes, tail_padding_bytes;
14289eeb6dd1SFam Zheng     int ret = 0;
14299eeb6dd1SFam Zheng 
14309eeb6dd1SFam Zheng     head_padding_bytes = offset & (align - 1);
1431f13ce1beSDenis V. Lunev     tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
14329eeb6dd1SFam Zheng 
14339eeb6dd1SFam Zheng 
14349eeb6dd1SFam Zheng     assert(flags & BDRV_REQ_ZERO_WRITE);
14359eeb6dd1SFam Zheng     if (head_padding_bytes || tail_padding_bytes) {
14369eeb6dd1SFam Zheng         buf = qemu_blockalign(bs, align);
14379eeb6dd1SFam Zheng         iov = (struct iovec) {
14389eeb6dd1SFam Zheng             .iov_base   = buf,
14399eeb6dd1SFam Zheng             .iov_len    = align,
14409eeb6dd1SFam Zheng         };
14419eeb6dd1SFam Zheng         qemu_iovec_init_external(&local_qiov, &iov, 1);
14429eeb6dd1SFam Zheng     }
14439eeb6dd1SFam Zheng     if (head_padding_bytes) {
14449eeb6dd1SFam Zheng         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
14459eeb6dd1SFam Zheng 
14469eeb6dd1SFam Zheng         /* RMW the unaligned part before head. */
14479eeb6dd1SFam Zheng         mark_request_serialising(req, align);
14489eeb6dd1SFam Zheng         wait_serialising_requests(req);
14499a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
145085c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
14519eeb6dd1SFam Zheng                                   align, &local_qiov, 0);
14529eeb6dd1SFam Zheng         if (ret < 0) {
14539eeb6dd1SFam Zheng             goto fail;
14549eeb6dd1SFam Zheng         }
14559a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
14569eeb6dd1SFam Zheng 
14579eeb6dd1SFam Zheng         memset(buf + head_padding_bytes, 0, zero_bytes);
145885c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
1459cff86b38SEric Blake                                    align, &local_qiov,
14609eeb6dd1SFam Zheng                                    flags & ~BDRV_REQ_ZERO_WRITE);
14619eeb6dd1SFam Zheng         if (ret < 0) {
14629eeb6dd1SFam Zheng             goto fail;
14639eeb6dd1SFam Zheng         }
14649eeb6dd1SFam Zheng         offset += zero_bytes;
14659eeb6dd1SFam Zheng         bytes -= zero_bytes;
14669eeb6dd1SFam Zheng     }
14679eeb6dd1SFam Zheng 
14689eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
14699eeb6dd1SFam Zheng     if (bytes >= align) {
14709eeb6dd1SFam Zheng         /* Write the aligned part in the middle. */
14719eeb6dd1SFam Zheng         uint64_t aligned_bytes = bytes & ~(align - 1);
147285c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
14739eeb6dd1SFam Zheng                                    NULL, flags);
14749eeb6dd1SFam Zheng         if (ret < 0) {
14759eeb6dd1SFam Zheng             goto fail;
14769eeb6dd1SFam Zheng         }
14779eeb6dd1SFam Zheng         bytes -= aligned_bytes;
14789eeb6dd1SFam Zheng         offset += aligned_bytes;
14799eeb6dd1SFam Zheng     }
14809eeb6dd1SFam Zheng 
14819eeb6dd1SFam Zheng     assert(!bytes || (offset & (align - 1)) == 0);
14829eeb6dd1SFam Zheng     if (bytes) {
14839eeb6dd1SFam Zheng         assert(align == tail_padding_bytes + bytes);
14849eeb6dd1SFam Zheng         /* RMW the unaligned part after tail. */
14859eeb6dd1SFam Zheng         mark_request_serialising(req, align);
14869eeb6dd1SFam Zheng         wait_serialising_requests(req);
14879a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
148885c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, req, offset, align,
14899eeb6dd1SFam Zheng                                   align, &local_qiov, 0);
14909eeb6dd1SFam Zheng         if (ret < 0) {
14919eeb6dd1SFam Zheng             goto fail;
14929eeb6dd1SFam Zheng         }
14939a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
14949eeb6dd1SFam Zheng 
14959eeb6dd1SFam Zheng         memset(buf, 0, bytes);
149685c97ca7SKevin Wolf         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
14979eeb6dd1SFam Zheng                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
14989eeb6dd1SFam Zheng     }
14999eeb6dd1SFam Zheng fail:
15009eeb6dd1SFam Zheng     qemu_vfree(buf);
15019eeb6dd1SFam Zheng     return ret;
15029eeb6dd1SFam Zheng 
15039eeb6dd1SFam Zheng }
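
/*
 * Illustrative sketch (not part of the original source): the zero-write
 * RMW above with request_alignment = 512.  Zeroing bytes [100, 700) is
 * handled as:
 *
 *     head: read block [0, 512), zero bytes [100, 512) in the bounce
 *           buffer, write the block back
 *     tail: read block [512, 1024), zero bytes [512, 700) in the bounce
 *           buffer, write the block back
 *
 * No fully aligned middle block remains in this example; larger requests
 * would zero the aligned middle directly via bdrv_aligned_pwritev().
 */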
15049eeb6dd1SFam Zheng 
150561007b31SStefan Hajnoczi /*
150661007b31SStefan Hajnoczi  * Handle a write request in coroutine context
150761007b31SStefan Hajnoczi  */
1508a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
150961007b31SStefan Hajnoczi     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
151061007b31SStefan Hajnoczi     BdrvRequestFlags flags)
151161007b31SStefan Hajnoczi {
1512a03ef88fSKevin Wolf     BlockDriverState *bs = child->bs;
151361007b31SStefan Hajnoczi     BdrvTrackedRequest req;
1514a5b8dd2cSEric Blake     uint64_t align = bs->bl.request_alignment;
151561007b31SStefan Hajnoczi     uint8_t *head_buf = NULL;
151661007b31SStefan Hajnoczi     uint8_t *tail_buf = NULL;
151761007b31SStefan Hajnoczi     QEMUIOVector local_qiov;
151861007b31SStefan Hajnoczi     bool use_local_qiov = false;
151961007b31SStefan Hajnoczi     int ret;
152061007b31SStefan Hajnoczi 
152161007b31SStefan Hajnoczi     if (!bs->drv) {
152261007b31SStefan Hajnoczi         return -ENOMEDIUM;
152361007b31SStefan Hajnoczi     }
152461007b31SStefan Hajnoczi     if (bs->read_only) {
1525eaf5fe2dSPaolo Bonzini         return -EPERM;
152661007b31SStefan Hajnoczi     }
152704c01a5cSKevin Wolf     assert(!(bs->open_flags & BDRV_O_INACTIVE));
152861007b31SStefan Hajnoczi 
152961007b31SStefan Hajnoczi     ret = bdrv_check_byte_request(bs, offset, bytes);
153061007b31SStefan Hajnoczi     if (ret < 0) {
153161007b31SStefan Hajnoczi         return ret;
153261007b31SStefan Hajnoczi     }
153361007b31SStefan Hajnoczi 
153499723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
153561007b31SStefan Hajnoczi     /*
153661007b31SStefan Hajnoczi      * Align write if necessary by performing a read-modify-write cycle.
153761007b31SStefan Hajnoczi      * Pad qiov with the read parts and be sure to have a tracked request not
153861007b31SStefan Hajnoczi      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
153961007b31SStefan Hajnoczi      */
1540ebde595cSFam Zheng     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
154161007b31SStefan Hajnoczi 
15429eeb6dd1SFam Zheng     if (!qiov) {
154385c97ca7SKevin Wolf         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
15449eeb6dd1SFam Zheng         goto out;
15459eeb6dd1SFam Zheng     }
15469eeb6dd1SFam Zheng 
154761007b31SStefan Hajnoczi     if (offset & (align - 1)) {
154861007b31SStefan Hajnoczi         QEMUIOVector head_qiov;
154961007b31SStefan Hajnoczi         struct iovec head_iov;
155061007b31SStefan Hajnoczi 
155161007b31SStefan Hajnoczi         mark_request_serialising(&req, align);
155261007b31SStefan Hajnoczi         wait_serialising_requests(&req);
155361007b31SStefan Hajnoczi 
155461007b31SStefan Hajnoczi         head_buf = qemu_blockalign(bs, align);
155561007b31SStefan Hajnoczi         head_iov = (struct iovec) {
155661007b31SStefan Hajnoczi             .iov_base   = head_buf,
155761007b31SStefan Hajnoczi             .iov_len    = align,
155861007b31SStefan Hajnoczi         };
155961007b31SStefan Hajnoczi         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
156061007b31SStefan Hajnoczi 
15619a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
156285c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
156361007b31SStefan Hajnoczi                                   align, &head_qiov, 0);
156461007b31SStefan Hajnoczi         if (ret < 0) {
156561007b31SStefan Hajnoczi             goto fail;
156661007b31SStefan Hajnoczi         }
15679a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
156861007b31SStefan Hajnoczi 
156961007b31SStefan Hajnoczi         qemu_iovec_init(&local_qiov, qiov->niov + 2);
157061007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
157161007b31SStefan Hajnoczi         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
157261007b31SStefan Hajnoczi         use_local_qiov = true;
157361007b31SStefan Hajnoczi 
157461007b31SStefan Hajnoczi         bytes += offset & (align - 1);
157561007b31SStefan Hajnoczi         offset = offset & ~(align - 1);
1576117bc3faSPeter Lieven 
1577117bc3faSPeter Lieven         /* We have read the tail already if the request is smaller
1578117bc3faSPeter Lieven          * than one aligned block.
1579117bc3faSPeter Lieven          */
1580117bc3faSPeter Lieven         if (bytes < align) {
1581117bc3faSPeter Lieven             qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1582117bc3faSPeter Lieven             bytes = align;
1583117bc3faSPeter Lieven         }
158461007b31SStefan Hajnoczi     }
158561007b31SStefan Hajnoczi 
158661007b31SStefan Hajnoczi     if ((offset + bytes) & (align - 1)) {
158761007b31SStefan Hajnoczi         QEMUIOVector tail_qiov;
158861007b31SStefan Hajnoczi         struct iovec tail_iov;
158961007b31SStefan Hajnoczi         size_t tail_bytes;
159061007b31SStefan Hajnoczi         bool waited;
159161007b31SStefan Hajnoczi 
159261007b31SStefan Hajnoczi         mark_request_serialising(&req, align);
159361007b31SStefan Hajnoczi         waited = wait_serialising_requests(&req);
159461007b31SStefan Hajnoczi         assert(!waited || !use_local_qiov);
159561007b31SStefan Hajnoczi 
159661007b31SStefan Hajnoczi         tail_buf = qemu_blockalign(bs, align);
159761007b31SStefan Hajnoczi         tail_iov = (struct iovec) {
159861007b31SStefan Hajnoczi             .iov_base   = tail_buf,
159961007b31SStefan Hajnoczi             .iov_len    = align,
160061007b31SStefan Hajnoczi         };
160161007b31SStefan Hajnoczi         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
160261007b31SStefan Hajnoczi 
16039a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
160485c97ca7SKevin Wolf         ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
160585c97ca7SKevin Wolf                                   align, align, &tail_qiov, 0);
160661007b31SStefan Hajnoczi         if (ret < 0) {
160761007b31SStefan Hajnoczi             goto fail;
160861007b31SStefan Hajnoczi         }
16099a4f4c31SKevin Wolf         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
161061007b31SStefan Hajnoczi 
161161007b31SStefan Hajnoczi         if (!use_local_qiov) {
161261007b31SStefan Hajnoczi             qemu_iovec_init(&local_qiov, qiov->niov + 1);
161361007b31SStefan Hajnoczi             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
161461007b31SStefan Hajnoczi             use_local_qiov = true;
161561007b31SStefan Hajnoczi         }
161661007b31SStefan Hajnoczi 
161761007b31SStefan Hajnoczi         tail_bytes = (offset + bytes) & (align - 1);
161861007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
161961007b31SStefan Hajnoczi 
162061007b31SStefan Hajnoczi         bytes = ROUND_UP(bytes, align);
162161007b31SStefan Hajnoczi     }
162261007b31SStefan Hajnoczi 
162385c97ca7SKevin Wolf     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
162461007b31SStefan Hajnoczi                                use_local_qiov ? &local_qiov : qiov,
162561007b31SStefan Hajnoczi                                flags);
162661007b31SStefan Hajnoczi 
162761007b31SStefan Hajnoczi fail:
162861007b31SStefan Hajnoczi 
162961007b31SStefan Hajnoczi     if (use_local_qiov) {
163061007b31SStefan Hajnoczi         qemu_iovec_destroy(&local_qiov);
163161007b31SStefan Hajnoczi     }
163261007b31SStefan Hajnoczi     qemu_vfree(head_buf);
163361007b31SStefan Hajnoczi     qemu_vfree(tail_buf);
16349eeb6dd1SFam Zheng out:
16359eeb6dd1SFam Zheng     tracked_request_end(&req);
163699723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
163761007b31SStefan Hajnoczi     return ret;
163861007b31SStefan Hajnoczi }
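
/*
 * Illustrative sketch (not part of the original source): the write RMW in
 * bdrv_co_pwritev() above, assuming request_alignment = 512.  A guest
 * write of bytes [100, 700) becomes one aligned write of [0, 1024):
 *
 *     1. read [0, 512) into head_buf and keep its first 100 bytes
 *     2. read [512, 1024) into tail_buf and keep its last 324 bytes
 *     3. write [0, 1024) assembled as
 *        head_buf[0..100) + guest data (600 bytes) + tail_buf[188..512)
 *
 * The request is marked serialising so that concurrent writes cannot
 * slip in between the RMW reads and the final write-back.
 */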
163961007b31SStefan Hajnoczi 
1640adad6496SKevin Wolf static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
164161007b31SStefan Hajnoczi     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
164261007b31SStefan Hajnoczi     BdrvRequestFlags flags)
164361007b31SStefan Hajnoczi {
164461007b31SStefan Hajnoczi     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
164561007b31SStefan Hajnoczi         return -EINVAL;
164661007b31SStefan Hajnoczi     }
164761007b31SStefan Hajnoczi 
1648a03ef88fSKevin Wolf     return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
164961007b31SStefan Hajnoczi                            nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
165061007b31SStefan Hajnoczi }
165161007b31SStefan Hajnoczi 
165225ec177dSKevin Wolf int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
165361007b31SStefan Hajnoczi     int nb_sectors, QEMUIOVector *qiov)
165461007b31SStefan Hajnoczi {
165525ec177dSKevin Wolf     trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);
165661007b31SStefan Hajnoczi 
1657adad6496SKevin Wolf     return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
165861007b31SStefan Hajnoczi }
165961007b31SStefan Hajnoczi 
1660a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1661f5a5ca79SManos Pitsidianakis                                        int bytes, BdrvRequestFlags flags)
166261007b31SStefan Hajnoczi {
1663f5a5ca79SManos Pitsidianakis     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
166461007b31SStefan Hajnoczi 
1665a03ef88fSKevin Wolf     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
166661007b31SStefan Hajnoczi         flags &= ~BDRV_REQ_MAY_UNMAP;
166761007b31SStefan Hajnoczi     }
166861007b31SStefan Hajnoczi 
1669f5a5ca79SManos Pitsidianakis     return bdrv_co_pwritev(child, offset, bytes, NULL,
167061007b31SStefan Hajnoczi                            BDRV_REQ_ZERO_WRITE | flags);
167161007b31SStefan Hajnoczi }
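
/*
 * Illustrative usage sketch (not part of the original source): a
 * coroutine caller zeroing a region and allowing the blocks to be
 * unmapped when the format/protocol supports it.  "child" is assumed to
 * be a valid BdrvChild with write permission; "zero_region" is a
 * hypothetical helper name.
 *
 *     int coroutine_fn zero_region(BdrvChild *child, int64_t off, int len)
 *     {
 *         return bdrv_co_pwrite_zeroes(child, off, len, BDRV_REQ_MAY_UNMAP);
 *     }
 */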
167261007b31SStefan Hajnoczi 
16734085f5c7SJohn Snow /*
16744085f5c7SJohn Snow  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
16754085f5c7SJohn Snow  */
16764085f5c7SJohn Snow int bdrv_flush_all(void)
16774085f5c7SJohn Snow {
16784085f5c7SJohn Snow     BdrvNextIterator it;
16794085f5c7SJohn Snow     BlockDriverState *bs = NULL;
16804085f5c7SJohn Snow     int result = 0;
16814085f5c7SJohn Snow 
16824085f5c7SJohn Snow     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
16834085f5c7SJohn Snow         AioContext *aio_context = bdrv_get_aio_context(bs);
16844085f5c7SJohn Snow         int ret;
16854085f5c7SJohn Snow 
16864085f5c7SJohn Snow         aio_context_acquire(aio_context);
16874085f5c7SJohn Snow         ret = bdrv_flush(bs);
16884085f5c7SJohn Snow         if (ret < 0 && !result) {
16894085f5c7SJohn Snow             result = ret;
16904085f5c7SJohn Snow         }
16914085f5c7SJohn Snow         aio_context_release(aio_context);
16924085f5c7SJohn Snow     }
16934085f5c7SJohn Snow 
16944085f5c7SJohn Snow     return result;
16954085f5c7SJohn Snow }
16964085f5c7SJohn Snow 
16974085f5c7SJohn Snow 
169861007b31SStefan Hajnoczi typedef struct BdrvCoGetBlockStatusData {
169961007b31SStefan Hajnoczi     BlockDriverState *bs;
170061007b31SStefan Hajnoczi     BlockDriverState *base;
170167a0fd2aSFam Zheng     BlockDriverState **file;
170261007b31SStefan Hajnoczi     int64_t sector_num;
170361007b31SStefan Hajnoczi     int nb_sectors;
170461007b31SStefan Hajnoczi     int *pnum;
170561007b31SStefan Hajnoczi     int64_t ret;
170661007b31SStefan Hajnoczi     bool done;
170761007b31SStefan Hajnoczi } BdrvCoGetBlockStatusData;
170861007b31SStefan Hajnoczi 
170961007b31SStefan Hajnoczi /*
171061007b31SStefan Hajnoczi  * Returns the allocation status of the specified sectors.
171161007b31SStefan Hajnoczi  * Drivers not implementing the functionality are assumed to not support
171261007b31SStefan Hajnoczi  * backing files, hence all their sectors are reported as allocated.
171361007b31SStefan Hajnoczi  *
1714fb0d8654SEric Blake  * If 'sector_num' is beyond the end of the disk image the return value is
1715fb0d8654SEric Blake  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
171661007b31SStefan Hajnoczi  *
171761007b31SStefan Hajnoczi  * 'pnum' is set to the number of sectors (including and immediately following
171861007b31SStefan Hajnoczi  * the specified sector) that are known to be in the same
171961007b31SStefan Hajnoczi  * allocated/unallocated state.
172061007b31SStefan Hajnoczi  *
172161007b31SStefan Hajnoczi  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1722fb0d8654SEric Blake  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
1723fb0d8654SEric Blake  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
172467a0fd2aSFam Zheng  *
172567a0fd2aSFam Zheng  * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
172667a0fd2aSFam Zheng  * points to the BDS which the sector range is allocated in.
172761007b31SStefan Hajnoczi  */
172861007b31SStefan Hajnoczi static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
172961007b31SStefan Hajnoczi                                                      int64_t sector_num,
173067a0fd2aSFam Zheng                                                      int nb_sectors, int *pnum,
173167a0fd2aSFam Zheng                                                      BlockDriverState **file)
173261007b31SStefan Hajnoczi {
173361007b31SStefan Hajnoczi     int64_t total_sectors;
173461007b31SStefan Hajnoczi     int64_t n;
173561007b31SStefan Hajnoczi     int64_t ret, ret2;
173661007b31SStefan Hajnoczi 
173761007b31SStefan Hajnoczi     total_sectors = bdrv_nb_sectors(bs);
173861007b31SStefan Hajnoczi     if (total_sectors < 0) {
173961007b31SStefan Hajnoczi         return total_sectors;
174061007b31SStefan Hajnoczi     }
174161007b31SStefan Hajnoczi 
174261007b31SStefan Hajnoczi     if (sector_num >= total_sectors) {
174361007b31SStefan Hajnoczi         *pnum = 0;
1744fb0d8654SEric Blake         return BDRV_BLOCK_EOF;
174561007b31SStefan Hajnoczi     }
174661007b31SStefan Hajnoczi 
174761007b31SStefan Hajnoczi     n = total_sectors - sector_num;
174861007b31SStefan Hajnoczi     if (n < nb_sectors) {
174961007b31SStefan Hajnoczi         nb_sectors = n;
175061007b31SStefan Hajnoczi     }
175161007b31SStefan Hajnoczi 
175261007b31SStefan Hajnoczi     if (!bs->drv->bdrv_co_get_block_status) {
175361007b31SStefan Hajnoczi         *pnum = nb_sectors;
175461007b31SStefan Hajnoczi         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1755fb0d8654SEric Blake         if (sector_num + nb_sectors == total_sectors) {
1756fb0d8654SEric Blake             ret |= BDRV_BLOCK_EOF;
1757fb0d8654SEric Blake         }
175861007b31SStefan Hajnoczi         if (bs->drv->protocol_name) {
175961007b31SStefan Hajnoczi             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
176061007b31SStefan Hajnoczi         }
176161007b31SStefan Hajnoczi         return ret;
176261007b31SStefan Hajnoczi     }
176361007b31SStefan Hajnoczi 
176467a0fd2aSFam Zheng     *file = NULL;
176599723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
176667a0fd2aSFam Zheng     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
176767a0fd2aSFam Zheng                                             file);
176861007b31SStefan Hajnoczi     if (ret < 0) {
176961007b31SStefan Hajnoczi         *pnum = 0;
177099723548SPaolo Bonzini         goto out;
177161007b31SStefan Hajnoczi     }
177261007b31SStefan Hajnoczi 
177361007b31SStefan Hajnoczi     if (ret & BDRV_BLOCK_RAW) {
177461007b31SStefan Hajnoczi         assert(ret & BDRV_BLOCK_OFFSET_VALID);
1775ee29d6adSEric Blake         ret = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
177667a0fd2aSFam Zheng                                        *pnum, pnum, file);
177799723548SPaolo Bonzini         goto out;
177861007b31SStefan Hajnoczi     }
177961007b31SStefan Hajnoczi 
178061007b31SStefan Hajnoczi     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
178161007b31SStefan Hajnoczi         ret |= BDRV_BLOCK_ALLOCATED;
1782a53f1a95SPaolo Bonzini     } else {
178361007b31SStefan Hajnoczi         if (bdrv_unallocated_blocks_are_zero(bs)) {
178461007b31SStefan Hajnoczi             ret |= BDRV_BLOCK_ZERO;
1785760e0063SKevin Wolf         } else if (bs->backing) {
1786760e0063SKevin Wolf             BlockDriverState *bs2 = bs->backing->bs;
178761007b31SStefan Hajnoczi             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
178861007b31SStefan Hajnoczi             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
178961007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
179061007b31SStefan Hajnoczi             }
179161007b31SStefan Hajnoczi         }
179261007b31SStefan Hajnoczi     }
179361007b31SStefan Hajnoczi 
1794ac987b30SFam Zheng     if (*file && *file != bs &&
179561007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
179661007b31SStefan Hajnoczi         (ret & BDRV_BLOCK_OFFSET_VALID)) {
179767a0fd2aSFam Zheng         BlockDriverState *file2;
179861007b31SStefan Hajnoczi         int file_pnum;
179961007b31SStefan Hajnoczi 
1800ac987b30SFam Zheng         ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
180167a0fd2aSFam Zheng                                         *pnum, &file_pnum, &file2);
180261007b31SStefan Hajnoczi         if (ret2 >= 0) {
180461007b31SStefan Hajnoczi             /* Ignore errors.  This is just providing extra information; it
180461007b31SStefan Hajnoczi              * is useful but not necessary.
180561007b31SStefan Hajnoczi              */
1806*c61e684eSEric Blake             if (ret2 & BDRV_BLOCK_EOF &&
1807*c61e684eSEric Blake                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
1808*c61e684eSEric Blake                 /*
1809*c61e684eSEric Blake                  * It is valid for the format block driver to read
1810*c61e684eSEric Blake                  * beyond the end of the underlying file's current
1811*c61e684eSEric Blake                  * size; such areas read as zero.
1812*c61e684eSEric Blake                  */
181361007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
181461007b31SStefan Hajnoczi             } else {
181561007b31SStefan Hajnoczi                 /* Limit request to the range reported by the protocol driver */
181661007b31SStefan Hajnoczi                 *pnum = file_pnum;
181761007b31SStefan Hajnoczi                 ret |= (ret2 & BDRV_BLOCK_ZERO);
181861007b31SStefan Hajnoczi             }
181961007b31SStefan Hajnoczi         }
182061007b31SStefan Hajnoczi     }
182161007b31SStefan Hajnoczi 
182299723548SPaolo Bonzini out:
182399723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
1824fb0d8654SEric Blake     if (ret >= 0 && sector_num + *pnum == total_sectors) {
1825fb0d8654SEric Blake         ret |= BDRV_BLOCK_EOF;
1826fb0d8654SEric Blake     }
182761007b31SStefan Hajnoczi     return ret;
182861007b31SStefan Hajnoczi }
182961007b31SStefan Hajnoczi 
1830ba3f0e25SFam Zheng static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1831ba3f0e25SFam Zheng         BlockDriverState *base,
1832ba3f0e25SFam Zheng         int64_t sector_num,
1833ba3f0e25SFam Zheng         int nb_sectors,
183467a0fd2aSFam Zheng         int *pnum,
183567a0fd2aSFam Zheng         BlockDriverState **file)
1836ba3f0e25SFam Zheng {
1837ba3f0e25SFam Zheng     BlockDriverState *p;
1838ba3f0e25SFam Zheng     int64_t ret = 0;
1839*c61e684eSEric Blake     bool first = true;
1840ba3f0e25SFam Zheng 
1841ba3f0e25SFam Zheng     assert(bs != base);
1842760e0063SKevin Wolf     for (p = bs; p != base; p = backing_bs(p)) {
184367a0fd2aSFam Zheng         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1844*c61e684eSEric Blake         if (ret < 0) {
1845*c61e684eSEric Blake             break;
1846*c61e684eSEric Blake         }
1847*c61e684eSEric Blake         if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
1848*c61e684eSEric Blake             /*
1849*c61e684eSEric Blake              * Reading beyond the end of the file continues to read
1850*c61e684eSEric Blake              * zeroes, but we can only widen the result to the
1851*c61e684eSEric Blake              * unallocated length we learned from an earlier
1852*c61e684eSEric Blake              * iteration.
1853*c61e684eSEric Blake              */
1854*c61e684eSEric Blake             *pnum = nb_sectors;
1855*c61e684eSEric Blake         }
1856*c61e684eSEric Blake         if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
1857ba3f0e25SFam Zheng             break;
1858ba3f0e25SFam Zheng         }
1859ba3f0e25SFam Zheng         /* [sector_num, pnum] unallocated on this layer, which could be only
1860ba3f0e25SFam Zheng          * the first part of [sector_num, nb_sectors].  */
1861ba3f0e25SFam Zheng         nb_sectors = MIN(nb_sectors, *pnum);
1862*c61e684eSEric Blake         first = false;
1863ba3f0e25SFam Zheng     }
1864ba3f0e25SFam Zheng     return ret;
1865ba3f0e25SFam Zheng }
1866ba3f0e25SFam Zheng 
1867ba3f0e25SFam Zheng /* Coroutine wrapper for bdrv_get_block_status_above() */
1868ba3f0e25SFam Zheng static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
186961007b31SStefan Hajnoczi {
187061007b31SStefan Hajnoczi     BdrvCoGetBlockStatusData *data = opaque;
187161007b31SStefan Hajnoczi 
1872ba3f0e25SFam Zheng     data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1873ba3f0e25SFam Zheng                                                data->sector_num,
1874ba3f0e25SFam Zheng                                                data->nb_sectors,
187567a0fd2aSFam Zheng                                                data->pnum,
187667a0fd2aSFam Zheng                                                data->file);
187761007b31SStefan Hajnoczi     data->done = true;
187861007b31SStefan Hajnoczi }
187961007b31SStefan Hajnoczi 
188061007b31SStefan Hajnoczi /*
1881ba3f0e25SFam Zheng  * Synchronous wrapper around bdrv_co_get_block_status_above().
188261007b31SStefan Hajnoczi  *
1883ba3f0e25SFam Zheng  * See bdrv_co_get_block_status_above() for details.
188461007b31SStefan Hajnoczi  */
1885ba3f0e25SFam Zheng int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1886ba3f0e25SFam Zheng                                     BlockDriverState *base,
1887ba3f0e25SFam Zheng                                     int64_t sector_num,
188867a0fd2aSFam Zheng                                     int nb_sectors, int *pnum,
188967a0fd2aSFam Zheng                                     BlockDriverState **file)
189061007b31SStefan Hajnoczi {
189161007b31SStefan Hajnoczi     Coroutine *co;
189261007b31SStefan Hajnoczi     BdrvCoGetBlockStatusData data = {
189361007b31SStefan Hajnoczi         .bs = bs,
1894ba3f0e25SFam Zheng         .base = base,
189567a0fd2aSFam Zheng         .file = file,
189661007b31SStefan Hajnoczi         .sector_num = sector_num,
189761007b31SStefan Hajnoczi         .nb_sectors = nb_sectors,
189861007b31SStefan Hajnoczi         .pnum = pnum,
189961007b31SStefan Hajnoczi         .done = false,
190061007b31SStefan Hajnoczi     };
190161007b31SStefan Hajnoczi 
190261007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
190361007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
1904ba3f0e25SFam Zheng         bdrv_get_block_status_above_co_entry(&data);
190561007b31SStefan Hajnoczi     } else {
19060b8b8753SPaolo Bonzini         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
19070b8b8753SPaolo Bonzini                                    &data);
1908e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
190988b062c2SPaolo Bonzini         BDRV_POLL_WHILE(bs, !data.done);
191061007b31SStefan Hajnoczi     }
191161007b31SStefan Hajnoczi     return data.ret;
191261007b31SStefan Hajnoczi }
191361007b31SStefan Hajnoczi 
1914ba3f0e25SFam Zheng int64_t bdrv_get_block_status(BlockDriverState *bs,
1915ba3f0e25SFam Zheng                               int64_t sector_num,
191667a0fd2aSFam Zheng                               int nb_sectors, int *pnum,
191767a0fd2aSFam Zheng                               BlockDriverState **file)
1918ba3f0e25SFam Zheng {
1919760e0063SKevin Wolf     return bdrv_get_block_status_above(bs, backing_bs(bs),
192067a0fd2aSFam Zheng                                        sector_num, nb_sectors, pnum, file);
1921ba3f0e25SFam Zheng }
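
/*
 * Illustrative sketch (not part of the original source): decoding the
 * value returned by the block-status functions above.
 *
 *     BlockDriverState *file;
 *     int pnum;
 *     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors,
 *                                         &pnum, &file);
 *     if (ret >= 0) {
 *         bool allocated = ret & BDRV_BLOCK_ALLOCATED;
 *         bool zero      = ret & BDRV_BLOCK_ZERO;
 *         bool at_eof    = ret & BDRV_BLOCK_EOF;
 *         if (ret & BDRV_BLOCK_OFFSET_VALID) {
 *             int64_t host_offset = ret & BDRV_BLOCK_OFFSET_MASK;
 *             'file' then points to the BDS holding the data at host_offset
 *         }
 *     }
 */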
1922ba3f0e25SFam Zheng 
192361007b31SStefan Hajnoczi int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
192461007b31SStefan Hajnoczi                                    int nb_sectors, int *pnum)
192561007b31SStefan Hajnoczi {
192667a0fd2aSFam Zheng     BlockDriverState *file;
192767a0fd2aSFam Zheng     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
192867a0fd2aSFam Zheng                                         &file);
192961007b31SStefan Hajnoczi     if (ret < 0) {
193061007b31SStefan Hajnoczi         return ret;
193161007b31SStefan Hajnoczi     }
193261007b31SStefan Hajnoczi     return !!(ret & BDRV_BLOCK_ALLOCATED);
193361007b31SStefan Hajnoczi }
193461007b31SStefan Hajnoczi 
193561007b31SStefan Hajnoczi /*
193661007b31SStefan Hajnoczi  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
193761007b31SStefan Hajnoczi  *
193861007b31SStefan Hajnoczi  * Return true if the given sector is allocated in any image between
193961007b31SStefan Hajnoczi  * BASE and TOP (inclusive).  BASE can be NULL to check if the given
194061007b31SStefan Hajnoczi  * sector is allocated in any image of the chain.  Return false otherwise.
194161007b31SStefan Hajnoczi  *
194261007b31SStefan Hajnoczi  * 'pnum' is set to the number of sectors (including and immediately following
194361007b31SStefan Hajnoczi  *  the specified sector) that are known to be in the same
194461007b31SStefan Hajnoczi  *  allocated/unallocated state.
194561007b31SStefan Hajnoczi  *
194661007b31SStefan Hajnoczi  */
194761007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top,
194861007b31SStefan Hajnoczi                             BlockDriverState *base,
194961007b31SStefan Hajnoczi                             int64_t sector_num,
195061007b31SStefan Hajnoczi                             int nb_sectors, int *pnum)
195161007b31SStefan Hajnoczi {
195261007b31SStefan Hajnoczi     BlockDriverState *intermediate;
195361007b31SStefan Hajnoczi     int ret, n = nb_sectors;
195461007b31SStefan Hajnoczi 
195561007b31SStefan Hajnoczi     intermediate = top;
195661007b31SStefan Hajnoczi     while (intermediate && intermediate != base) {
195761007b31SStefan Hajnoczi         int pnum_inter;
195861007b31SStefan Hajnoczi         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
195961007b31SStefan Hajnoczi                                 &pnum_inter);
196061007b31SStefan Hajnoczi         if (ret < 0) {
196161007b31SStefan Hajnoczi             return ret;
196261007b31SStefan Hajnoczi         } else if (ret) {
196361007b31SStefan Hajnoczi             *pnum = pnum_inter;
196461007b31SStefan Hajnoczi             return 1;
196561007b31SStefan Hajnoczi         }
196661007b31SStefan Hajnoczi 
196761007b31SStefan Hajnoczi         /*
196861007b31SStefan Hajnoczi          * [sector_num, nb_sectors] is unallocated on top but intermediate
196961007b31SStefan Hajnoczi          * might have
197061007b31SStefan Hajnoczi          *
197161007b31SStefan Hajnoczi          * [sector_num+x, nr_sectors] allocated.
197261007b31SStefan Hajnoczi          * [sector_num+x, nb_sectors] allocated.
197361007b31SStefan Hajnoczi         if (n > pnum_inter &&
197461007b31SStefan Hajnoczi             (intermediate == top ||
197561007b31SStefan Hajnoczi              sector_num + pnum_inter < intermediate->total_sectors)) {
197661007b31SStefan Hajnoczi             n = pnum_inter;
197761007b31SStefan Hajnoczi         }
197861007b31SStefan Hajnoczi 
1979760e0063SKevin Wolf         intermediate = backing_bs(intermediate);
198061007b31SStefan Hajnoczi     }
198161007b31SStefan Hajnoczi 
198261007b31SStefan Hajnoczi     *pnum = n;
198361007b31SStefan Hajnoczi     return 0;
198461007b31SStefan Hajnoczi }
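
/* Editorial sketch (assumption, not existing code): how a caller such as the
 * commit or stream jobs might use bdrv_is_allocated_above() to skip ranges
 * that no image above 'base' provides.  The helper name is illustrative. */
static int example_skip_unallocated_above(BlockDriverState *top,
                                          BlockDriverState *base,
                                          int64_t *sector_num, int nb_sectors)
{
    while (nb_sectors > 0) {
        int pnum;
        int ret = bdrv_is_allocated_above(top, base, *sector_num, nb_sectors,
                                          &pnum);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            return 1;           /* *sector_num now points at allocated data */
        }
        *sector_num += pnum;
        nb_sectors -= pnum;
    }
    return 0;                   /* nothing above 'base' in the whole range */
}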
198561007b31SStefan Hajnoczi 
19861a8ae822SKevin Wolf typedef struct BdrvVmstateCo {
19871a8ae822SKevin Wolf     BlockDriverState    *bs;
19881a8ae822SKevin Wolf     QEMUIOVector        *qiov;
19891a8ae822SKevin Wolf     int64_t             pos;
19901a8ae822SKevin Wolf     bool                is_read;
19911a8ae822SKevin Wolf     int                 ret;
19921a8ae822SKevin Wolf } BdrvVmstateCo;
19931a8ae822SKevin Wolf 
19941a8ae822SKevin Wolf static int coroutine_fn
19951a8ae822SKevin Wolf bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
19961a8ae822SKevin Wolf                    bool is_read)
19971a8ae822SKevin Wolf {
19981a8ae822SKevin Wolf     BlockDriver *drv = bs->drv;
1999dc88a467SStefan Hajnoczi     int ret = -ENOTSUP;
2000dc88a467SStefan Hajnoczi 
2001dc88a467SStefan Hajnoczi     bdrv_inc_in_flight(bs);
20021a8ae822SKevin Wolf 
20031a8ae822SKevin Wolf     if (!drv) {
2004dc88a467SStefan Hajnoczi         ret = -ENOMEDIUM;
20051a8ae822SKevin Wolf     } else if (drv->bdrv_load_vmstate) {
2006dc88a467SStefan Hajnoczi         if (is_read) {
2007dc88a467SStefan Hajnoczi             ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2008dc88a467SStefan Hajnoczi         } else {
2009dc88a467SStefan Hajnoczi             ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2010dc88a467SStefan Hajnoczi         }
20111a8ae822SKevin Wolf     } else if (bs->file) {
2012dc88a467SStefan Hajnoczi         ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
20131a8ae822SKevin Wolf     }
20141a8ae822SKevin Wolf 
2015dc88a467SStefan Hajnoczi     bdrv_dec_in_flight(bs);
2016dc88a467SStefan Hajnoczi     return ret;
20171a8ae822SKevin Wolf }
20181a8ae822SKevin Wolf 
20191a8ae822SKevin Wolf static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
20201a8ae822SKevin Wolf {
20211a8ae822SKevin Wolf     BdrvVmstateCo *co = opaque;
20221a8ae822SKevin Wolf     co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
20231a8ae822SKevin Wolf }
20241a8ae822SKevin Wolf 
20251a8ae822SKevin Wolf static inline int
20261a8ae822SKevin Wolf bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
20271a8ae822SKevin Wolf                 bool is_read)
20281a8ae822SKevin Wolf {
20291a8ae822SKevin Wolf     if (qemu_in_coroutine()) {
20301a8ae822SKevin Wolf         return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
20311a8ae822SKevin Wolf     } else {
20321a8ae822SKevin Wolf         BdrvVmstateCo data = {
20331a8ae822SKevin Wolf             .bs         = bs,
20341a8ae822SKevin Wolf             .qiov       = qiov,
20351a8ae822SKevin Wolf             .pos        = pos,
20361a8ae822SKevin Wolf             .is_read    = is_read,
20371a8ae822SKevin Wolf             .ret        = -EINPROGRESS,
20381a8ae822SKevin Wolf         };
20390b8b8753SPaolo Bonzini         Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
20401a8ae822SKevin Wolf 
2041e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
2042ea17c9d2SStefan Hajnoczi         BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
20431a8ae822SKevin Wolf         return data.ret;
20441a8ae822SKevin Wolf     }
20451a8ae822SKevin Wolf }
20461a8ae822SKevin Wolf 
204761007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
204861007b31SStefan Hajnoczi                       int64_t pos, int size)
204961007b31SStefan Hajnoczi {
205061007b31SStefan Hajnoczi     QEMUIOVector qiov;
205161007b31SStefan Hajnoczi     struct iovec iov = {
205261007b31SStefan Hajnoczi         .iov_base   = (void *) buf,
205361007b31SStefan Hajnoczi         .iov_len    = size,
205461007b31SStefan Hajnoczi     };
2055b433d942SKevin Wolf     int ret;
205661007b31SStefan Hajnoczi 
205761007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
2058b433d942SKevin Wolf 
2059b433d942SKevin Wolf     ret = bdrv_writev_vmstate(bs, &qiov, pos);
2060b433d942SKevin Wolf     if (ret < 0) {
2061b433d942SKevin Wolf         return ret;
2062b433d942SKevin Wolf     }
2063b433d942SKevin Wolf 
2064b433d942SKevin Wolf     return size;
206561007b31SStefan Hajnoczi }
206661007b31SStefan Hajnoczi 
206761007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
206861007b31SStefan Hajnoczi {
20691a8ae822SKevin Wolf     return bdrv_rw_vmstate(bs, qiov, pos, false);
207061007b31SStefan Hajnoczi }
207161007b31SStefan Hajnoczi 
207261007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
207361007b31SStefan Hajnoczi                       int64_t pos, int size)
207461007b31SStefan Hajnoczi {
20755ddda0b8SKevin Wolf     QEMUIOVector qiov;
20765ddda0b8SKevin Wolf     struct iovec iov = {
20775ddda0b8SKevin Wolf         .iov_base   = buf,
20785ddda0b8SKevin Wolf         .iov_len    = size,
20795ddda0b8SKevin Wolf     };
2080b433d942SKevin Wolf     int ret;
20815ddda0b8SKevin Wolf 
20825ddda0b8SKevin Wolf     qemu_iovec_init_external(&qiov, &iov, 1);
2083b433d942SKevin Wolf     ret = bdrv_readv_vmstate(bs, &qiov, pos);
2084b433d942SKevin Wolf     if (ret < 0) {
2085b433d942SKevin Wolf         return ret;
2086b433d942SKevin Wolf     }
2087b433d942SKevin Wolf 
2088b433d942SKevin Wolf     return size;
20895ddda0b8SKevin Wolf }
20905ddda0b8SKevin Wolf 
20915ddda0b8SKevin Wolf int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
20925ddda0b8SKevin Wolf {
20931a8ae822SKevin Wolf     return bdrv_rw_vmstate(bs, qiov, pos, true);
209461007b31SStefan Hajnoczi }
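
/* Editorial sketch: saving a small blob of migration state through the
 * buffer-based wrappers above and reading it back.  The offset and size are
 * made-up values; the real callers are the snapshot/migration code. */
static int example_vmstate_roundtrip(BlockDriverState *bs)
{
    uint8_t out[512] = { 0xaa };
    uint8_t in[512];
    int ret;

    ret = bdrv_save_vmstate(bs, out, 0, sizeof(out)); /* returns size or -errno */
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_load_vmstate(bs, in, 0, sizeof(in));
    return ret < 0 ? ret : 0;
}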
209561007b31SStefan Hajnoczi 
209661007b31SStefan Hajnoczi /**************************************************************/
209761007b31SStefan Hajnoczi /* async I/Os */
209861007b31SStefan Hajnoczi 
209961007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb)
210061007b31SStefan Hajnoczi {
210161007b31SStefan Hajnoczi     qemu_aio_ref(acb);
210261007b31SStefan Hajnoczi     bdrv_aio_cancel_async(acb);
210361007b31SStefan Hajnoczi     while (acb->refcnt > 1) {
210461007b31SStefan Hajnoczi         if (acb->aiocb_info->get_aio_context) {
210561007b31SStefan Hajnoczi             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
210661007b31SStefan Hajnoczi         } else if (acb->bs) {
21072f47da5fSPaolo Bonzini             /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
21082f47da5fSPaolo Bonzini              * assert that we're not using an I/O thread.  Thread-safe
21092f47da5fSPaolo Bonzini              * code should use bdrv_aio_cancel_async exclusively.
21102f47da5fSPaolo Bonzini              */
21112f47da5fSPaolo Bonzini             assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
211261007b31SStefan Hajnoczi             aio_poll(bdrv_get_aio_context(acb->bs), true);
211361007b31SStefan Hajnoczi         } else {
211461007b31SStefan Hajnoczi             abort();
211561007b31SStefan Hajnoczi         }
211661007b31SStefan Hajnoczi     }
211761007b31SStefan Hajnoczi     qemu_aio_unref(acb);
211861007b31SStefan Hajnoczi }
211961007b31SStefan Hajnoczi 
212061007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
212161007b31SStefan Hajnoczi  * cancel_async; otherwise we do nothing and let the request complete normally.
212261007b31SStefan Hajnoczi  * In either case the completion callback must be called. */
212361007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
212461007b31SStefan Hajnoczi {
212561007b31SStefan Hajnoczi     if (acb->aiocb_info->cancel_async) {
212661007b31SStefan Hajnoczi         acb->aiocb_info->cancel_async(acb);
212761007b31SStefan Hajnoczi     }
212861007b31SStefan Hajnoczi }
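
/* Editorial sketch: choosing between the two cancellation helpers above.
 * Code that may run outside the main loop should prefer the async variant
 * and rely on the completion callback; only main-loop code may block in
 * bdrv_aio_cancel(). */
static void example_cancel(BlockAIOCB *acb, bool may_block)
{
    if (may_block) {
        bdrv_aio_cancel(acb);        /* waits until the callback has run */
    } else {
        bdrv_aio_cancel_async(acb);  /* returns at once; callback still fires */
    }
}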
212961007b31SStefan Hajnoczi 
213061007b31SStefan Hajnoczi /**************************************************************/
213161007b31SStefan Hajnoczi /* Coroutine block device emulation */
213261007b31SStefan Hajnoczi 
2133e293b7a3SKevin Wolf typedef struct FlushCo {
2134e293b7a3SKevin Wolf     BlockDriverState *bs;
2135e293b7a3SKevin Wolf     int ret;
2136e293b7a3SKevin Wolf } FlushCo;
2137e293b7a3SKevin Wolf 
2138e293b7a3SKevin Wolf 
213961007b31SStefan Hajnoczi static void coroutine_fn bdrv_flush_co_entry(void *opaque)
214061007b31SStefan Hajnoczi {
2141e293b7a3SKevin Wolf     FlushCo *rwco = opaque;
214261007b31SStefan Hajnoczi 
214361007b31SStefan Hajnoczi     rwco->ret = bdrv_co_flush(rwco->bs);
214461007b31SStefan Hajnoczi }
214561007b31SStefan Hajnoczi 
214661007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
214761007b31SStefan Hajnoczi {
214849ca6259SFam Zheng     int current_gen;
214949ca6259SFam Zheng     int ret = 0;
215061007b31SStefan Hajnoczi 
215199723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2152c32b82afSPavel Dovgalyuk 
2153e914404eSFam Zheng     if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
215449ca6259SFam Zheng         bdrv_is_sg(bs)) {
215549ca6259SFam Zheng         goto early_exit;
215649ca6259SFam Zheng     }
215749ca6259SFam Zheng 
21583783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
215947fec599SPaolo Bonzini     current_gen = atomic_read(&bs->write_gen);
21603ff2f67aSEvgeny Yakovlev 
21613ff2f67aSEvgeny Yakovlev     /* Wait until any previous flushes are completed */
216299723548SPaolo Bonzini     while (bs->active_flush_req) {
21633783fa3dSPaolo Bonzini         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
21643ff2f67aSEvgeny Yakovlev     }
21653ff2f67aSEvgeny Yakovlev 
21663783fa3dSPaolo Bonzini     /* Flushes reach this point in nondecreasing current_gen order.  */
216799723548SPaolo Bonzini     bs->active_flush_req = true;
21683783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
21693ff2f67aSEvgeny Yakovlev 
2170c32b82afSPavel Dovgalyuk     /* Write back all layers by calling one driver function */
2171c32b82afSPavel Dovgalyuk     if (bs->drv->bdrv_co_flush) {
2172c32b82afSPavel Dovgalyuk         ret = bs->drv->bdrv_co_flush(bs);
2173c32b82afSPavel Dovgalyuk         goto out;
2174c32b82afSPavel Dovgalyuk     }
2175c32b82afSPavel Dovgalyuk 
217661007b31SStefan Hajnoczi     /* Write back cached data to the OS even with cache=unsafe */
217761007b31SStefan Hajnoczi     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
217861007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_os) {
217961007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_os(bs);
218061007b31SStefan Hajnoczi         if (ret < 0) {
2181cdb5e315SFam Zheng             goto out;
218261007b31SStefan Hajnoczi         }
218361007b31SStefan Hajnoczi     }
218461007b31SStefan Hajnoczi 
218561007b31SStefan Hajnoczi     /* But don't actually force it to the disk with cache=unsafe */
218661007b31SStefan Hajnoczi     if (bs->open_flags & BDRV_O_NO_FLUSH) {
218761007b31SStefan Hajnoczi         goto flush_parent;
218861007b31SStefan Hajnoczi     }
218961007b31SStefan Hajnoczi 
21903ff2f67aSEvgeny Yakovlev     /* Check if we really need to flush anything */
21913ff2f67aSEvgeny Yakovlev     if (bs->flushed_gen == current_gen) {
21923ff2f67aSEvgeny Yakovlev         goto flush_parent;
21933ff2f67aSEvgeny Yakovlev     }
21943ff2f67aSEvgeny Yakovlev 
219561007b31SStefan Hajnoczi     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
219661007b31SStefan Hajnoczi     if (bs->drv->bdrv_co_flush_to_disk) {
219761007b31SStefan Hajnoczi         ret = bs->drv->bdrv_co_flush_to_disk(bs);
219861007b31SStefan Hajnoczi     } else if (bs->drv->bdrv_aio_flush) {
219961007b31SStefan Hajnoczi         BlockAIOCB *acb;
220061007b31SStefan Hajnoczi         CoroutineIOCompletion co = {
220161007b31SStefan Hajnoczi             .coroutine = qemu_coroutine_self(),
220261007b31SStefan Hajnoczi         };
220361007b31SStefan Hajnoczi 
220461007b31SStefan Hajnoczi         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
220561007b31SStefan Hajnoczi         if (acb == NULL) {
220661007b31SStefan Hajnoczi             ret = -EIO;
220761007b31SStefan Hajnoczi         } else {
220861007b31SStefan Hajnoczi             qemu_coroutine_yield();
220961007b31SStefan Hajnoczi             ret = co.ret;
221061007b31SStefan Hajnoczi         }
221161007b31SStefan Hajnoczi     } else {
221261007b31SStefan Hajnoczi         /*
221361007b31SStefan Hajnoczi          * Some block drivers always operate in either writethrough or unsafe
221461007b31SStefan Hajnoczi          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
221561007b31SStefan Hajnoczi          * know how the server works (because the behaviour is hardcoded or
221661007b31SStefan Hajnoczi          * depends on server-side configuration), so we can't ensure that
221761007b31SStefan Hajnoczi          * everything is safe on disk. Returning an error doesn't work because
221861007b31SStefan Hajnoczi          * that would break guests even if the server operates in writethrough
221961007b31SStefan Hajnoczi          * mode.
222061007b31SStefan Hajnoczi          *
222161007b31SStefan Hajnoczi          * Let's hope the user knows what he's doing.
222261007b31SStefan Hajnoczi          */
222361007b31SStefan Hajnoczi         ret = 0;
222461007b31SStefan Hajnoczi     }
22253ff2f67aSEvgeny Yakovlev 
222661007b31SStefan Hajnoczi     if (ret < 0) {
2227cdb5e315SFam Zheng         goto out;
222861007b31SStefan Hajnoczi     }
222961007b31SStefan Hajnoczi 
223061007b31SStefan Hajnoczi     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
223161007b31SStefan Hajnoczi      * in the case of cache=unsafe, so there are no useless flushes.
223261007b31SStefan Hajnoczi      */
223361007b31SStefan Hajnoczi flush_parent:
2234cdb5e315SFam Zheng     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2235cdb5e315SFam Zheng out:
22363ff2f67aSEvgeny Yakovlev     /* Notify any pending flushes that we have completed */
2237e6af1e08SKevin Wolf     if (ret == 0) {
22383ff2f67aSEvgeny Yakovlev         bs->flushed_gen = current_gen;
2239e6af1e08SKevin Wolf     }
22403783fa3dSPaolo Bonzini 
22413783fa3dSPaolo Bonzini     qemu_co_mutex_lock(&bs->reqs_lock);
224299723548SPaolo Bonzini     bs->active_flush_req = false;
2243156af3acSDenis V. Lunev     /* Return value is ignored - it's ok if wait queue is empty */
2244156af3acSDenis V. Lunev     qemu_co_queue_next(&bs->flush_queue);
22453783fa3dSPaolo Bonzini     qemu_co_mutex_unlock(&bs->reqs_lock);
22463ff2f67aSEvgeny Yakovlev 
224749ca6259SFam Zheng early_exit:
224899723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2249cdb5e315SFam Zheng     return ret;
225061007b31SStefan Hajnoczi }
225161007b31SStefan Hajnoczi 
225261007b31SStefan Hajnoczi int bdrv_flush(BlockDriverState *bs)
225361007b31SStefan Hajnoczi {
225461007b31SStefan Hajnoczi     Coroutine *co;
2255e293b7a3SKevin Wolf     FlushCo flush_co = {
225661007b31SStefan Hajnoczi         .bs = bs,
225761007b31SStefan Hajnoczi         .ret = NOT_DONE,
225861007b31SStefan Hajnoczi     };
225961007b31SStefan Hajnoczi 
226061007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
226161007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
2262e293b7a3SKevin Wolf         bdrv_flush_co_entry(&flush_co);
226361007b31SStefan Hajnoczi     } else {
22640b8b8753SPaolo Bonzini         co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2265e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
226688b062c2SPaolo Bonzini         BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
226761007b31SStefan Hajnoczi     }
226861007b31SStefan Hajnoczi 
2269e293b7a3SKevin Wolf     return flush_co.ret;
227061007b31SStefan Hajnoczi }
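
/* Editorial sketch: flushing an image from non-coroutine code after the
 * guest or a block job has modified it.  bdrv_flush() dispatches to the
 * coroutine version itself, so the caller need not care about context. */
static int example_sync_image(BlockDriverState *bs)
{
    int ret = bdrv_flush(bs);    /* writes back caches down the whole chain */
    if (ret < 0) {
        error_report("flush failed: %s", strerror(-ret));
    }
    return ret;
}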
227161007b31SStefan Hajnoczi 
227261007b31SStefan Hajnoczi typedef struct DiscardCo {
227361007b31SStefan Hajnoczi     BlockDriverState *bs;
22740c51a893SEric Blake     int64_t offset;
2275f5a5ca79SManos Pitsidianakis     int bytes;
227661007b31SStefan Hajnoczi     int ret;
227761007b31SStefan Hajnoczi } DiscardCo;
22780c51a893SEric Blake static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
227961007b31SStefan Hajnoczi {
228061007b31SStefan Hajnoczi     DiscardCo *rwco = opaque;
228161007b31SStefan Hajnoczi 
2282f5a5ca79SManos Pitsidianakis     rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
228361007b31SStefan Hajnoczi }
228461007b31SStefan Hajnoczi 
22859f1963b3SEric Blake int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
2286f5a5ca79SManos Pitsidianakis                                   int bytes)
228761007b31SStefan Hajnoczi {
2288b1066c87SFam Zheng     BdrvTrackedRequest req;
22899f1963b3SEric Blake     int max_pdiscard, ret;
22903482b9bcSEric Blake     int head, tail, align;
229161007b31SStefan Hajnoczi 
229261007b31SStefan Hajnoczi     if (!bs->drv) {
229361007b31SStefan Hajnoczi         return -ENOMEDIUM;
229461007b31SStefan Hajnoczi     }
229561007b31SStefan Hajnoczi 
2296f5a5ca79SManos Pitsidianakis     ret = bdrv_check_byte_request(bs, offset, bytes);
229761007b31SStefan Hajnoczi     if (ret < 0) {
229861007b31SStefan Hajnoczi         return ret;
229961007b31SStefan Hajnoczi     } else if (bs->read_only) {
2300eaf5fe2dSPaolo Bonzini         return -EPERM;
230161007b31SStefan Hajnoczi     }
230204c01a5cSKevin Wolf     assert(!(bs->open_flags & BDRV_O_INACTIVE));
230361007b31SStefan Hajnoczi 
230461007b31SStefan Hajnoczi     /* Do nothing if disabled.  */
230561007b31SStefan Hajnoczi     if (!(bs->open_flags & BDRV_O_UNMAP)) {
230661007b31SStefan Hajnoczi         return 0;
230761007b31SStefan Hajnoczi     }
230861007b31SStefan Hajnoczi 
230902aefe43SEric Blake     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
231061007b31SStefan Hajnoczi         return 0;
231161007b31SStefan Hajnoczi     }
231261007b31SStefan Hajnoczi 
23133482b9bcSEric Blake     /* Discard is advisory, but some devices track and coalesce
23143482b9bcSEric Blake      * unaligned requests, so we must pass everything down rather than
23153482b9bcSEric Blake      * round here.  Still, most devices will just silently ignore
23163482b9bcSEric Blake      * unaligned requests (by returning -ENOTSUP), so we must fragment
23173482b9bcSEric Blake      * the request accordingly.  */
231802aefe43SEric Blake     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2319b8d0a980SEric Blake     assert(align % bs->bl.request_alignment == 0);
2320b8d0a980SEric Blake     head = offset % align;
2321f5a5ca79SManos Pitsidianakis     tail = (offset + bytes) % align;
23229f1963b3SEric Blake 
232399723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
2324f5a5ca79SManos Pitsidianakis     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
232550824995SFam Zheng 
2326ec050f77SDenis V. Lunev     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2327ec050f77SDenis V. Lunev     if (ret < 0) {
2328ec050f77SDenis V. Lunev         goto out;
2329ec050f77SDenis V. Lunev     }
2330ec050f77SDenis V. Lunev 
23319f1963b3SEric Blake     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
23329f1963b3SEric Blake                                    align);
23333482b9bcSEric Blake     assert(max_pdiscard >= bs->bl.request_alignment);
23349f1963b3SEric Blake 
2335f5a5ca79SManos Pitsidianakis     while (bytes > 0) {
233661007b31SStefan Hajnoczi         int ret;
2337f5a5ca79SManos Pitsidianakis         int num = bytes;
23383482b9bcSEric Blake 
23393482b9bcSEric Blake         if (head) {
23403482b9bcSEric Blake             /* Make small requests to get to alignment boundaries. */
2341f5a5ca79SManos Pitsidianakis             num = MIN(bytes, align - head);
23423482b9bcSEric Blake             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
23433482b9bcSEric Blake                 num %= bs->bl.request_alignment;
23443482b9bcSEric Blake             }
23453482b9bcSEric Blake             head = (head + num) % align;
23463482b9bcSEric Blake             assert(num < max_pdiscard);
23473482b9bcSEric Blake         } else if (tail) {
23483482b9bcSEric Blake             if (num > align) {
23493482b9bcSEric Blake                 /* Shorten the request to the last aligned cluster.  */
23503482b9bcSEric Blake                 num -= tail;
23513482b9bcSEric Blake             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
23523482b9bcSEric Blake                        tail > bs->bl.request_alignment) {
23533482b9bcSEric Blake                 tail %= bs->bl.request_alignment;
23543482b9bcSEric Blake                 num -= tail;
23553482b9bcSEric Blake             }
23563482b9bcSEric Blake         }
23573482b9bcSEric Blake         /* limit request size */
23583482b9bcSEric Blake         if (num > max_pdiscard) {
23593482b9bcSEric Blake             num = max_pdiscard;
23603482b9bcSEric Blake         }
236161007b31SStefan Hajnoczi 
236247a5486dSEric Blake         if (bs->drv->bdrv_co_pdiscard) {
236347a5486dSEric Blake             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
236461007b31SStefan Hajnoczi         } else {
236561007b31SStefan Hajnoczi             BlockAIOCB *acb;
236661007b31SStefan Hajnoczi             CoroutineIOCompletion co = {
236761007b31SStefan Hajnoczi                 .coroutine = qemu_coroutine_self(),
236861007b31SStefan Hajnoczi             };
236961007b31SStefan Hajnoczi 
23704da444a0SEric Blake             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
237161007b31SStefan Hajnoczi                                              bdrv_co_io_em_complete, &co);
237261007b31SStefan Hajnoczi             if (acb == NULL) {
2373b1066c87SFam Zheng                 ret = -EIO;
2374b1066c87SFam Zheng                 goto out;
237561007b31SStefan Hajnoczi             } else {
237661007b31SStefan Hajnoczi                 qemu_coroutine_yield();
237761007b31SStefan Hajnoczi                 ret = co.ret;
237861007b31SStefan Hajnoczi             }
237961007b31SStefan Hajnoczi         }
238061007b31SStefan Hajnoczi         if (ret && ret != -ENOTSUP) {
2381b1066c87SFam Zheng             goto out;
238261007b31SStefan Hajnoczi         }
238361007b31SStefan Hajnoczi 
23849f1963b3SEric Blake         offset += num;
2385f5a5ca79SManos Pitsidianakis         bytes -= num;
238661007b31SStefan Hajnoczi     }
2387b1066c87SFam Zheng     ret = 0;
2388b1066c87SFam Zheng out:
238947fec599SPaolo Bonzini     atomic_inc(&bs->write_gen);
2390968d8b06SDenis V. Lunev     bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
2391968d8b06SDenis V. Lunev                    req.bytes >> BDRV_SECTOR_BITS);
2392b1066c87SFam Zheng     tracked_request_end(&req);
239399723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
2394b1066c87SFam Zheng     return ret;
239561007b31SStefan Hajnoczi }
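
/* Editorial worked example for the head/tail splitting above (values made
 * up): with pdiscard_alignment = 64 KiB, request_alignment = 512,
 * offset = 512 and bytes = 128 KiB we get align = 65536, head = 512,
 * tail = 512, and the loop issues three pieces:
 *   1) 65024 bytes at offset 512    (head pass, up to the 64 KiB boundary),
 *   2) 65536 bytes at offset 65536  (aligned middle),
 *   3)   512 bytes at offset 131072 (unaligned tail),
 * each additionally clamped to max_pdiscard before reaching the driver. */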
239661007b31SStefan Hajnoczi 
2397f5a5ca79SManos Pitsidianakis int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
239861007b31SStefan Hajnoczi {
239961007b31SStefan Hajnoczi     Coroutine *co;
240061007b31SStefan Hajnoczi     DiscardCo rwco = {
240161007b31SStefan Hajnoczi         .bs = bs,
24020c51a893SEric Blake         .offset = offset,
2403f5a5ca79SManos Pitsidianakis         .bytes = bytes,
240461007b31SStefan Hajnoczi         .ret = NOT_DONE,
240561007b31SStefan Hajnoczi     };
240661007b31SStefan Hajnoczi 
240761007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
240861007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
24090c51a893SEric Blake         bdrv_pdiscard_co_entry(&rwco);
241061007b31SStefan Hajnoczi     } else {
24110c51a893SEric Blake         co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2412e92f0e19SFam Zheng         bdrv_coroutine_enter(bs, co);
241388b062c2SPaolo Bonzini         BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
241461007b31SStefan Hajnoczi     }
241561007b31SStefan Hajnoczi 
241661007b31SStefan Hajnoczi     return rwco.ret;
241761007b31SStefan Hajnoczi }
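
/* Editorial sketch: punching a hole in an image from non-coroutine code.
 * A return value of 0 only means the request was accepted; BDRV_O_UNMAP may
 * be clear or the driver may ignore unaligned ranges, so the discarded data
 * is not guaranteed to read back as zeroes. */
static void example_discard_region(BlockDriverState *bs, int64_t offset,
                                   int bytes)
{
    int ret = bdrv_pdiscard(bs, offset, bytes);
    if (ret < 0) {
        error_report("discard failed: %s", strerror(-ret));
    }
}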
241861007b31SStefan Hajnoczi 
241948af776aSKevin Wolf int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
242061007b31SStefan Hajnoczi {
242161007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
24225c5ae76aSFam Zheng     CoroutineIOCompletion co = {
24235c5ae76aSFam Zheng         .coroutine = qemu_coroutine_self(),
24245c5ae76aSFam Zheng     };
24255c5ae76aSFam Zheng     BlockAIOCB *acb;
242661007b31SStefan Hajnoczi 
242799723548SPaolo Bonzini     bdrv_inc_in_flight(bs);
242816a389dcSKevin Wolf     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
24295c5ae76aSFam Zheng         co.ret = -ENOTSUP;
24305c5ae76aSFam Zheng         goto out;
24315c5ae76aSFam Zheng     }
24325c5ae76aSFam Zheng 
243316a389dcSKevin Wolf     if (drv->bdrv_co_ioctl) {
243416a389dcSKevin Wolf         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
243516a389dcSKevin Wolf     } else {
24365c5ae76aSFam Zheng         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
24375c5ae76aSFam Zheng         if (!acb) {
2438c8a9fd80SFam Zheng             co.ret = -ENOTSUP;
2439c8a9fd80SFam Zheng             goto out;
24405c5ae76aSFam Zheng         }
24415c5ae76aSFam Zheng         qemu_coroutine_yield();
244216a389dcSKevin Wolf     }
24435c5ae76aSFam Zheng out:
244499723548SPaolo Bonzini     bdrv_dec_in_flight(bs);
24455c5ae76aSFam Zheng     return co.ret;
24465c5ae76aSFam Zheng }
24475c5ae76aSFam Zheng 
244861007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size)
244961007b31SStefan Hajnoczi {
245061007b31SStefan Hajnoczi     return qemu_memalign(bdrv_opt_mem_align(bs), size);
245161007b31SStefan Hajnoczi }
245261007b31SStefan Hajnoczi 
245361007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size)
245461007b31SStefan Hajnoczi {
245561007b31SStefan Hajnoczi     return memset(qemu_blockalign(bs, size), 0, size);
245661007b31SStefan Hajnoczi }
245761007b31SStefan Hajnoczi 
245861007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
245961007b31SStefan Hajnoczi {
246061007b31SStefan Hajnoczi     size_t align = bdrv_opt_mem_align(bs);
246161007b31SStefan Hajnoczi 
246261007b31SStefan Hajnoczi     /* Ensure that NULL is never returned on success */
246361007b31SStefan Hajnoczi     assert(align > 0);
246461007b31SStefan Hajnoczi     if (size == 0) {
246561007b31SStefan Hajnoczi         size = align;
246661007b31SStefan Hajnoczi     }
246761007b31SStefan Hajnoczi 
246861007b31SStefan Hajnoczi     return qemu_try_memalign(align, size);
246961007b31SStefan Hajnoczi }
247061007b31SStefan Hajnoczi 
247161007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
247261007b31SStefan Hajnoczi {
247361007b31SStefan Hajnoczi     void *mem = qemu_try_blockalign(bs, size);
247461007b31SStefan Hajnoczi 
247561007b31SStefan Hajnoczi     if (mem) {
247661007b31SStefan Hajnoczi         memset(mem, 0, size);
247761007b31SStefan Hajnoczi     }
247861007b31SStefan Hajnoczi 
247961007b31SStefan Hajnoczi     return mem;
248061007b31SStefan Hajnoczi }
248161007b31SStefan Hajnoczi 
248261007b31SStefan Hajnoczi /*
248361007b31SStefan Hajnoczi  * Check if all memory in this vector is aligned to bdrv_min_mem_align(bs).
248461007b31SStefan Hajnoczi  */
248561007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
248661007b31SStefan Hajnoczi {
248761007b31SStefan Hajnoczi     int i;
24884196d2f0SDenis V. Lunev     size_t alignment = bdrv_min_mem_align(bs);
248961007b31SStefan Hajnoczi 
249061007b31SStefan Hajnoczi     for (i = 0; i < qiov->niov; i++) {
249161007b31SStefan Hajnoczi         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
249261007b31SStefan Hajnoczi             return false;
249361007b31SStefan Hajnoczi         }
249461007b31SStefan Hajnoczi         if (qiov->iov[i].iov_len % alignment) {
249561007b31SStefan Hajnoczi             return false;
249661007b31SStefan Hajnoczi         }
249761007b31SStefan Hajnoczi     }
249861007b31SStefan Hajnoczi 
249961007b31SStefan Hajnoczi     return true;
250061007b31SStefan Hajnoczi }
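
/* Editorial sketch: deciding whether a caller-supplied vector can be handed
 * to the driver directly or needs to be copied into an aligned bounce buffer
 * allocated with the helpers above (the caller would copy the data and free
 * the buffer with qemu_vfree()). */
static void *example_get_bounce_buffer(BlockDriverState *bs,
                                       QEMUIOVector *qiov)
{
    if (bdrv_qiov_is_aligned(bs, qiov)) {
        return NULL;                              /* no bounce buffer needed */
    }
    return qemu_try_blockalign0(bs, qiov->size);  /* may return NULL on OOM */
}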
250161007b31SStefan Hajnoczi 
250261007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs,
250361007b31SStefan Hajnoczi                                     NotifierWithReturn *notifier)
250461007b31SStefan Hajnoczi {
250561007b31SStefan Hajnoczi     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
250661007b31SStefan Hajnoczi }
250761007b31SStefan Hajnoczi 
250861007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs)
250961007b31SStefan Hajnoczi {
25106b98bd64SPaolo Bonzini     BdrvChild *child;
25116b98bd64SPaolo Bonzini 
25126b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
25136b98bd64SPaolo Bonzini         bdrv_io_plug(child->bs);
25146b98bd64SPaolo Bonzini     }
25156b98bd64SPaolo Bonzini 
2516850d54a2SPaolo Bonzini     if (atomic_fetch_inc(&bs->io_plugged) == 0) {
251761007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
251861007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_plug) {
251961007b31SStefan Hajnoczi             drv->bdrv_io_plug(bs);
25206b98bd64SPaolo Bonzini         }
252161007b31SStefan Hajnoczi     }
252261007b31SStefan Hajnoczi }
252361007b31SStefan Hajnoczi 
252461007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs)
252561007b31SStefan Hajnoczi {
25266b98bd64SPaolo Bonzini     BdrvChild *child;
25276b98bd64SPaolo Bonzini 
25286b98bd64SPaolo Bonzini     assert(bs->io_plugged);
2529850d54a2SPaolo Bonzini     if (atomic_fetch_dec(&bs->io_plugged) == 1) {
253061007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
253161007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_unplug) {
253261007b31SStefan Hajnoczi             drv->bdrv_io_unplug(bs);
253361007b31SStefan Hajnoczi         }
253461007b31SStefan Hajnoczi     }
253561007b31SStefan Hajnoczi 
25366b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
25376b98bd64SPaolo Bonzini         bdrv_io_unplug(child->bs);
25386b98bd64SPaolo Bonzini     }
25396b98bd64SPaolo Bonzini }
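
/* Editorial sketch: batching several requests so that drivers implementing
 * bdrv_io_plug/bdrv_io_unplug (e.g. the Linux AIO backend) can submit them
 * with a single syscall.  submit_one_request() is a hypothetical helper used
 * only for illustration. */
static void example_submit_batch(BlockDriverState *bs, int nreq)
{
    int i;

    bdrv_io_plug(bs);            /* start queueing, recursing into children */
    for (i = 0; i < nreq; i++) {
        /* submit_one_request(bs, i); */
    }
    bdrv_io_unplug(bs);          /* hand the queued requests to the kernel */
}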
2540