/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

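/* Tell all parents of @bs that draining of this node has ended */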
static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
        bs->bl.max_iov = bs->file->bs->bl.max_iov;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

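    /* Merge in the limits of the backing file, if any */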
    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
        bs->bl.max_iov =
            MIN(bs->bl.max_iov,
                bs->backing->bs->bl.max_iov);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
    bool done;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

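/* Bottom half that performs the actual drain outside of coroutine context and
 * then wakes up the coroutine that scheduled it */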
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co, NULL);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

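/*
 * Illustrative usage of a drained section (bdrv_drain() below is simply an
 * empty such section):
 *
 *     bdrv_drained_begin(bs);
 *     ... no new external I/O is dispatched to bs in here ...
 *     bdrv_drained_end(bs);
 */
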
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts. In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion. Therefore we must keep looping until there is no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}

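/*
 * Request tracking: every in-flight request is kept in bs->tracked_requests
 * between tracked_request_begin() and tracked_request_end(). Sketch of the
 * pattern (this is what bdrv_co_preadv() below does):
 *
 *     BdrvTrackedRequest req;
 *
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
 *     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, qiov, flags);
 *     tracked_request_end(&req);
 */
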
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

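/* Return the image's cluster size, falling back to the request alignment when
 * the driver does not report one */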
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case).
                 */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

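/*
 * Example (sketch): synchronous helpers such as bdrv_read() run the
 * coroutine-based request and poll the AioContext until it has completed,
 * so a caller can simply do:
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *
 *     if (bdrv_read(bs, 0, buf, 1) < 0) {
 *         ... handle the error ...
 *     }
 */
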
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

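/* Read @bytes bytes starting at @offset into @buf. Returns the number of
 * bytes read on success, or a negative errno on failure. */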
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

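/* Forward a read to the block driver, preferring the byte-based
 * .bdrv_co_preadv callback and falling back to the sector-based
 * .bdrv_co_readv or .bdrv_aio_readv interfaces */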
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

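/* Forward a write to the block driver, preferring .bdrv_co_pwritev and
 * falling back to .bdrv_co_writev_flags, .bdrv_co_writev or .bdrv_aio_writev.
 * If the driver does not handle BDRV_REQ_FUA itself, the flag is emulated
 * with a flush after the write completes. */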
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

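/* Implement copy-on-read for a single request: the containing cluster is read
 * into a bounce buffer and written back to this layer, so that the data does
 * not have to be fetched from the backing file again on the next access. */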
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover the entire cluster so no additional backing file I/O is required
     * when allocating a cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                             cluster_nb_sectors * BDRV_SECTOR_SIZE,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if ((drv->bdrv_co_write_zeroes || drv->bdrv_co_pwrite_zeroes) &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_pwrite_zeroes(bs,
                                       cluster_sector_num * BDRV_SECTOR_SIZE,
                                       cluster_nb_sectors * BDRV_SECTOR_SIZE,
                                       0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                                  cluster_nb_sectors * BDRV_SECTOR_SIZE,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests. If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

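            /* The request straddles EOF: read only the part that is backed by
             * the file; the remainder is zero-filled below */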
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = bdrv_driver_preadv(bs, offset,
                                     max_nb_sectors * BDRV_SECTOR_SIZE,
                                     &local_qiov, 0);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                             BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}

/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before a write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

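/* Write zeroes to the byte range [offset, offset + count). The request is
 * split so that its bulk is aligned to the driver's preferred zero-write
 * alignment; if the driver has no efficient zero-write callback, the range is
 * written from a bounce buffer filled with zeroes instead. */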
Lunev int tail = 0; 112561007b31SStefan Hajnoczi 1126cf081fcaSEric Blake int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); 1127*d05aa8bbSEric Blake int alignment = MAX(bs->bl.pwrite_zeroes_alignment ?: 1, 1128*d05aa8bbSEric Blake bs->request_alignment); 1129cf081fcaSEric Blake 1130*d05aa8bbSEric Blake assert(is_power_of_2(alignment)); 1131*d05aa8bbSEric Blake head = offset & (alignment - 1); 1132*d05aa8bbSEric Blake tail = (offset + count) & (alignment - 1); 1133*d05aa8bbSEric Blake max_write_zeroes &= ~(alignment - 1); 113461007b31SStefan Hajnoczi 1135*d05aa8bbSEric Blake while (count > 0 && !ret) { 1136*d05aa8bbSEric Blake int num = count; 113761007b31SStefan Hajnoczi 113861007b31SStefan Hajnoczi /* Align request. Block drivers can expect the "bulk" of the request 1139443668caSDenis V. Lunev * to be aligned, and that unaligned requests do not cross cluster 1140443668caSDenis V. Lunev * boundaries. 114161007b31SStefan Hajnoczi */ 1142443668caSDenis V. Lunev if (head) { 114361007b31SStefan Hajnoczi /* Make a small request up to the first aligned sector. */ 1144*d05aa8bbSEric Blake num = MIN(count, alignment - head); 1145443668caSDenis V. Lunev head = 0; 1146*d05aa8bbSEric Blake } else if (tail && num > alignment) { 1147443668caSDenis V. Lunev /* Shorten the request to the last aligned sector. */ 1148443668caSDenis V. Lunev num -= tail; 114961007b31SStefan Hajnoczi } 115061007b31SStefan Hajnoczi 115161007b31SStefan Hajnoczi /* limit request size */ 115261007b31SStefan Hajnoczi if (num > max_write_zeroes) { 115361007b31SStefan Hajnoczi num = max_write_zeroes; 115461007b31SStefan Hajnoczi } 115561007b31SStefan Hajnoczi 115661007b31SStefan Hajnoczi ret = -ENOTSUP; 115761007b31SStefan Hajnoczi /* First try the efficient write zeroes operation */ 1158*d05aa8bbSEric Blake if (drv->bdrv_co_pwrite_zeroes) { 1159*d05aa8bbSEric Blake ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1160*d05aa8bbSEric Blake flags & bs->supported_zero_flags); 1161*d05aa8bbSEric Blake if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1162*d05aa8bbSEric Blake !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1163*d05aa8bbSEric Blake need_flush = true; 1164*d05aa8bbSEric Blake } 1165*d05aa8bbSEric Blake } else if (drv->bdrv_co_write_zeroes) { 1166*d05aa8bbSEric Blake assert(offset % BDRV_SECTOR_SIZE == 0); 1167*d05aa8bbSEric Blake assert(count % BDRV_SECTOR_SIZE == 0); 1168*d05aa8bbSEric Blake ret = drv->bdrv_co_write_zeroes(bs, offset >> BDRV_SECTOR_BITS, 1169*d05aa8bbSEric Blake num >> BDRV_SECTOR_BITS, 1170465fe887SEric Blake flags & bs->supported_zero_flags); 1171465fe887SEric Blake if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1172465fe887SEric Blake !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1173465fe887SEric Blake need_flush = true; 1174465fe887SEric Blake } 1175465fe887SEric Blake } else { 1176465fe887SEric Blake assert(!bs->supported_zero_flags); 117761007b31SStefan Hajnoczi } 117861007b31SStefan Hajnoczi 117961007b31SStefan Hajnoczi if (ret == -ENOTSUP) { 118061007b31SStefan Hajnoczi /* Fall back to bounce buffer if write zeroes is unsupported */ 118161007b31SStefan Hajnoczi int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length, 118261007b31SStefan Hajnoczi MAX_WRITE_ZEROES_BOUNCE_BUFFER); 1183465fe887SEric Blake BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1184465fe887SEric Blake 1185465fe887SEric Blake if ((flags & BDRV_REQ_FUA) && 1186465fe887SEric Blake !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1187465fe887SEric Blake /* No need for bdrv_driver_pwrite() to do 
a fallback 1188465fe887SEric Blake * flush on each chunk; use just one at the end */ 1189465fe887SEric Blake write_flags &= ~BDRV_REQ_FUA; 1190465fe887SEric Blake need_flush = true; 1191465fe887SEric Blake } 1192*d05aa8bbSEric Blake num = MIN(num, max_xfer_len << BDRV_SECTOR_BITS); 1193*d05aa8bbSEric Blake iov.iov_len = num; 119461007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 1195*d05aa8bbSEric Blake iov.iov_base = qemu_try_blockalign(bs, num); 119661007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 119761007b31SStefan Hajnoczi ret = -ENOMEM; 119861007b31SStefan Hajnoczi goto fail; 119961007b31SStefan Hajnoczi } 1200*d05aa8bbSEric Blake memset(iov.iov_base, 0, num); 120161007b31SStefan Hajnoczi } 120261007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 120361007b31SStefan Hajnoczi 1204*d05aa8bbSEric Blake ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); 120561007b31SStefan Hajnoczi 120661007b31SStefan Hajnoczi /* Keep bounce buffer around if it is big enough for all 120761007b31SStefan Hajnoczi * future requests. 120861007b31SStefan Hajnoczi */ 1209*d05aa8bbSEric Blake if (num < max_xfer_len << BDRV_SECTOR_BITS) { 121061007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 121161007b31SStefan Hajnoczi iov.iov_base = NULL; 121261007b31SStefan Hajnoczi } 121361007b31SStefan Hajnoczi } 121461007b31SStefan Hajnoczi 1215*d05aa8bbSEric Blake offset += num; 1216*d05aa8bbSEric Blake count -= num; 121761007b31SStefan Hajnoczi } 121861007b31SStefan Hajnoczi 121961007b31SStefan Hajnoczi fail: 1220465fe887SEric Blake if (ret == 0 && need_flush) { 1221465fe887SEric Blake ret = bdrv_co_flush(bs); 1222465fe887SEric Blake } 122361007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 122461007b31SStefan Hajnoczi return ret; 122561007b31SStefan Hajnoczi } 122661007b31SStefan Hajnoczi 122761007b31SStefan Hajnoczi /* 122861007b31SStefan Hajnoczi * Forwards an already correctly aligned write request to the BlockDriver. 122961007b31SStefan Hajnoczi */ 123061007b31SStefan Hajnoczi static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 123161007b31SStefan Hajnoczi BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 123261007b31SStefan Hajnoczi QEMUIOVector *qiov, int flags) 123361007b31SStefan Hajnoczi { 123461007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 123561007b31SStefan Hajnoczi bool waited; 123661007b31SStefan Hajnoczi int ret; 123761007b31SStefan Hajnoczi 123861007b31SStefan Hajnoczi int64_t sector_num = offset >> BDRV_SECTOR_BITS; 123961007b31SStefan Hajnoczi unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 124061007b31SStefan Hajnoczi 124161007b31SStefan Hajnoczi assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 124261007b31SStefan Hajnoczi assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 124361007b31SStefan Hajnoczi assert(!qiov || bytes == qiov->size); 1244abb06c5aSDaniel P.
Berrange assert((bs->open_flags & BDRV_O_NO_IO) == 0); 124561007b31SStefan Hajnoczi 124661007b31SStefan Hajnoczi waited = wait_serialising_requests(req); 124761007b31SStefan Hajnoczi assert(!waited || !req->serialising); 124861007b31SStefan Hajnoczi assert(req->overlap_offset <= offset); 124961007b31SStefan Hajnoczi assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 125061007b31SStefan Hajnoczi 125161007b31SStefan Hajnoczi ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 125261007b31SStefan Hajnoczi 125361007b31SStefan Hajnoczi if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1254*d05aa8bbSEric Blake !(flags & BDRV_REQ_ZERO_WRITE) && 1255*d05aa8bbSEric Blake (drv->bdrv_co_pwrite_zeroes || drv->bdrv_co_write_zeroes) && 125661007b31SStefan Hajnoczi qemu_iovec_is_zero(qiov)) { 125761007b31SStefan Hajnoczi flags |= BDRV_REQ_ZERO_WRITE; 125861007b31SStefan Hajnoczi if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 125961007b31SStefan Hajnoczi flags |= BDRV_REQ_MAY_UNMAP; 126061007b31SStefan Hajnoczi } 126161007b31SStefan Hajnoczi } 126261007b31SStefan Hajnoczi 126361007b31SStefan Hajnoczi if (ret < 0) { 126461007b31SStefan Hajnoczi /* Do nothing, write notifier decided to fail this request */ 126561007b31SStefan Hajnoczi } else if (flags & BDRV_REQ_ZERO_WRITE) { 12669a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 1267*d05aa8bbSEric Blake ret = bdrv_co_do_pwrite_zeroes(bs, sector_num << BDRV_SECTOR_BITS, 1268*d05aa8bbSEric Blake nb_sectors << BDRV_SECTOR_BITS, flags); 126961007b31SStefan Hajnoczi } else { 12709a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV); 127178a07294SKevin Wolf ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags); 127261007b31SStefan Hajnoczi } 12739a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 127461007b31SStefan Hajnoczi 127561007b31SStefan Hajnoczi bdrv_set_dirty(bs, sector_num, nb_sectors); 127661007b31SStefan Hajnoczi 127753d8f9d8SMax Reitz if (bs->wr_highest_offset < offset + bytes) { 127853d8f9d8SMax Reitz bs->wr_highest_offset = offset + bytes; 127953d8f9d8SMax Reitz } 128061007b31SStefan Hajnoczi 128161007b31SStefan Hajnoczi if (ret >= 0) { 128261007b31SStefan Hajnoczi bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); 128361007b31SStefan Hajnoczi } 128461007b31SStefan Hajnoczi 128561007b31SStefan Hajnoczi return ret; 128661007b31SStefan Hajnoczi } 128761007b31SStefan Hajnoczi 12889eeb6dd1SFam Zheng static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs, 12899eeb6dd1SFam Zheng int64_t offset, 12909eeb6dd1SFam Zheng unsigned int bytes, 12919eeb6dd1SFam Zheng BdrvRequestFlags flags, 12929eeb6dd1SFam Zheng BdrvTrackedRequest *req) 12939eeb6dd1SFam Zheng { 12949eeb6dd1SFam Zheng uint8_t *buf = NULL; 12959eeb6dd1SFam Zheng QEMUIOVector local_qiov; 12969eeb6dd1SFam Zheng struct iovec iov; 12979eeb6dd1SFam Zheng uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 12989eeb6dd1SFam Zheng unsigned int head_padding_bytes, tail_padding_bytes; 12999eeb6dd1SFam Zheng int ret = 0; 13009eeb6dd1SFam Zheng 13019eeb6dd1SFam Zheng head_padding_bytes = offset & (align - 1); 13029eeb6dd1SFam Zheng tail_padding_bytes = align - ((offset + bytes) & (align - 1)); 13039eeb6dd1SFam Zheng 13049eeb6dd1SFam Zheng 13059eeb6dd1SFam Zheng assert(flags & BDRV_REQ_ZERO_WRITE); 13069eeb6dd1SFam Zheng if (head_padding_bytes || tail_padding_bytes) { 13079eeb6dd1SFam Zheng buf = qemu_blockalign(bs, align); 13089eeb6dd1SFam Zheng iov = (struct 
iovec) { 13099eeb6dd1SFam Zheng .iov_base = buf, 13109eeb6dd1SFam Zheng .iov_len = align, 13119eeb6dd1SFam Zheng }; 13129eeb6dd1SFam Zheng qemu_iovec_init_external(&local_qiov, &iov, 1); 13139eeb6dd1SFam Zheng } 13149eeb6dd1SFam Zheng if (head_padding_bytes) { 13159eeb6dd1SFam Zheng uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 13169eeb6dd1SFam Zheng 13179eeb6dd1SFam Zheng /* RMW the unaligned part before head. */ 13189eeb6dd1SFam Zheng mark_request_serialising(req, align); 13199eeb6dd1SFam Zheng wait_serialising_requests(req); 13209a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 13219eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align, 13229eeb6dd1SFam Zheng align, &local_qiov, 0); 13239eeb6dd1SFam Zheng if (ret < 0) { 13249eeb6dd1SFam Zheng goto fail; 13259eeb6dd1SFam Zheng } 13269a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 13279eeb6dd1SFam Zheng 13289eeb6dd1SFam Zheng memset(buf + head_padding_bytes, 0, zero_bytes); 13299eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align, 13309eeb6dd1SFam Zheng &local_qiov, 13319eeb6dd1SFam Zheng flags & ~BDRV_REQ_ZERO_WRITE); 13329eeb6dd1SFam Zheng if (ret < 0) { 13339eeb6dd1SFam Zheng goto fail; 13349eeb6dd1SFam Zheng } 13359eeb6dd1SFam Zheng offset += zero_bytes; 13369eeb6dd1SFam Zheng bytes -= zero_bytes; 13379eeb6dd1SFam Zheng } 13389eeb6dd1SFam Zheng 13399eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 13409eeb6dd1SFam Zheng if (bytes >= align) { 13419eeb6dd1SFam Zheng /* Write the aligned part in the middle. */ 13429eeb6dd1SFam Zheng uint64_t aligned_bytes = bytes & ~(align - 1); 13439eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, 13449eeb6dd1SFam Zheng NULL, flags); 13459eeb6dd1SFam Zheng if (ret < 0) { 13469eeb6dd1SFam Zheng goto fail; 13479eeb6dd1SFam Zheng } 13489eeb6dd1SFam Zheng bytes -= aligned_bytes; 13499eeb6dd1SFam Zheng offset += aligned_bytes; 13509eeb6dd1SFam Zheng } 13519eeb6dd1SFam Zheng 13529eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 13539eeb6dd1SFam Zheng if (bytes) { 13549eeb6dd1SFam Zheng assert(align == tail_padding_bytes + bytes); 13559eeb6dd1SFam Zheng /* RMW the unaligned part after tail. 
*/ 13569eeb6dd1SFam Zheng mark_request_serialising(req, align); 13579eeb6dd1SFam Zheng wait_serialising_requests(req); 13589a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 13599eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset, align, 13609eeb6dd1SFam Zheng align, &local_qiov, 0); 13619eeb6dd1SFam Zheng if (ret < 0) { 13629eeb6dd1SFam Zheng goto fail; 13639eeb6dd1SFam Zheng } 13649a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 13659eeb6dd1SFam Zheng 13669eeb6dd1SFam Zheng memset(buf, 0, bytes); 13679eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, align, 13689eeb6dd1SFam Zheng &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 13699eeb6dd1SFam Zheng } 13709eeb6dd1SFam Zheng fail: 13719eeb6dd1SFam Zheng qemu_vfree(buf); 13729eeb6dd1SFam Zheng return ret; 13739eeb6dd1SFam Zheng 13749eeb6dd1SFam Zheng } 13759eeb6dd1SFam Zheng 137661007b31SStefan Hajnoczi /* 137761007b31SStefan Hajnoczi * Handle a write request in coroutine context 137861007b31SStefan Hajnoczi */ 1379cab3a356SKevin Wolf int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs, 138061007b31SStefan Hajnoczi int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 138161007b31SStefan Hajnoczi BdrvRequestFlags flags) 138261007b31SStefan Hajnoczi { 138361007b31SStefan Hajnoczi BdrvTrackedRequest req; 1384d01c07f2SFam Zheng /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 1385d01c07f2SFam Zheng uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 138661007b31SStefan Hajnoczi uint8_t *head_buf = NULL; 138761007b31SStefan Hajnoczi uint8_t *tail_buf = NULL; 138861007b31SStefan Hajnoczi QEMUIOVector local_qiov; 138961007b31SStefan Hajnoczi bool use_local_qiov = false; 139061007b31SStefan Hajnoczi int ret; 139161007b31SStefan Hajnoczi 139261007b31SStefan Hajnoczi if (!bs->drv) { 139361007b31SStefan Hajnoczi return -ENOMEDIUM; 139461007b31SStefan Hajnoczi } 139561007b31SStefan Hajnoczi if (bs->read_only) { 1396eaf5fe2dSPaolo Bonzini return -EPERM; 139761007b31SStefan Hajnoczi } 139804c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 139961007b31SStefan Hajnoczi 140061007b31SStefan Hajnoczi ret = bdrv_check_byte_request(bs, offset, bytes); 140161007b31SStefan Hajnoczi if (ret < 0) { 140261007b31SStefan Hajnoczi return ret; 140361007b31SStefan Hajnoczi } 140461007b31SStefan Hajnoczi 140561007b31SStefan Hajnoczi /* 140661007b31SStefan Hajnoczi * Align write if necessary by performing a read-modify-write cycle. 140761007b31SStefan Hajnoczi * Pad qiov with the read parts and be sure to have a tracked request not 140861007b31SStefan Hajnoczi * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
140961007b31SStefan Hajnoczi */ 1410ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 141161007b31SStefan Hajnoczi 14129eeb6dd1SFam Zheng if (!qiov) { 14139eeb6dd1SFam Zheng ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); 14149eeb6dd1SFam Zheng goto out; 14159eeb6dd1SFam Zheng } 14169eeb6dd1SFam Zheng 141761007b31SStefan Hajnoczi if (offset & (align - 1)) { 141861007b31SStefan Hajnoczi QEMUIOVector head_qiov; 141961007b31SStefan Hajnoczi struct iovec head_iov; 142061007b31SStefan Hajnoczi 142161007b31SStefan Hajnoczi mark_request_serialising(&req, align); 142261007b31SStefan Hajnoczi wait_serialising_requests(&req); 142361007b31SStefan Hajnoczi 142461007b31SStefan Hajnoczi head_buf = qemu_blockalign(bs, align); 142561007b31SStefan Hajnoczi head_iov = (struct iovec) { 142661007b31SStefan Hajnoczi .iov_base = head_buf, 142761007b31SStefan Hajnoczi .iov_len = align, 142861007b31SStefan Hajnoczi }; 142961007b31SStefan Hajnoczi qemu_iovec_init_external(&head_qiov, &head_iov, 1); 143061007b31SStefan Hajnoczi 14319a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 143261007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 143361007b31SStefan Hajnoczi align, &head_qiov, 0); 143461007b31SStefan Hajnoczi if (ret < 0) { 143561007b31SStefan Hajnoczi goto fail; 143661007b31SStefan Hajnoczi } 14379a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 143861007b31SStefan Hajnoczi 143961007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 2); 144061007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 144161007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 144261007b31SStefan Hajnoczi use_local_qiov = true; 144361007b31SStefan Hajnoczi 144461007b31SStefan Hajnoczi bytes += offset & (align - 1); 144561007b31SStefan Hajnoczi offset = offset & ~(align - 1); 1446117bc3faSPeter Lieven 1447117bc3faSPeter Lieven /* We have read the tail already if the request is smaller 1448117bc3faSPeter Lieven * than one aligned block. 
1449117bc3faSPeter Lieven */ 1450117bc3faSPeter Lieven if (bytes < align) { 1451117bc3faSPeter Lieven qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes); 1452117bc3faSPeter Lieven bytes = align; 1453117bc3faSPeter Lieven } 145461007b31SStefan Hajnoczi } 145561007b31SStefan Hajnoczi 145661007b31SStefan Hajnoczi if ((offset + bytes) & (align - 1)) { 145761007b31SStefan Hajnoczi QEMUIOVector tail_qiov; 145861007b31SStefan Hajnoczi struct iovec tail_iov; 145961007b31SStefan Hajnoczi size_t tail_bytes; 146061007b31SStefan Hajnoczi bool waited; 146161007b31SStefan Hajnoczi 146261007b31SStefan Hajnoczi mark_request_serialising(&req, align); 146361007b31SStefan Hajnoczi waited = wait_serialising_requests(&req); 146461007b31SStefan Hajnoczi assert(!waited || !use_local_qiov); 146561007b31SStefan Hajnoczi 146661007b31SStefan Hajnoczi tail_buf = qemu_blockalign(bs, align); 146761007b31SStefan Hajnoczi tail_iov = (struct iovec) { 146861007b31SStefan Hajnoczi .iov_base = tail_buf, 146961007b31SStefan Hajnoczi .iov_len = align, 147061007b31SStefan Hajnoczi }; 147161007b31SStefan Hajnoczi qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 147261007b31SStefan Hajnoczi 14739a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 147461007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 147561007b31SStefan Hajnoczi align, &tail_qiov, 0); 147661007b31SStefan Hajnoczi if (ret < 0) { 147761007b31SStefan Hajnoczi goto fail; 147861007b31SStefan Hajnoczi } 14799a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 148061007b31SStefan Hajnoczi 148161007b31SStefan Hajnoczi if (!use_local_qiov) { 148261007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 1); 148361007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 148461007b31SStefan Hajnoczi use_local_qiov = true; 148561007b31SStefan Hajnoczi } 148661007b31SStefan Hajnoczi 148761007b31SStefan Hajnoczi tail_bytes = (offset + bytes) & (align - 1); 148861007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 148961007b31SStefan Hajnoczi 149061007b31SStefan Hajnoczi bytes = ROUND_UP(bytes, align); 149161007b31SStefan Hajnoczi } 149261007b31SStefan Hajnoczi 149361007b31SStefan Hajnoczi ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, 149461007b31SStefan Hajnoczi use_local_qiov ? 
&local_qiov : qiov, 149561007b31SStefan Hajnoczi flags); 149661007b31SStefan Hajnoczi 149761007b31SStefan Hajnoczi fail: 149861007b31SStefan Hajnoczi 149961007b31SStefan Hajnoczi if (use_local_qiov) { 150061007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 150161007b31SStefan Hajnoczi } 150261007b31SStefan Hajnoczi qemu_vfree(head_buf); 150361007b31SStefan Hajnoczi qemu_vfree(tail_buf); 15049eeb6dd1SFam Zheng out: 15059eeb6dd1SFam Zheng tracked_request_end(&req); 150661007b31SStefan Hajnoczi return ret; 150761007b31SStefan Hajnoczi } 150861007b31SStefan Hajnoczi 150961007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, 151061007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 151161007b31SStefan Hajnoczi BdrvRequestFlags flags) 151261007b31SStefan Hajnoczi { 151361007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 151461007b31SStefan Hajnoczi return -EINVAL; 151561007b31SStefan Hajnoczi } 151661007b31SStefan Hajnoczi 1517cab3a356SKevin Wolf return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS, 151861007b31SStefan Hajnoczi nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 151961007b31SStefan Hajnoczi } 152061007b31SStefan Hajnoczi 152161007b31SStefan Hajnoczi int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, 152261007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *qiov) 152361007b31SStefan Hajnoczi { 152461007b31SStefan Hajnoczi trace_bdrv_co_writev(bs, sector_num, nb_sectors); 152561007b31SStefan Hajnoczi 152661007b31SStefan Hajnoczi return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); 152761007b31SStefan Hajnoczi } 152861007b31SStefan Hajnoczi 152961007b31SStefan Hajnoczi int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, 153061007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 153161007b31SStefan Hajnoczi BdrvRequestFlags flags) 153261007b31SStefan Hajnoczi { 153361007b31SStefan Hajnoczi trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); 153461007b31SStefan Hajnoczi 153561007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 153661007b31SStefan Hajnoczi flags &= ~BDRV_REQ_MAY_UNMAP; 153761007b31SStefan Hajnoczi } 153861007b31SStefan Hajnoczi 1539d01c07f2SFam Zheng return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, 154061007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags); 154161007b31SStefan Hajnoczi } 154261007b31SStefan Hajnoczi 154361007b31SStefan Hajnoczi typedef struct BdrvCoGetBlockStatusData { 154461007b31SStefan Hajnoczi BlockDriverState *bs; 154561007b31SStefan Hajnoczi BlockDriverState *base; 154667a0fd2aSFam Zheng BlockDriverState **file; 154761007b31SStefan Hajnoczi int64_t sector_num; 154861007b31SStefan Hajnoczi int nb_sectors; 154961007b31SStefan Hajnoczi int *pnum; 155061007b31SStefan Hajnoczi int64_t ret; 155161007b31SStefan Hajnoczi bool done; 155261007b31SStefan Hajnoczi } BdrvCoGetBlockStatusData; 155361007b31SStefan Hajnoczi 155461007b31SStefan Hajnoczi /* 155561007b31SStefan Hajnoczi * Returns the allocation status of the specified sectors. 155661007b31SStefan Hajnoczi * Drivers not implementing the functionality are assumed to not support 155761007b31SStefan Hajnoczi * backing files, hence all their sectors are reported as allocated. 155861007b31SStefan Hajnoczi * 155961007b31SStefan Hajnoczi * If 'sector_num' is beyond the end of the disk image the return value is 0 156061007b31SStefan Hajnoczi * and 'pnum' is set to 0. 
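 *
 * The returned value is a bitmask of BDRV_BLOCK_* flags.  As a minimal,
 * illustrative caller-side sketch (the local variables are hypothetical,
 * not part of this function): to check whether the queried range reads
 * as zeroes, a caller can do
 *
 *     int64_t status = bdrv_get_block_status(bs, sector_num, nb_sectors,
 *                                             &pnum, &file);
 *     bool reads_as_zero = status >= 0 && (status & BDRV_BLOCK_ZERO);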
156161007b31SStefan Hajnoczi * 156261007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 156361007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 156461007b31SStefan Hajnoczi * allocated/unallocated state. 156561007b31SStefan Hajnoczi * 156661007b31SStefan Hajnoczi * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 156761007b31SStefan Hajnoczi * beyond the end of the disk image it will be clamped. 156867a0fd2aSFam Zheng * 156967a0fd2aSFam Zheng * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file' 157067a0fd2aSFam Zheng * points to the BDS which the sector range is allocated in. 157161007b31SStefan Hajnoczi */ 157261007b31SStefan Hajnoczi static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 157361007b31SStefan Hajnoczi int64_t sector_num, 157467a0fd2aSFam Zheng int nb_sectors, int *pnum, 157567a0fd2aSFam Zheng BlockDriverState **file) 157661007b31SStefan Hajnoczi { 157761007b31SStefan Hajnoczi int64_t total_sectors; 157861007b31SStefan Hajnoczi int64_t n; 157961007b31SStefan Hajnoczi int64_t ret, ret2; 158061007b31SStefan Hajnoczi 158161007b31SStefan Hajnoczi total_sectors = bdrv_nb_sectors(bs); 158261007b31SStefan Hajnoczi if (total_sectors < 0) { 158361007b31SStefan Hajnoczi return total_sectors; 158461007b31SStefan Hajnoczi } 158561007b31SStefan Hajnoczi 158661007b31SStefan Hajnoczi if (sector_num >= total_sectors) { 158761007b31SStefan Hajnoczi *pnum = 0; 158861007b31SStefan Hajnoczi return 0; 158961007b31SStefan Hajnoczi } 159061007b31SStefan Hajnoczi 159161007b31SStefan Hajnoczi n = total_sectors - sector_num; 159261007b31SStefan Hajnoczi if (n < nb_sectors) { 159361007b31SStefan Hajnoczi nb_sectors = n; 159461007b31SStefan Hajnoczi } 159561007b31SStefan Hajnoczi 159661007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_get_block_status) { 159761007b31SStefan Hajnoczi *pnum = nb_sectors; 159861007b31SStefan Hajnoczi ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 159961007b31SStefan Hajnoczi if (bs->drv->protocol_name) { 160061007b31SStefan Hajnoczi ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 160161007b31SStefan Hajnoczi } 160261007b31SStefan Hajnoczi return ret; 160361007b31SStefan Hajnoczi } 160461007b31SStefan Hajnoczi 160567a0fd2aSFam Zheng *file = NULL; 160667a0fd2aSFam Zheng ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum, 160767a0fd2aSFam Zheng file); 160861007b31SStefan Hajnoczi if (ret < 0) { 160961007b31SStefan Hajnoczi *pnum = 0; 161061007b31SStefan Hajnoczi return ret; 161161007b31SStefan Hajnoczi } 161261007b31SStefan Hajnoczi 161361007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_RAW) { 161461007b31SStefan Hajnoczi assert(ret & BDRV_BLOCK_OFFSET_VALID); 16159a4f4c31SKevin Wolf return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS, 161667a0fd2aSFam Zheng *pnum, pnum, file); 161761007b31SStefan Hajnoczi } 161861007b31SStefan Hajnoczi 161961007b31SStefan Hajnoczi if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 162061007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ALLOCATED; 1621a53f1a95SPaolo Bonzini } else { 162261007b31SStefan Hajnoczi if (bdrv_unallocated_blocks_are_zero(bs)) { 162361007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 1624760e0063SKevin Wolf } else if (bs->backing) { 1625760e0063SKevin Wolf BlockDriverState *bs2 = bs->backing->bs; 162661007b31SStefan Hajnoczi int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 162761007b31SStefan Hajnoczi if (nb_sectors2 >= 0 && sector_num >= 
nb_sectors2) { 162861007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 162961007b31SStefan Hajnoczi } 163061007b31SStefan Hajnoczi } 163161007b31SStefan Hajnoczi } 163261007b31SStefan Hajnoczi 1633ac987b30SFam Zheng if (*file && *file != bs && 163461007b31SStefan Hajnoczi (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 163561007b31SStefan Hajnoczi (ret & BDRV_BLOCK_OFFSET_VALID)) { 163667a0fd2aSFam Zheng BlockDriverState *file2; 163761007b31SStefan Hajnoczi int file_pnum; 163861007b31SStefan Hajnoczi 1639ac987b30SFam Zheng ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS, 164067a0fd2aSFam Zheng *pnum, &file_pnum, &file2); 164161007b31SStefan Hajnoczi if (ret2 >= 0) { 164261007b31SStefan Hajnoczi /* Ignore errors. This is just providing extra information, it 164361007b31SStefan Hajnoczi * is useful but not necessary. 164461007b31SStefan Hajnoczi */ 164561007b31SStefan Hajnoczi if (!file_pnum) { 164661007b31SStefan Hajnoczi /* !file_pnum indicates an offset at or beyond the EOF; it is 164761007b31SStefan Hajnoczi * perfectly valid for the format block driver to point to such 164861007b31SStefan Hajnoczi * offsets, so catch it and mark everything as zero */ 164961007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 165061007b31SStefan Hajnoczi } else { 165161007b31SStefan Hajnoczi /* Limit request to the range reported by the protocol driver */ 165261007b31SStefan Hajnoczi *pnum = file_pnum; 165361007b31SStefan Hajnoczi ret |= (ret2 & BDRV_BLOCK_ZERO); 165461007b31SStefan Hajnoczi } 165561007b31SStefan Hajnoczi } 165661007b31SStefan Hajnoczi } 165761007b31SStefan Hajnoczi 165861007b31SStefan Hajnoczi return ret; 165961007b31SStefan Hajnoczi } 166061007b31SStefan Hajnoczi 1661ba3f0e25SFam Zheng static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs, 1662ba3f0e25SFam Zheng BlockDriverState *base, 1663ba3f0e25SFam Zheng int64_t sector_num, 1664ba3f0e25SFam Zheng int nb_sectors, 166567a0fd2aSFam Zheng int *pnum, 166667a0fd2aSFam Zheng BlockDriverState **file) 1667ba3f0e25SFam Zheng { 1668ba3f0e25SFam Zheng BlockDriverState *p; 1669ba3f0e25SFam Zheng int64_t ret = 0; 1670ba3f0e25SFam Zheng 1671ba3f0e25SFam Zheng assert(bs != base); 1672760e0063SKevin Wolf for (p = bs; p != base; p = backing_bs(p)) { 167367a0fd2aSFam Zheng ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file); 1674ba3f0e25SFam Zheng if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) { 1675ba3f0e25SFam Zheng break; 1676ba3f0e25SFam Zheng } 1677ba3f0e25SFam Zheng /* [sector_num, pnum] unallocated on this layer, which could be only 1678ba3f0e25SFam Zheng * the first part of [sector_num, nb_sectors]. 
*/ 1679ba3f0e25SFam Zheng nb_sectors = MIN(nb_sectors, *pnum); 1680ba3f0e25SFam Zheng } 1681ba3f0e25SFam Zheng return ret; 1682ba3f0e25SFam Zheng } 1683ba3f0e25SFam Zheng 1684ba3f0e25SFam Zheng /* Coroutine wrapper for bdrv_get_block_status_above() */ 1685ba3f0e25SFam Zheng static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque) 168661007b31SStefan Hajnoczi { 168761007b31SStefan Hajnoczi BdrvCoGetBlockStatusData *data = opaque; 168861007b31SStefan Hajnoczi 1689ba3f0e25SFam Zheng data->ret = bdrv_co_get_block_status_above(data->bs, data->base, 1690ba3f0e25SFam Zheng data->sector_num, 1691ba3f0e25SFam Zheng data->nb_sectors, 169267a0fd2aSFam Zheng data->pnum, 169367a0fd2aSFam Zheng data->file); 169461007b31SStefan Hajnoczi data->done = true; 169561007b31SStefan Hajnoczi } 169661007b31SStefan Hajnoczi 169761007b31SStefan Hajnoczi /* 1698ba3f0e25SFam Zheng * Synchronous wrapper around bdrv_co_get_block_status_above(). 169961007b31SStefan Hajnoczi * 1700ba3f0e25SFam Zheng * See bdrv_co_get_block_status_above() for details. 170161007b31SStefan Hajnoczi */ 1702ba3f0e25SFam Zheng int64_t bdrv_get_block_status_above(BlockDriverState *bs, 1703ba3f0e25SFam Zheng BlockDriverState *base, 1704ba3f0e25SFam Zheng int64_t sector_num, 170567a0fd2aSFam Zheng int nb_sectors, int *pnum, 170667a0fd2aSFam Zheng BlockDriverState **file) 170761007b31SStefan Hajnoczi { 170861007b31SStefan Hajnoczi Coroutine *co; 170961007b31SStefan Hajnoczi BdrvCoGetBlockStatusData data = { 171061007b31SStefan Hajnoczi .bs = bs, 1711ba3f0e25SFam Zheng .base = base, 171267a0fd2aSFam Zheng .file = file, 171361007b31SStefan Hajnoczi .sector_num = sector_num, 171461007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 171561007b31SStefan Hajnoczi .pnum = pnum, 171661007b31SStefan Hajnoczi .done = false, 171761007b31SStefan Hajnoczi }; 171861007b31SStefan Hajnoczi 171961007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 172061007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 1721ba3f0e25SFam Zheng bdrv_get_block_status_above_co_entry(&data); 172261007b31SStefan Hajnoczi } else { 172361007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 172461007b31SStefan Hajnoczi 1725ba3f0e25SFam Zheng co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry); 172661007b31SStefan Hajnoczi qemu_coroutine_enter(co, &data); 172761007b31SStefan Hajnoczi while (!data.done) { 172861007b31SStefan Hajnoczi aio_poll(aio_context, true); 172961007b31SStefan Hajnoczi } 173061007b31SStefan Hajnoczi } 173161007b31SStefan Hajnoczi return data.ret; 173261007b31SStefan Hajnoczi } 173361007b31SStefan Hajnoczi 1734ba3f0e25SFam Zheng int64_t bdrv_get_block_status(BlockDriverState *bs, 1735ba3f0e25SFam Zheng int64_t sector_num, 173667a0fd2aSFam Zheng int nb_sectors, int *pnum, 173767a0fd2aSFam Zheng BlockDriverState **file) 1738ba3f0e25SFam Zheng { 1739760e0063SKevin Wolf return bdrv_get_block_status_above(bs, backing_bs(bs), 174067a0fd2aSFam Zheng sector_num, nb_sectors, pnum, file); 1741ba3f0e25SFam Zheng } 1742ba3f0e25SFam Zheng 174361007b31SStefan Hajnoczi int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 174461007b31SStefan Hajnoczi int nb_sectors, int *pnum) 174561007b31SStefan Hajnoczi { 174667a0fd2aSFam Zheng BlockDriverState *file; 174767a0fd2aSFam Zheng int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum, 174867a0fd2aSFam Zheng &file); 174961007b31SStefan Hajnoczi if (ret < 0) { 175061007b31SStefan Hajnoczi return ret; 175161007b31SStefan Hajnoczi } 
175261007b31SStefan Hajnoczi return !!(ret & BDRV_BLOCK_ALLOCATED); 175361007b31SStefan Hajnoczi } 175461007b31SStefan Hajnoczi 175561007b31SStefan Hajnoczi /* 175661007b31SStefan Hajnoczi * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 175761007b31SStefan Hajnoczi * 175861007b31SStefan Hajnoczi * Return true if the given sector is allocated in any image between 175961007b31SStefan Hajnoczi * BASE and TOP (inclusive). BASE can be NULL to check if the given 176061007b31SStefan Hajnoczi * sector is allocated in any image of the chain. Return false otherwise. 176161007b31SStefan Hajnoczi * 176261007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 176361007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 176461007b31SStefan Hajnoczi * allocated/unallocated state. 176561007b31SStefan Hajnoczi * 176661007b31SStefan Hajnoczi */ 176761007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top, 176861007b31SStefan Hajnoczi BlockDriverState *base, 176961007b31SStefan Hajnoczi int64_t sector_num, 177061007b31SStefan Hajnoczi int nb_sectors, int *pnum) 177161007b31SStefan Hajnoczi { 177261007b31SStefan Hajnoczi BlockDriverState *intermediate; 177361007b31SStefan Hajnoczi int ret, n = nb_sectors; 177461007b31SStefan Hajnoczi 177561007b31SStefan Hajnoczi intermediate = top; 177661007b31SStefan Hajnoczi while (intermediate && intermediate != base) { 177761007b31SStefan Hajnoczi int pnum_inter; 177861007b31SStefan Hajnoczi ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 177961007b31SStefan Hajnoczi &pnum_inter); 178061007b31SStefan Hajnoczi if (ret < 0) { 178161007b31SStefan Hajnoczi return ret; 178261007b31SStefan Hajnoczi } else if (ret) { 178361007b31SStefan Hajnoczi *pnum = pnum_inter; 178461007b31SStefan Hajnoczi return 1; 178561007b31SStefan Hajnoczi } 178661007b31SStefan Hajnoczi 178761007b31SStefan Hajnoczi /* 178861007b31SStefan Hajnoczi * [sector_num, nb_sectors] is unallocated on top but intermediate 178961007b31SStefan Hajnoczi * might have 179061007b31SStefan Hajnoczi * 179161007b31SStefan Hajnoczi * [sector_num+x, nr_sectors] allocated. 
179261007b31SStefan Hajnoczi */ 179361007b31SStefan Hajnoczi if (n > pnum_inter && 179461007b31SStefan Hajnoczi (intermediate == top || 179561007b31SStefan Hajnoczi sector_num + pnum_inter < intermediate->total_sectors)) { 179661007b31SStefan Hajnoczi n = pnum_inter; 179761007b31SStefan Hajnoczi } 179861007b31SStefan Hajnoczi 1799760e0063SKevin Wolf intermediate = backing_bs(intermediate); 180061007b31SStefan Hajnoczi } 180161007b31SStefan Hajnoczi 180261007b31SStefan Hajnoczi *pnum = n; 180361007b31SStefan Hajnoczi return 0; 180461007b31SStefan Hajnoczi } 180561007b31SStefan Hajnoczi 180661007b31SStefan Hajnoczi int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 180761007b31SStefan Hajnoczi const uint8_t *buf, int nb_sectors) 180861007b31SStefan Hajnoczi { 180961007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 181061007b31SStefan Hajnoczi int ret; 181161007b31SStefan Hajnoczi 181261007b31SStefan Hajnoczi if (!drv) { 181361007b31SStefan Hajnoczi return -ENOMEDIUM; 181461007b31SStefan Hajnoczi } 181561007b31SStefan Hajnoczi if (!drv->bdrv_write_compressed) { 181661007b31SStefan Hajnoczi return -ENOTSUP; 181761007b31SStefan Hajnoczi } 181861007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 181961007b31SStefan Hajnoczi if (ret < 0) { 182061007b31SStefan Hajnoczi return ret; 182161007b31SStefan Hajnoczi } 182261007b31SStefan Hajnoczi 182361007b31SStefan Hajnoczi assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 182461007b31SStefan Hajnoczi 182561007b31SStefan Hajnoczi return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 182661007b31SStefan Hajnoczi } 182761007b31SStefan Hajnoczi 182861007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 182961007b31SStefan Hajnoczi int64_t pos, int size) 183061007b31SStefan Hajnoczi { 183161007b31SStefan Hajnoczi QEMUIOVector qiov; 183261007b31SStefan Hajnoczi struct iovec iov = { 183361007b31SStefan Hajnoczi .iov_base = (void *) buf, 183461007b31SStefan Hajnoczi .iov_len = size, 183561007b31SStefan Hajnoczi }; 183661007b31SStefan Hajnoczi 183761007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 183861007b31SStefan Hajnoczi return bdrv_writev_vmstate(bs, &qiov, pos); 183961007b31SStefan Hajnoczi } 184061007b31SStefan Hajnoczi 184161007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 184261007b31SStefan Hajnoczi { 184361007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 184461007b31SStefan Hajnoczi 184561007b31SStefan Hajnoczi if (!drv) { 184661007b31SStefan Hajnoczi return -ENOMEDIUM; 184761007b31SStefan Hajnoczi } else if (drv->bdrv_save_vmstate) { 184861007b31SStefan Hajnoczi return drv->bdrv_save_vmstate(bs, qiov, pos); 184961007b31SStefan Hajnoczi } else if (bs->file) { 18509a4f4c31SKevin Wolf return bdrv_writev_vmstate(bs->file->bs, qiov, pos); 185161007b31SStefan Hajnoczi } 185261007b31SStefan Hajnoczi 185361007b31SStefan Hajnoczi return -ENOTSUP; 185461007b31SStefan Hajnoczi } 185561007b31SStefan Hajnoczi 185661007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 185761007b31SStefan Hajnoczi int64_t pos, int size) 185861007b31SStefan Hajnoczi { 185961007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 186061007b31SStefan Hajnoczi if (!drv) 186161007b31SStefan Hajnoczi return -ENOMEDIUM; 186261007b31SStefan Hajnoczi if (drv->bdrv_load_vmstate) 186361007b31SStefan Hajnoczi return drv->bdrv_load_vmstate(bs, buf, pos, size); 186461007b31SStefan Hajnoczi if (bs->file) 
18659a4f4c31SKevin Wolf return bdrv_load_vmstate(bs->file->bs, buf, pos, size); 186661007b31SStefan Hajnoczi return -ENOTSUP; 186761007b31SStefan Hajnoczi } 186861007b31SStefan Hajnoczi 186961007b31SStefan Hajnoczi /**************************************************************/ 187061007b31SStefan Hajnoczi /* async I/Os */ 187161007b31SStefan Hajnoczi 187261007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 187361007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 187461007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 187561007b31SStefan Hajnoczi { 187661007b31SStefan Hajnoczi trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 187761007b31SStefan Hajnoczi 187861007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 187961007b31SStefan Hajnoczi cb, opaque, false); 188061007b31SStefan Hajnoczi } 188161007b31SStefan Hajnoczi 188261007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 188361007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 188461007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 188561007b31SStefan Hajnoczi { 188661007b31SStefan Hajnoczi trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 188761007b31SStefan Hajnoczi 188861007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 188961007b31SStefan Hajnoczi cb, opaque, true); 189061007b31SStefan Hajnoczi } 189161007b31SStefan Hajnoczi 189261007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb) 189361007b31SStefan Hajnoczi { 189461007b31SStefan Hajnoczi qemu_aio_ref(acb); 189561007b31SStefan Hajnoczi bdrv_aio_cancel_async(acb); 189661007b31SStefan Hajnoczi while (acb->refcnt > 1) { 189761007b31SStefan Hajnoczi if (acb->aiocb_info->get_aio_context) { 189861007b31SStefan Hajnoczi aio_poll(acb->aiocb_info->get_aio_context(acb), true); 189961007b31SStefan Hajnoczi } else if (acb->bs) { 190061007b31SStefan Hajnoczi aio_poll(bdrv_get_aio_context(acb->bs), true); 190161007b31SStefan Hajnoczi } else { 190261007b31SStefan Hajnoczi abort(); 190361007b31SStefan Hajnoczi } 190461007b31SStefan Hajnoczi } 190561007b31SStefan Hajnoczi qemu_aio_unref(acb); 190661007b31SStefan Hajnoczi } 190761007b31SStefan Hajnoczi 190861007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements 190961007b31SStefan Hajnoczi * cancel_async, otherwise we do nothing and let the request normally complete. 191061007b31SStefan Hajnoczi * In either case the completion callback must be called. 
*/ 191161007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb) 191261007b31SStefan Hajnoczi { 191361007b31SStefan Hajnoczi if (acb->aiocb_info->cancel_async) { 191461007b31SStefan Hajnoczi acb->aiocb_info->cancel_async(acb); 191561007b31SStefan Hajnoczi } 191661007b31SStefan Hajnoczi } 191761007b31SStefan Hajnoczi 191861007b31SStefan Hajnoczi /**************************************************************/ 191961007b31SStefan Hajnoczi /* async block device emulation */ 192061007b31SStefan Hajnoczi 192141574268SEric Blake typedef struct BlockRequest { 192241574268SEric Blake union { 192341574268SEric Blake /* Used during read, write, trim */ 192441574268SEric Blake struct { 192541574268SEric Blake int64_t sector; 192641574268SEric Blake int nb_sectors; 192741574268SEric Blake int flags; 192841574268SEric Blake QEMUIOVector *qiov; 192941574268SEric Blake }; 193041574268SEric Blake /* Used during ioctl */ 193141574268SEric Blake struct { 193241574268SEric Blake int req; 193341574268SEric Blake void *buf; 193441574268SEric Blake }; 193541574268SEric Blake }; 193641574268SEric Blake BlockCompletionFunc *cb; 193741574268SEric Blake void *opaque; 193841574268SEric Blake 193941574268SEric Blake int error; 194041574268SEric Blake } BlockRequest; 194141574268SEric Blake 194261007b31SStefan Hajnoczi typedef struct BlockAIOCBCoroutine { 194361007b31SStefan Hajnoczi BlockAIOCB common; 194461007b31SStefan Hajnoczi BlockRequest req; 194561007b31SStefan Hajnoczi bool is_write; 194661007b31SStefan Hajnoczi bool need_bh; 194761007b31SStefan Hajnoczi bool *done; 194861007b31SStefan Hajnoczi QEMUBH* bh; 194961007b31SStefan Hajnoczi } BlockAIOCBCoroutine; 195061007b31SStefan Hajnoczi 195161007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_co_aiocb_info = { 195261007b31SStefan Hajnoczi .aiocb_size = sizeof(BlockAIOCBCoroutine), 195361007b31SStefan Hajnoczi }; 195461007b31SStefan Hajnoczi 195561007b31SStefan Hajnoczi static void bdrv_co_complete(BlockAIOCBCoroutine *acb) 195661007b31SStefan Hajnoczi { 195761007b31SStefan Hajnoczi if (!acb->need_bh) { 195861007b31SStefan Hajnoczi acb->common.cb(acb->common.opaque, acb->req.error); 195961007b31SStefan Hajnoczi qemu_aio_unref(acb); 196061007b31SStefan Hajnoczi } 196161007b31SStefan Hajnoczi } 196261007b31SStefan Hajnoczi 196361007b31SStefan Hajnoczi static void bdrv_co_em_bh(void *opaque) 196461007b31SStefan Hajnoczi { 196561007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 196661007b31SStefan Hajnoczi 196761007b31SStefan Hajnoczi assert(!acb->need_bh); 196861007b31SStefan Hajnoczi qemu_bh_delete(acb->bh); 196961007b31SStefan Hajnoczi bdrv_co_complete(acb); 197061007b31SStefan Hajnoczi } 197161007b31SStefan Hajnoczi 197261007b31SStefan Hajnoczi static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) 197361007b31SStefan Hajnoczi { 197461007b31SStefan Hajnoczi acb->need_bh = false; 197561007b31SStefan Hajnoczi if (acb->req.error != -EINPROGRESS) { 197661007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 197761007b31SStefan Hajnoczi 197861007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 197961007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 198061007b31SStefan Hajnoczi } 198161007b31SStefan Hajnoczi } 198261007b31SStefan Hajnoczi 198361007b31SStefan Hajnoczi /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 198461007b31SStefan Hajnoczi static void coroutine_fn bdrv_co_do_rw(void *opaque) 198561007b31SStefan Hajnoczi { 198661007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 
198761007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 198861007b31SStefan Hajnoczi 198961007b31SStefan Hajnoczi if (!acb->is_write) { 199061007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 199161007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 199261007b31SStefan Hajnoczi } else { 199361007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 199461007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 199561007b31SStefan Hajnoczi } 199661007b31SStefan Hajnoczi 199761007b31SStefan Hajnoczi bdrv_co_complete(acb); 199861007b31SStefan Hajnoczi } 199961007b31SStefan Hajnoczi 200061007b31SStefan Hajnoczi static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 200161007b31SStefan Hajnoczi int64_t sector_num, 200261007b31SStefan Hajnoczi QEMUIOVector *qiov, 200361007b31SStefan Hajnoczi int nb_sectors, 200461007b31SStefan Hajnoczi BdrvRequestFlags flags, 200561007b31SStefan Hajnoczi BlockCompletionFunc *cb, 200661007b31SStefan Hajnoczi void *opaque, 200761007b31SStefan Hajnoczi bool is_write) 200861007b31SStefan Hajnoczi { 200961007b31SStefan Hajnoczi Coroutine *co; 201061007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 201161007b31SStefan Hajnoczi 201261007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 201361007b31SStefan Hajnoczi acb->need_bh = true; 201461007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 201561007b31SStefan Hajnoczi acb->req.sector = sector_num; 201661007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 201761007b31SStefan Hajnoczi acb->req.qiov = qiov; 201861007b31SStefan Hajnoczi acb->req.flags = flags; 201961007b31SStefan Hajnoczi acb->is_write = is_write; 202061007b31SStefan Hajnoczi 202161007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_co_do_rw); 202261007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 202361007b31SStefan Hajnoczi 202461007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 202561007b31SStefan Hajnoczi return &acb->common; 202661007b31SStefan Hajnoczi } 202761007b31SStefan Hajnoczi 202861007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 202961007b31SStefan Hajnoczi { 203061007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 203161007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 203261007b31SStefan Hajnoczi 203361007b31SStefan Hajnoczi acb->req.error = bdrv_co_flush(bs); 203461007b31SStefan Hajnoczi bdrv_co_complete(acb); 203561007b31SStefan Hajnoczi } 203661007b31SStefan Hajnoczi 203761007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, 203861007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 203961007b31SStefan Hajnoczi { 204061007b31SStefan Hajnoczi trace_bdrv_aio_flush(bs, opaque); 204161007b31SStefan Hajnoczi 204261007b31SStefan Hajnoczi Coroutine *co; 204361007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 204461007b31SStefan Hajnoczi 204561007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 204661007b31SStefan Hajnoczi acb->need_bh = true; 204761007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 204861007b31SStefan Hajnoczi 204961007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 205061007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 205161007b31SStefan Hajnoczi 205261007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 205361007b31SStefan Hajnoczi return &acb->common; 205461007b31SStefan Hajnoczi } 205561007b31SStefan Hajnoczi 
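/*
 * The bdrv_aio_* wrappers in this file emulate callback-based AIO on top of
 * the coroutine interface: each one allocates a BlockAIOCBCoroutine, enters
 * a coroutine that performs the corresponding bdrv_co_* operation, and, if
 * the request completes before the submission function returns, defers the
 * completion callback to a bottom half (bdrv_co_maybe_schedule_bh) so the
 * caller never sees its callback run from inside the submission call.
 *
 * Minimal caller-side sketch (the callback name and opaque pointer are
 * illustrative, not part of this file):
 *
 *     static void my_flush_cb(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             error_report("flush failed: %s", strerror(-ret));
 *         }
 *     }
 *
 *     bdrv_aio_flush(bs, my_flush_cb, opaque);
 */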
205661007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 205761007b31SStefan Hajnoczi { 205861007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 205961007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 206061007b31SStefan Hajnoczi 206161007b31SStefan Hajnoczi acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 206261007b31SStefan Hajnoczi bdrv_co_complete(acb); 206361007b31SStefan Hajnoczi } 206461007b31SStefan Hajnoczi 206561007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, 206661007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 206761007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 206861007b31SStefan Hajnoczi { 206961007b31SStefan Hajnoczi Coroutine *co; 207061007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 207161007b31SStefan Hajnoczi 207261007b31SStefan Hajnoczi trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 207361007b31SStefan Hajnoczi 207461007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 207561007b31SStefan Hajnoczi acb->need_bh = true; 207661007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 207761007b31SStefan Hajnoczi acb->req.sector = sector_num; 207861007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 207961007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 208061007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 208161007b31SStefan Hajnoczi 208261007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 208361007b31SStefan Hajnoczi return &acb->common; 208461007b31SStefan Hajnoczi } 208561007b31SStefan Hajnoczi 208661007b31SStefan Hajnoczi void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 208761007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 208861007b31SStefan Hajnoczi { 208961007b31SStefan Hajnoczi BlockAIOCB *acb; 209061007b31SStefan Hajnoczi 2091c84b3192SPaolo Bonzini acb = g_malloc(aiocb_info->aiocb_size); 209261007b31SStefan Hajnoczi acb->aiocb_info = aiocb_info; 209361007b31SStefan Hajnoczi acb->bs = bs; 209461007b31SStefan Hajnoczi acb->cb = cb; 209561007b31SStefan Hajnoczi acb->opaque = opaque; 209661007b31SStefan Hajnoczi acb->refcnt = 1; 209761007b31SStefan Hajnoczi return acb; 209861007b31SStefan Hajnoczi } 209961007b31SStefan Hajnoczi 210061007b31SStefan Hajnoczi void qemu_aio_ref(void *p) 210161007b31SStefan Hajnoczi { 210261007b31SStefan Hajnoczi BlockAIOCB *acb = p; 210361007b31SStefan Hajnoczi acb->refcnt++; 210461007b31SStefan Hajnoczi } 210561007b31SStefan Hajnoczi 210661007b31SStefan Hajnoczi void qemu_aio_unref(void *p) 210761007b31SStefan Hajnoczi { 210861007b31SStefan Hajnoczi BlockAIOCB *acb = p; 210961007b31SStefan Hajnoczi assert(acb->refcnt > 0); 211061007b31SStefan Hajnoczi if (--acb->refcnt == 0) { 2111c84b3192SPaolo Bonzini g_free(acb); 211261007b31SStefan Hajnoczi } 211361007b31SStefan Hajnoczi } 211461007b31SStefan Hajnoczi 211561007b31SStefan Hajnoczi /**************************************************************/ 211661007b31SStefan Hajnoczi /* Coroutine block device emulation */ 211761007b31SStefan Hajnoczi 211861007b31SStefan Hajnoczi static void coroutine_fn bdrv_flush_co_entry(void *opaque) 211961007b31SStefan Hajnoczi { 212061007b31SStefan Hajnoczi RwCo *rwco = opaque; 212161007b31SStefan Hajnoczi 212261007b31SStefan Hajnoczi rwco->ret = bdrv_co_flush(rwco->bs); 212361007b31SStefan Hajnoczi } 212461007b31SStefan Hajnoczi 212561007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState 
*bs) 212661007b31SStefan Hajnoczi { 212761007b31SStefan Hajnoczi int ret; 2128cdb5e315SFam Zheng BdrvTrackedRequest req; 212961007b31SStefan Hajnoczi 21301b6bc94dSDimitris Aragiorgis if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 21311b6bc94dSDimitris Aragiorgis bdrv_is_sg(bs)) { 213261007b31SStefan Hajnoczi return 0; 213361007b31SStefan Hajnoczi } 213461007b31SStefan Hajnoczi 2135cdb5e315SFam Zheng tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH); 2136c32b82afSPavel Dovgalyuk 2137c32b82afSPavel Dovgalyuk /* Write back all layers by calling one driver function */ 2138c32b82afSPavel Dovgalyuk if (bs->drv->bdrv_co_flush) { 2139c32b82afSPavel Dovgalyuk ret = bs->drv->bdrv_co_flush(bs); 2140c32b82afSPavel Dovgalyuk goto out; 2141c32b82afSPavel Dovgalyuk } 2142c32b82afSPavel Dovgalyuk 214361007b31SStefan Hajnoczi /* Write back cached data to the OS even with cache=unsafe */ 214461007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 214561007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_os) { 214661007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_os(bs); 214761007b31SStefan Hajnoczi if (ret < 0) { 2148cdb5e315SFam Zheng goto out; 214961007b31SStefan Hajnoczi } 215061007b31SStefan Hajnoczi } 215161007b31SStefan Hajnoczi 215261007b31SStefan Hajnoczi /* But don't actually force it to the disk with cache=unsafe */ 215361007b31SStefan Hajnoczi if (bs->open_flags & BDRV_O_NO_FLUSH) { 215461007b31SStefan Hajnoczi goto flush_parent; 215561007b31SStefan Hajnoczi } 215661007b31SStefan Hajnoczi 215761007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 215861007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_disk) { 215961007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_disk(bs); 216061007b31SStefan Hajnoczi } else if (bs->drv->bdrv_aio_flush) { 216161007b31SStefan Hajnoczi BlockAIOCB *acb; 216261007b31SStefan Hajnoczi CoroutineIOCompletion co = { 216361007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 216461007b31SStefan Hajnoczi }; 216561007b31SStefan Hajnoczi 216661007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 216761007b31SStefan Hajnoczi if (acb == NULL) { 216861007b31SStefan Hajnoczi ret = -EIO; 216961007b31SStefan Hajnoczi } else { 217061007b31SStefan Hajnoczi qemu_coroutine_yield(); 217161007b31SStefan Hajnoczi ret = co.ret; 217261007b31SStefan Hajnoczi } 217361007b31SStefan Hajnoczi } else { 217461007b31SStefan Hajnoczi /* 217561007b31SStefan Hajnoczi * Some block drivers always operate in either writethrough or unsafe 217661007b31SStefan Hajnoczi * mode and don't support bdrv_flush therefore. Usually qemu doesn't 217761007b31SStefan Hajnoczi * know how the server works (because the behaviour is hardcoded or 217861007b31SStefan Hajnoczi * depends on server-side configuration), so we can't ensure that 217961007b31SStefan Hajnoczi * everything is safe on disk. Returning an error doesn't work because 218061007b31SStefan Hajnoczi * that would break guests even if the server operates in writethrough 218161007b31SStefan Hajnoczi * mode. 218261007b31SStefan Hajnoczi * 218361007b31SStefan Hajnoczi * Let's hope the user knows what he's doing. 218461007b31SStefan Hajnoczi */ 218561007b31SStefan Hajnoczi ret = 0; 218661007b31SStefan Hajnoczi } 218761007b31SStefan Hajnoczi if (ret < 0) { 2188cdb5e315SFam Zheng goto out; 218961007b31SStefan Hajnoczi } 219061007b31SStefan Hajnoczi 219161007b31SStefan Hajnoczi /* Now flush the underlying protocol. 
It will also have BDRV_O_NO_FLUSH 219261007b31SStefan Hajnoczi * in the case of cache=unsafe, so there are no useless flushes. 219361007b31SStefan Hajnoczi */ 219461007b31SStefan Hajnoczi flush_parent: 2195cdb5e315SFam Zheng ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0; 2196cdb5e315SFam Zheng out: 2197cdb5e315SFam Zheng tracked_request_end(&req); 2198cdb5e315SFam Zheng return ret; 219961007b31SStefan Hajnoczi } 220061007b31SStefan Hajnoczi 220161007b31SStefan Hajnoczi int bdrv_flush(BlockDriverState *bs) 220261007b31SStefan Hajnoczi { 220361007b31SStefan Hajnoczi Coroutine *co; 220461007b31SStefan Hajnoczi RwCo rwco = { 220561007b31SStefan Hajnoczi .bs = bs, 220661007b31SStefan Hajnoczi .ret = NOT_DONE, 220761007b31SStefan Hajnoczi }; 220861007b31SStefan Hajnoczi 220961007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 221061007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 221161007b31SStefan Hajnoczi bdrv_flush_co_entry(&rwco); 221261007b31SStefan Hajnoczi } else { 221361007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 221461007b31SStefan Hajnoczi 221561007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_flush_co_entry); 221661007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 221761007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 221861007b31SStefan Hajnoczi aio_poll(aio_context, true); 221961007b31SStefan Hajnoczi } 222061007b31SStefan Hajnoczi } 222161007b31SStefan Hajnoczi 222261007b31SStefan Hajnoczi return rwco.ret; 222361007b31SStefan Hajnoczi } 222461007b31SStefan Hajnoczi 222561007b31SStefan Hajnoczi typedef struct DiscardCo { 222661007b31SStefan Hajnoczi BlockDriverState *bs; 222761007b31SStefan Hajnoczi int64_t sector_num; 222861007b31SStefan Hajnoczi int nb_sectors; 222961007b31SStefan Hajnoczi int ret; 223061007b31SStefan Hajnoczi } DiscardCo; 223161007b31SStefan Hajnoczi static void coroutine_fn bdrv_discard_co_entry(void *opaque) 223261007b31SStefan Hajnoczi { 223361007b31SStefan Hajnoczi DiscardCo *rwco = opaque; 223461007b31SStefan Hajnoczi 223561007b31SStefan Hajnoczi rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 223661007b31SStefan Hajnoczi } 223761007b31SStefan Hajnoczi 223861007b31SStefan Hajnoczi int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, 223961007b31SStefan Hajnoczi int nb_sectors) 224061007b31SStefan Hajnoczi { 2241b1066c87SFam Zheng BdrvTrackedRequest req; 224261007b31SStefan Hajnoczi int max_discard, ret; 224361007b31SStefan Hajnoczi 224461007b31SStefan Hajnoczi if (!bs->drv) { 224561007b31SStefan Hajnoczi return -ENOMEDIUM; 224661007b31SStefan Hajnoczi } 224761007b31SStefan Hajnoczi 224861007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 224961007b31SStefan Hajnoczi if (ret < 0) { 225061007b31SStefan Hajnoczi return ret; 225161007b31SStefan Hajnoczi } else if (bs->read_only) { 2252eaf5fe2dSPaolo Bonzini return -EPERM; 225361007b31SStefan Hajnoczi } 225404c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 225561007b31SStefan Hajnoczi 225661007b31SStefan Hajnoczi /* Do nothing if disabled. 
225661007b31SStefan Hajnoczi     /* Do nothing if disabled. */
225761007b31SStefan Hajnoczi     if (!(bs->open_flags & BDRV_O_UNMAP)) {
225861007b31SStefan Hajnoczi         return 0;
225961007b31SStefan Hajnoczi     }
226061007b31SStefan Hajnoczi 
226161007b31SStefan Hajnoczi     if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
226261007b31SStefan Hajnoczi         return 0;
226361007b31SStefan Hajnoczi     }
226461007b31SStefan Hajnoczi 
2265b1066c87SFam Zheng     tracked_request_begin(&req, bs, sector_num, nb_sectors,
2266b1066c87SFam Zheng                           BDRV_TRACKED_DISCARD);
226750824995SFam Zheng     bdrv_set_dirty(bs, sector_num, nb_sectors);
226850824995SFam Zheng 
226961007b31SStefan Hajnoczi     max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
227061007b31SStefan Hajnoczi     while (nb_sectors > 0) {
227161007b31SStefan Hajnoczi         int ret;
227261007b31SStefan Hajnoczi         int num = nb_sectors;
227361007b31SStefan Hajnoczi 
227461007b31SStefan Hajnoczi         /* align request */
227561007b31SStefan Hajnoczi         if (bs->bl.discard_alignment &&
227661007b31SStefan Hajnoczi             num >= bs->bl.discard_alignment &&
227761007b31SStefan Hajnoczi             sector_num % bs->bl.discard_alignment) {
227861007b31SStefan Hajnoczi             if (num > bs->bl.discard_alignment) {
227961007b31SStefan Hajnoczi                 num = bs->bl.discard_alignment;
228061007b31SStefan Hajnoczi             }
228161007b31SStefan Hajnoczi             num -= sector_num % bs->bl.discard_alignment;
228261007b31SStefan Hajnoczi         }
228361007b31SStefan Hajnoczi 
228461007b31SStefan Hajnoczi         /* limit request size */
228561007b31SStefan Hajnoczi         if (num > max_discard) {
228661007b31SStefan Hajnoczi             num = max_discard;
228761007b31SStefan Hajnoczi         }
228861007b31SStefan Hajnoczi 
228961007b31SStefan Hajnoczi         if (bs->drv->bdrv_co_discard) {
229061007b31SStefan Hajnoczi             ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
229161007b31SStefan Hajnoczi         } else {
229261007b31SStefan Hajnoczi             BlockAIOCB *acb;
229361007b31SStefan Hajnoczi             CoroutineIOCompletion co = {
229461007b31SStefan Hajnoczi                 .coroutine = qemu_coroutine_self(),
229561007b31SStefan Hajnoczi             };
229661007b31SStefan Hajnoczi 
229761007b31SStefan Hajnoczi             acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
229861007b31SStefan Hajnoczi                                             bdrv_co_io_em_complete, &co);
229961007b31SStefan Hajnoczi             if (acb == NULL) {
2300b1066c87SFam Zheng                 ret = -EIO;
2301b1066c87SFam Zheng                 goto out;
230261007b31SStefan Hajnoczi             } else {
230361007b31SStefan Hajnoczi                 qemu_coroutine_yield();
230461007b31SStefan Hajnoczi                 ret = co.ret;
230561007b31SStefan Hajnoczi             }
230661007b31SStefan Hajnoczi         }
230761007b31SStefan Hajnoczi         if (ret && ret != -ENOTSUP) {
2308b1066c87SFam Zheng             goto out;
230961007b31SStefan Hajnoczi         }
231061007b31SStefan Hajnoczi 
231161007b31SStefan Hajnoczi         sector_num += num;
231261007b31SStefan Hajnoczi         nb_sectors -= num;
231361007b31SStefan Hajnoczi     }
2314b1066c87SFam Zheng     ret = 0;
2315b1066c87SFam Zheng out:
2316b1066c87SFam Zheng     tracked_request_end(&req);
2317b1066c87SFam Zheng     return ret;
231861007b31SStefan Hajnoczi }
231961007b31SStefan Hajnoczi 
232061007b31SStefan Hajnoczi int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
232161007b31SStefan Hajnoczi {
232261007b31SStefan Hajnoczi     Coroutine *co;
232361007b31SStefan Hajnoczi     DiscardCo rwco = {
232461007b31SStefan Hajnoczi         .bs = bs,
232561007b31SStefan Hajnoczi         .sector_num = sector_num,
232661007b31SStefan Hajnoczi         .nb_sectors = nb_sectors,
232761007b31SStefan Hajnoczi         .ret = NOT_DONE,
232861007b31SStefan Hajnoczi     };
232961007b31SStefan Hajnoczi 
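/*
 * Like bdrv_flush() above and bdrv_ioctl() below, this is the synchronous
 * wrapper pattern used throughout this file: when already running in a
 * coroutine the entry function is called directly, otherwise a coroutine is
 * created and the caller polls the BlockDriverState's AioContext until the
 * sentinel value (NOT_DONE here, -EINPROGRESS for ioctl) has been replaced
 * by the real return code.
 */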
233061007b31SStefan Hajnoczi     if (qemu_in_coroutine()) {
233161007b31SStefan Hajnoczi         /* Fast-path if already in coroutine context */
233261007b31SStefan Hajnoczi         bdrv_discard_co_entry(&rwco);
233361007b31SStefan Hajnoczi     } else {
233461007b31SStefan Hajnoczi         AioContext *aio_context = bdrv_get_aio_context(bs);
233561007b31SStefan Hajnoczi 
233661007b31SStefan Hajnoczi         co = qemu_coroutine_create(bdrv_discard_co_entry);
233761007b31SStefan Hajnoczi         qemu_coroutine_enter(co, &rwco);
233861007b31SStefan Hajnoczi         while (rwco.ret == NOT_DONE) {
233961007b31SStefan Hajnoczi             aio_poll(aio_context, true);
234061007b31SStefan Hajnoczi         }
234161007b31SStefan Hajnoczi     }
234261007b31SStefan Hajnoczi 
234361007b31SStefan Hajnoczi     return rwco.ret;
234461007b31SStefan Hajnoczi }
234561007b31SStefan Hajnoczi 
23465c5ae76aSFam Zheng static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
234761007b31SStefan Hajnoczi {
234861007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
23495c5ae76aSFam Zheng     BdrvTrackedRequest tracked_req;
23505c5ae76aSFam Zheng     CoroutineIOCompletion co = {
23515c5ae76aSFam Zheng         .coroutine = qemu_coroutine_self(),
23525c5ae76aSFam Zheng     };
23535c5ae76aSFam Zheng     BlockAIOCB *acb;
235461007b31SStefan Hajnoczi 
23555c5ae76aSFam Zheng     tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
23565c5ae76aSFam Zheng     if (!drv || !drv->bdrv_aio_ioctl) {
23575c5ae76aSFam Zheng         co.ret = -ENOTSUP;
23585c5ae76aSFam Zheng         goto out;
23595c5ae76aSFam Zheng     }
23605c5ae76aSFam Zheng 
23615c5ae76aSFam Zheng     acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
23625c5ae76aSFam Zheng     if (!acb) {
2363c8a9fd80SFam Zheng         co.ret = -ENOTSUP;
2364c8a9fd80SFam Zheng         goto out;
23655c5ae76aSFam Zheng     }
23665c5ae76aSFam Zheng     qemu_coroutine_yield();
23675c5ae76aSFam Zheng out:
23685c5ae76aSFam Zheng     tracked_request_end(&tracked_req);
23695c5ae76aSFam Zheng     return co.ret;
23705c5ae76aSFam Zheng }
23715c5ae76aSFam Zheng 
23725c5ae76aSFam Zheng typedef struct {
23735c5ae76aSFam Zheng     BlockDriverState *bs;
23745c5ae76aSFam Zheng     int req;
23755c5ae76aSFam Zheng     void *buf;
23765c5ae76aSFam Zheng     int ret;
23775c5ae76aSFam Zheng } BdrvIoctlCoData;
23785c5ae76aSFam Zheng 
23795c5ae76aSFam Zheng static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
23805c5ae76aSFam Zheng {
23815c5ae76aSFam Zheng     BdrvIoctlCoData *data = opaque;
23825c5ae76aSFam Zheng     data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
23835c5ae76aSFam Zheng }
23845c5ae76aSFam Zheng 
23855c5ae76aSFam Zheng /* needed for generic scsi interface */
23865c5ae76aSFam Zheng int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
23875c5ae76aSFam Zheng {
23885c5ae76aSFam Zheng     BdrvIoctlCoData data = {
23895c5ae76aSFam Zheng         .bs = bs,
23905c5ae76aSFam Zheng         .req = req,
23915c5ae76aSFam Zheng         .buf = buf,
23925c5ae76aSFam Zheng         .ret = -EINPROGRESS,
23935c5ae76aSFam Zheng     };
23945c5ae76aSFam Zheng 
23955c5ae76aSFam Zheng     if (qemu_in_coroutine()) {
23965c5ae76aSFam Zheng         /* Fast-path if already in coroutine context */
23975c5ae76aSFam Zheng         bdrv_co_ioctl_entry(&data);
23985c5ae76aSFam Zheng     } else {
23995c5ae76aSFam Zheng         Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2400ba889444SPaolo Bonzini 
24015c5ae76aSFam Zheng         qemu_coroutine_enter(co, &data);
24025c5ae76aSFam Zheng         while (data.ret == -EINPROGRESS) {
24035c5ae76aSFam Zheng             aio_poll(bdrv_get_aio_context(bs), true);
24045c5ae76aSFam Zheng         }
2405ba889444SPaolo Bonzini     }
24065c5ae76aSFam Zheng     return data.ret;
24075c5ae76aSFam Zheng }
24085c5ae76aSFam Zheng 
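/*
 * The functions below provide the asynchronous counterpart: bdrv_aio_ioctl()
 * packs the request into a BlockAIOCBCoroutine, runs bdrv_co_do_ioctl() in a
 * new coroutine, and defers completion through a bottom half so the caller's
 * callback is not invoked before bdrv_aio_ioctl() has returned.  A minimal
 * caller sketch (my_ioctl_cb and my_state are hypothetical):
 *
 *     static void my_ioctl_cb(void *opaque, int ret)
 *     {
 *         struct my_state *s = opaque;
 *         s->ret = ret;
 *     }
 *
 *     acb = bdrv_aio_ioctl(bs, req, buf, my_ioctl_cb, s);
 */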
24095c5ae76aSFam Zheng static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
24105c5ae76aSFam Zheng {
24115c5ae76aSFam Zheng     BlockAIOCBCoroutine *acb = opaque;
24125c5ae76aSFam Zheng     acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
24135c5ae76aSFam Zheng                                       acb->req.req, acb->req.buf);
24145c5ae76aSFam Zheng     bdrv_co_complete(acb);
241561007b31SStefan Hajnoczi }
241661007b31SStefan Hajnoczi 
241761007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
241861007b31SStefan Hajnoczi                            unsigned long int req, void *buf,
241961007b31SStefan Hajnoczi                            BlockCompletionFunc *cb, void *opaque)
242061007b31SStefan Hajnoczi {
24215c5ae76aSFam Zheng     BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
24225c5ae76aSFam Zheng                                             bs, cb, opaque);
24235c5ae76aSFam Zheng     Coroutine *co;
242461007b31SStefan Hajnoczi 
24255c5ae76aSFam Zheng     acb->need_bh = true;
24265c5ae76aSFam Zheng     acb->req.error = -EINPROGRESS;
24275c5ae76aSFam Zheng     acb->req.req = req;
24285c5ae76aSFam Zheng     acb->req.buf = buf;
24295c5ae76aSFam Zheng     co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
24305c5ae76aSFam Zheng     qemu_coroutine_enter(co, acb);
24315c5ae76aSFam Zheng 
24325c5ae76aSFam Zheng     bdrv_co_maybe_schedule_bh(acb);
24335c5ae76aSFam Zheng     return &acb->common;
243461007b31SStefan Hajnoczi }
243561007b31SStefan Hajnoczi 
243661007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size)
243761007b31SStefan Hajnoczi {
243861007b31SStefan Hajnoczi     return qemu_memalign(bdrv_opt_mem_align(bs), size);
243961007b31SStefan Hajnoczi }
244061007b31SStefan Hajnoczi 
244161007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size)
244261007b31SStefan Hajnoczi {
244361007b31SStefan Hajnoczi     return memset(qemu_blockalign(bs, size), 0, size);
244461007b31SStefan Hajnoczi }
244561007b31SStefan Hajnoczi 
244661007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
244761007b31SStefan Hajnoczi {
244861007b31SStefan Hajnoczi     size_t align = bdrv_opt_mem_align(bs);
244961007b31SStefan Hajnoczi 
245061007b31SStefan Hajnoczi     /* Ensure that NULL is never returned on success */
245161007b31SStefan Hajnoczi     assert(align > 0);
245261007b31SStefan Hajnoczi     if (size == 0) {
245361007b31SStefan Hajnoczi         size = align;
245461007b31SStefan Hajnoczi     }
245561007b31SStefan Hajnoczi 
245661007b31SStefan Hajnoczi     return qemu_try_memalign(align, size);
245761007b31SStefan Hajnoczi }
245861007b31SStefan Hajnoczi 
245961007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
246061007b31SStefan Hajnoczi {
246161007b31SStefan Hajnoczi     void *mem = qemu_try_blockalign(bs, size);
246261007b31SStefan Hajnoczi 
246361007b31SStefan Hajnoczi     if (mem) {
246461007b31SStefan Hajnoczi         memset(mem, 0, size);
246561007b31SStefan Hajnoczi     }
246661007b31SStefan Hajnoczi 
246761007b31SStefan Hajnoczi     return mem;
246861007b31SStefan Hajnoczi }
246961007b31SStefan Hajnoczi 
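/*
 * Usage note on the helpers above: qemu_blockalign() aborts on allocation
 * failure (like qemu_memalign()), so it is only appropriate for small,
 * bounded allocations; buffers whose size is guest-controlled should use
 * qemu_try_blockalign() and handle NULL.  A minimal sketch, where len is a
 * hypothetical request size:
 *
 *     uint8_t *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ... use buf ...
 *     qemu_vfree(buf);
 */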
247061007b31SStefan Hajnoczi /*
247161007b31SStefan Hajnoczi  * Check if all memory in this vector is sector aligned.
247261007b31SStefan Hajnoczi  */
247361007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
247461007b31SStefan Hajnoczi {
247561007b31SStefan Hajnoczi     int i;
24764196d2f0SDenis V. Lunev     size_t alignment = bdrv_min_mem_align(bs);
247761007b31SStefan Hajnoczi 
247861007b31SStefan Hajnoczi     for (i = 0; i < qiov->niov; i++) {
247961007b31SStefan Hajnoczi         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
248061007b31SStefan Hajnoczi             return false;
248161007b31SStefan Hajnoczi         }
248261007b31SStefan Hajnoczi         if (qiov->iov[i].iov_len % alignment) {
248361007b31SStefan Hajnoczi             return false;
248461007b31SStefan Hajnoczi         }
248561007b31SStefan Hajnoczi     }
248661007b31SStefan Hajnoczi 
248761007b31SStefan Hajnoczi     return true;
248861007b31SStefan Hajnoczi }
248961007b31SStefan Hajnoczi 
249061007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs,
249161007b31SStefan Hajnoczi                                     NotifierWithReturn *notifier)
249261007b31SStefan Hajnoczi {
249361007b31SStefan Hajnoczi     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
249461007b31SStefan Hajnoczi }
249561007b31SStefan Hajnoczi 
249661007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs)
249761007b31SStefan Hajnoczi {
24986b98bd64SPaolo Bonzini     BdrvChild *child;
24996b98bd64SPaolo Bonzini 
25006b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
25016b98bd64SPaolo Bonzini         bdrv_io_plug(child->bs);
25026b98bd64SPaolo Bonzini     }
25036b98bd64SPaolo Bonzini 
25046b98bd64SPaolo Bonzini     if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
250561007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
250661007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_plug) {
250761007b31SStefan Hajnoczi             drv->bdrv_io_plug(bs);
25086b98bd64SPaolo Bonzini         }
250961007b31SStefan Hajnoczi     }
251061007b31SStefan Hajnoczi }
251161007b31SStefan Hajnoczi 
251261007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs)
251361007b31SStefan Hajnoczi {
25146b98bd64SPaolo Bonzini     BdrvChild *child;
25156b98bd64SPaolo Bonzini 
25166b98bd64SPaolo Bonzini     assert(bs->io_plugged);
25176b98bd64SPaolo Bonzini     if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
251861007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
251961007b31SStefan Hajnoczi         if (drv && drv->bdrv_io_unplug) {
252061007b31SStefan Hajnoczi             drv->bdrv_io_unplug(bs);
252161007b31SStefan Hajnoczi         }
252261007b31SStefan Hajnoczi     }
252361007b31SStefan Hajnoczi 
25246b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
25256b98bd64SPaolo Bonzini         bdrv_io_unplug(child->bs);
25266b98bd64SPaolo Bonzini     }
25276b98bd64SPaolo Bonzini }
25286b98bd64SPaolo Bonzini 
25296b98bd64SPaolo Bonzini void bdrv_io_unplugged_begin(BlockDriverState *bs)
253061007b31SStefan Hajnoczi {
25316b98bd64SPaolo Bonzini     BdrvChild *child;
25326b98bd64SPaolo Bonzini 
25336b98bd64SPaolo Bonzini     if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
253461007b31SStefan Hajnoczi         BlockDriver *drv = bs->drv;
25356b98bd64SPaolo Bonzini         if (drv && drv->bdrv_io_unplug) {
25366b98bd64SPaolo Bonzini             drv->bdrv_io_unplug(bs);
25376b98bd64SPaolo Bonzini         }
25386b98bd64SPaolo Bonzini     }
25396b98bd64SPaolo Bonzini 
25406b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
25416b98bd64SPaolo Bonzini         bdrv_io_unplugged_begin(child->bs);
25426b98bd64SPaolo Bonzini     }
25436b98bd64SPaolo Bonzini }
25446b98bd64SPaolo Bonzini 
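/*
 * bs->io_plugged counts nested bdrv_io_plug() sections and
 * bs->io_plug_disabled counts nested bdrv_io_unplugged_begin() sections; the
 * driver's plug/unplug hooks are only invoked on the outermost (0 <-> 1)
 * transitions, and only while plugging is not temporarily disabled.  All of
 * these functions recurse over bs->children so the whole tree is plugged or
 * unplugged together.  Typical batching use, with a hypothetical
 * submit_request() helper:
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         submit_request(bs, &reqs[i]);
 *     }
 *     bdrv_io_unplug(bs);
 */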
25456b98bd64SPaolo Bonzini void bdrv_io_unplugged_end(BlockDriverState *bs)
25466b98bd64SPaolo Bonzini {
25476b98bd64SPaolo Bonzini     BdrvChild *child;
25486b98bd64SPaolo Bonzini 
25496b98bd64SPaolo Bonzini     assert(bs->io_plug_disabled);
25506b98bd64SPaolo Bonzini     QLIST_FOREACH(child, &bs->children, next) {
25516b98bd64SPaolo Bonzini         bdrv_io_unplugged_end(child->bs);
25526b98bd64SPaolo Bonzini     }
25536b98bd64SPaolo Bonzini 
25546b98bd64SPaolo Bonzini     if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
25556b98bd64SPaolo Bonzini         BlockDriver *drv = bs->drv;
25566b98bd64SPaolo Bonzini         if (drv && drv->bdrv_io_plug) {
25576b98bd64SPaolo Bonzini             drv->bdrv_io_plug(bs);
25586b98bd64SPaolo Bonzini         }
255961007b31SStefan Hajnoczi     }
256061007b31SStefan Hajnoczi }
2561