161007b31SStefan Hajnoczi /* 261007b31SStefan Hajnoczi * Block layer I/O functions 361007b31SStefan Hajnoczi * 461007b31SStefan Hajnoczi * Copyright (c) 2003 Fabrice Bellard 561007b31SStefan Hajnoczi * 661007b31SStefan Hajnoczi * Permission is hereby granted, free of charge, to any person obtaining a copy 761007b31SStefan Hajnoczi * of this software and associated documentation files (the "Software"), to deal 861007b31SStefan Hajnoczi * in the Software without restriction, including without limitation the rights 961007b31SStefan Hajnoczi * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 1061007b31SStefan Hajnoczi * copies of the Software, and to permit persons to whom the Software is 1161007b31SStefan Hajnoczi * furnished to do so, subject to the following conditions: 1261007b31SStefan Hajnoczi * 1361007b31SStefan Hajnoczi * The above copyright notice and this permission notice shall be included in 1461007b31SStefan Hajnoczi * all copies or substantial portions of the Software. 1561007b31SStefan Hajnoczi * 1661007b31SStefan Hajnoczi * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 1761007b31SStefan Hajnoczi * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 1861007b31SStefan Hajnoczi * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 1961007b31SStefan Hajnoczi * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 2061007b31SStefan Hajnoczi * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 2161007b31SStefan Hajnoczi * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 2261007b31SStefan Hajnoczi * THE SOFTWARE. 2361007b31SStefan Hajnoczi */ 2461007b31SStefan Hajnoczi 2580c71a24SPeter Maydell #include "qemu/osdep.h" 2661007b31SStefan Hajnoczi #include "trace.h" 277f0e9da6SMax Reitz #include "sysemu/block-backend.h" 2861007b31SStefan Hajnoczi #include "block/blockjob.h" 2961007b31SStefan Hajnoczi #include "block/block_int.h" 30f348b6d1SVeronia Bahaa #include "qemu/cutils.h" 31da34e65cSMarkus Armbruster #include "qapi/error.h" 32d49b6836SMarkus Armbruster #include "qemu/error-report.h" 3361007b31SStefan Hajnoczi 3461007b31SStefan Hajnoczi #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 3561007b31SStefan Hajnoczi 3661007b31SStefan Hajnoczi static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 3761007b31SStefan Hajnoczi int64_t sector_num, 3861007b31SStefan Hajnoczi QEMUIOVector *qiov, 3961007b31SStefan Hajnoczi int nb_sectors, 4061007b31SStefan Hajnoczi BdrvRequestFlags flags, 4161007b31SStefan Hajnoczi BlockCompletionFunc *cb, 4261007b31SStefan Hajnoczi void *opaque, 4361007b31SStefan Hajnoczi bool is_write); 4461007b31SStefan Hajnoczi static void coroutine_fn bdrv_co_do_rw(void *opaque); 45d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 46d05aa8bbSEric Blake int64_t offset, int count, BdrvRequestFlags flags); 4761007b31SStefan Hajnoczi 48c2066af0SKevin Wolf static void bdrv_parent_drained_begin(BlockDriverState *bs) 4961007b31SStefan Hajnoczi { 50c2066af0SKevin Wolf BdrvChild *c; 5127ccdd52SKevin Wolf 52c2066af0SKevin Wolf QLIST_FOREACH(c, &bs->parents, next_parent) { 53c2066af0SKevin Wolf if (c->role->drained_begin) { 54c2066af0SKevin Wolf c->role->drained_begin(c); 55c2066af0SKevin Wolf } 56ce0f1412SPaolo Bonzini } 57ce0f1412SPaolo Bonzini } 58ce0f1412SPaolo Bonzini 59c2066af0SKevin Wolf static void bdrv_parent_drained_end(BlockDriverState *bs) 60ce0f1412SPaolo Bonzini { 61c2066af0SKevin Wolf BdrvChild *c; 6227ccdd52SKevin Wolf 63c2066af0SKevin Wolf QLIST_FOREACH(c, &bs->parents, next_parent) { 64c2066af0SKevin Wolf if (c->role->drained_end) { 65c2066af0SKevin Wolf c->role->drained_end(c); 6627ccdd52SKevin Wolf } 67c2066af0SKevin Wolf } 6861007b31SStefan Hajnoczi } 6961007b31SStefan Hajnoczi 7061007b31SStefan Hajnoczi void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) 7161007b31SStefan Hajnoczi { 7261007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 7361007b31SStefan Hajnoczi Error *local_err = NULL; 7461007b31SStefan Hajnoczi 7561007b31SStefan Hajnoczi memset(&bs->bl, 0, sizeof(bs->bl)); 7661007b31SStefan Hajnoczi 7761007b31SStefan Hajnoczi if (!drv) { 7861007b31SStefan Hajnoczi return; 7961007b31SStefan Hajnoczi } 8061007b31SStefan Hajnoczi 8161007b31SStefan Hajnoczi /* Take some limits from the children as a default */ 8261007b31SStefan Hajnoczi if (bs->file) { 839a4f4c31SKevin Wolf bdrv_refresh_limits(bs->file->bs, &local_err); 8461007b31SStefan Hajnoczi if (local_err) { 8561007b31SStefan Hajnoczi error_propagate(errp, local_err); 8661007b31SStefan Hajnoczi return; 8761007b31SStefan Hajnoczi } 889a4f4c31SKevin Wolf bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length; 899a4f4c31SKevin Wolf bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length; 909a4f4c31SKevin Wolf bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment; 919a4f4c31SKevin Wolf bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment; 92bd44feb7SStefan Hajnoczi bs->bl.max_iov = bs->file->bs->bl.max_iov; 9361007b31SStefan Hajnoczi } else { 944196d2f0SDenis V. Lunev bs->bl.min_mem_alignment = 512; 95459b4e66SDenis V. Lunev bs->bl.opt_mem_alignment = getpagesize(); 96bd44feb7SStefan Hajnoczi 97bd44feb7SStefan Hajnoczi /* Safe default since most protocols use readv()/writev()/etc */ 98bd44feb7SStefan Hajnoczi bs->bl.max_iov = IOV_MAX; 9961007b31SStefan Hajnoczi } 10061007b31SStefan Hajnoczi 101760e0063SKevin Wolf if (bs->backing) { 102760e0063SKevin Wolf bdrv_refresh_limits(bs->backing->bs, &local_err); 10361007b31SStefan Hajnoczi if (local_err) { 10461007b31SStefan Hajnoczi error_propagate(errp, local_err); 10561007b31SStefan Hajnoczi return; 10661007b31SStefan Hajnoczi } 10761007b31SStefan Hajnoczi bs->bl.opt_transfer_length = 10861007b31SStefan Hajnoczi MAX(bs->bl.opt_transfer_length, 109760e0063SKevin Wolf bs->backing->bs->bl.opt_transfer_length); 11061007b31SStefan Hajnoczi bs->bl.max_transfer_length = 11161007b31SStefan Hajnoczi MIN_NON_ZERO(bs->bl.max_transfer_length, 112760e0063SKevin Wolf bs->backing->bs->bl.max_transfer_length); 11361007b31SStefan Hajnoczi bs->bl.opt_mem_alignment = 11461007b31SStefan Hajnoczi MAX(bs->bl.opt_mem_alignment, 115760e0063SKevin Wolf bs->backing->bs->bl.opt_mem_alignment); 1164196d2f0SDenis V. Lunev bs->bl.min_mem_alignment = 1174196d2f0SDenis V. Lunev MAX(bs->bl.min_mem_alignment, 118760e0063SKevin Wolf bs->backing->bs->bl.min_mem_alignment); 119bd44feb7SStefan Hajnoczi bs->bl.max_iov = 120bd44feb7SStefan Hajnoczi MIN(bs->bl.max_iov, 121bd44feb7SStefan Hajnoczi bs->backing->bs->bl.max_iov); 12261007b31SStefan Hajnoczi } 12361007b31SStefan Hajnoczi 12461007b31SStefan Hajnoczi /* Then let the driver override it */ 12561007b31SStefan Hajnoczi if (drv->bdrv_refresh_limits) { 12661007b31SStefan Hajnoczi drv->bdrv_refresh_limits(bs, errp); 12761007b31SStefan Hajnoczi } 12861007b31SStefan Hajnoczi } 12961007b31SStefan Hajnoczi 13061007b31SStefan Hajnoczi /** 13161007b31SStefan Hajnoczi * The copy-on-read flag is actually a reference count so multiple users may 13261007b31SStefan Hajnoczi * use the feature without worrying about clobbering its previous state. 13361007b31SStefan Hajnoczi * Copy-on-read stays enabled until all users have called to disable it. 13461007b31SStefan Hajnoczi */ 13561007b31SStefan Hajnoczi void bdrv_enable_copy_on_read(BlockDriverState *bs) 13661007b31SStefan Hajnoczi { 13761007b31SStefan Hajnoczi bs->copy_on_read++; 13861007b31SStefan Hajnoczi } 13961007b31SStefan Hajnoczi 14061007b31SStefan Hajnoczi void bdrv_disable_copy_on_read(BlockDriverState *bs) 14161007b31SStefan Hajnoczi { 14261007b31SStefan Hajnoczi assert(bs->copy_on_read > 0); 14361007b31SStefan Hajnoczi bs->copy_on_read--; 14461007b31SStefan Hajnoczi } 14561007b31SStefan Hajnoczi 14661007b31SStefan Hajnoczi /* Check if any requests are in-flight (including throttled requests) */ 147439db28cSKevin Wolf bool bdrv_requests_pending(BlockDriverState *bs) 14861007b31SStefan Hajnoczi { 14937a639a7SKevin Wolf BdrvChild *child; 15037a639a7SKevin Wolf 15161007b31SStefan Hajnoczi if (!QLIST_EMPTY(&bs->tracked_requests)) { 15261007b31SStefan Hajnoczi return true; 15361007b31SStefan Hajnoczi } 15437a639a7SKevin Wolf 15537a639a7SKevin Wolf QLIST_FOREACH(child, &bs->children, next) { 15637a639a7SKevin Wolf if (bdrv_requests_pending(child->bs)) { 15761007b31SStefan Hajnoczi return true; 15861007b31SStefan Hajnoczi } 15961007b31SStefan Hajnoczi } 16037a639a7SKevin Wolf 16161007b31SStefan Hajnoczi return false; 16261007b31SStefan Hajnoczi } 16361007b31SStefan Hajnoczi 16467da1dc5SFam Zheng static void bdrv_drain_recurse(BlockDriverState *bs) 16567da1dc5SFam Zheng { 16667da1dc5SFam Zheng BdrvChild *child; 16767da1dc5SFam Zheng 16867da1dc5SFam Zheng if (bs->drv && bs->drv->bdrv_drain) { 16967da1dc5SFam Zheng bs->drv->bdrv_drain(bs); 17067da1dc5SFam Zheng } 17167da1dc5SFam Zheng QLIST_FOREACH(child, &bs->children, next) { 17267da1dc5SFam Zheng bdrv_drain_recurse(child->bs); 17367da1dc5SFam Zheng } 17467da1dc5SFam Zheng } 17567da1dc5SFam Zheng 176a77fd4bbSFam Zheng typedef struct { 177a77fd4bbSFam Zheng Coroutine *co; 178a77fd4bbSFam Zheng BlockDriverState *bs; 179a77fd4bbSFam Zheng QEMUBH *bh; 180a77fd4bbSFam Zheng bool done; 181a77fd4bbSFam Zheng } BdrvCoDrainData; 182a77fd4bbSFam Zheng 183b6e84c97SPaolo Bonzini static void bdrv_drain_poll(BlockDriverState *bs) 184b6e84c97SPaolo Bonzini { 185b6e84c97SPaolo Bonzini bool busy = true; 186b6e84c97SPaolo Bonzini 187b6e84c97SPaolo Bonzini while (busy) { 188b6e84c97SPaolo Bonzini /* Keep iterating */ 189b6e84c97SPaolo Bonzini busy = bdrv_requests_pending(bs); 190b6e84c97SPaolo Bonzini busy |= aio_poll(bdrv_get_aio_context(bs), busy); 191b6e84c97SPaolo Bonzini } 192b6e84c97SPaolo Bonzini } 193b6e84c97SPaolo Bonzini 194a77fd4bbSFam Zheng static void bdrv_co_drain_bh_cb(void *opaque) 195a77fd4bbSFam Zheng { 196a77fd4bbSFam Zheng BdrvCoDrainData *data = opaque; 197a77fd4bbSFam Zheng Coroutine *co = data->co; 198a77fd4bbSFam Zheng 199a77fd4bbSFam Zheng qemu_bh_delete(data->bh); 200b6e84c97SPaolo Bonzini bdrv_drain_poll(data->bs); 201a77fd4bbSFam Zheng data->done = true; 202a77fd4bbSFam Zheng qemu_coroutine_enter(co, NULL); 203a77fd4bbSFam Zheng } 204a77fd4bbSFam Zheng 205b6e84c97SPaolo Bonzini static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs) 206a77fd4bbSFam Zheng { 207a77fd4bbSFam Zheng BdrvCoDrainData data; 208a77fd4bbSFam Zheng 209a77fd4bbSFam Zheng /* Calling bdrv_drain() from a BH ensures the current coroutine yields and 210a77fd4bbSFam Zheng * other coroutines run if they were queued from 211a77fd4bbSFam Zheng * qemu_co_queue_run_restart(). */ 212a77fd4bbSFam Zheng 213a77fd4bbSFam Zheng assert(qemu_in_coroutine()); 214a77fd4bbSFam Zheng data = (BdrvCoDrainData) { 215a77fd4bbSFam Zheng .co = qemu_coroutine_self(), 216a77fd4bbSFam Zheng .bs = bs, 217a77fd4bbSFam Zheng .done = false, 218a77fd4bbSFam Zheng .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data), 219a77fd4bbSFam Zheng }; 220a77fd4bbSFam Zheng qemu_bh_schedule(data.bh); 221a77fd4bbSFam Zheng 222a77fd4bbSFam Zheng qemu_coroutine_yield(); 223a77fd4bbSFam Zheng /* If we are resumed from some other event (such as an aio completion or a 224a77fd4bbSFam Zheng * timer callback), it is a bug in the caller that should be fixed. */ 225a77fd4bbSFam Zheng assert(data.done); 226a77fd4bbSFam Zheng } 227a77fd4bbSFam Zheng 2286820643fSKevin Wolf void bdrv_drained_begin(BlockDriverState *bs) 2296820643fSKevin Wolf { 2306820643fSKevin Wolf if (!bs->quiesce_counter++) { 2316820643fSKevin Wolf aio_disable_external(bdrv_get_aio_context(bs)); 2326820643fSKevin Wolf bdrv_parent_drained_begin(bs); 2336820643fSKevin Wolf } 2346820643fSKevin Wolf 2356820643fSKevin Wolf bdrv_io_unplugged_begin(bs); 2366820643fSKevin Wolf bdrv_drain_recurse(bs); 2376820643fSKevin Wolf if (qemu_in_coroutine()) { 2386820643fSKevin Wolf bdrv_co_yield_to_drain(bs); 2396820643fSKevin Wolf } else { 2406820643fSKevin Wolf bdrv_drain_poll(bs); 2416820643fSKevin Wolf } 2426820643fSKevin Wolf bdrv_io_unplugged_end(bs); 2436820643fSKevin Wolf } 2446820643fSKevin Wolf 2456820643fSKevin Wolf void bdrv_drained_end(BlockDriverState *bs) 2466820643fSKevin Wolf { 2476820643fSKevin Wolf assert(bs->quiesce_counter > 0); 2486820643fSKevin Wolf if (--bs->quiesce_counter > 0) { 2496820643fSKevin Wolf return; 2506820643fSKevin Wolf } 2516820643fSKevin Wolf 2526820643fSKevin Wolf bdrv_parent_drained_end(bs); 2536820643fSKevin Wolf aio_enable_external(bdrv_get_aio_context(bs)); 2546820643fSKevin Wolf } 2556820643fSKevin Wolf 25661007b31SStefan Hajnoczi /* 25767da1dc5SFam Zheng * Wait for pending requests to complete on a single BlockDriverState subtree, 25867da1dc5SFam Zheng * and suspend block driver's internal I/O until next request arrives. 25961007b31SStefan Hajnoczi * 26061007b31SStefan Hajnoczi * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState 26161007b31SStefan Hajnoczi * AioContext. 2627a63f3cdSStefan Hajnoczi * 2637a63f3cdSStefan Hajnoczi * Only this BlockDriverState's AioContext is run, so in-flight requests must 2647a63f3cdSStefan Hajnoczi * not depend on events in other AioContexts. In that case, use 2657a63f3cdSStefan Hajnoczi * bdrv_drain_all() instead. 26661007b31SStefan Hajnoczi */ 267b6e84c97SPaolo Bonzini void coroutine_fn bdrv_co_drain(BlockDriverState *bs) 268b6e84c97SPaolo Bonzini { 2696820643fSKevin Wolf assert(qemu_in_coroutine()); 2706820643fSKevin Wolf bdrv_drained_begin(bs); 2716820643fSKevin Wolf bdrv_drained_end(bs); 272b6e84c97SPaolo Bonzini } 273b6e84c97SPaolo Bonzini 27461007b31SStefan Hajnoczi void bdrv_drain(BlockDriverState *bs) 27561007b31SStefan Hajnoczi { 2766820643fSKevin Wolf bdrv_drained_begin(bs); 2776820643fSKevin Wolf bdrv_drained_end(bs); 27861007b31SStefan Hajnoczi } 27961007b31SStefan Hajnoczi 28061007b31SStefan Hajnoczi /* 28161007b31SStefan Hajnoczi * Wait for pending requests to complete across all BlockDriverStates 28261007b31SStefan Hajnoczi * 28361007b31SStefan Hajnoczi * This function does not flush data to disk, use bdrv_flush_all() for that 28461007b31SStefan Hajnoczi * after calling this function. 28561007b31SStefan Hajnoczi */ 28661007b31SStefan Hajnoczi void bdrv_drain_all(void) 28761007b31SStefan Hajnoczi { 28861007b31SStefan Hajnoczi /* Always run first iteration so any pending completion BHs run */ 28961007b31SStefan Hajnoczi bool busy = true; 2907c8eece4SKevin Wolf BlockDriverState *bs; 29188be7b4bSKevin Wolf BdrvNextIterator it; 292f406c03cSAlexander Yarygin GSList *aio_ctxs = NULL, *ctx; 29361007b31SStefan Hajnoczi 29488be7b4bSKevin Wolf for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 29561007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 29661007b31SStefan Hajnoczi 29761007b31SStefan Hajnoczi aio_context_acquire(aio_context); 29861007b31SStefan Hajnoczi if (bs->job) { 29961007b31SStefan Hajnoczi block_job_pause(bs->job); 30061007b31SStefan Hajnoczi } 301c2066af0SKevin Wolf bdrv_parent_drained_begin(bs); 3026b98bd64SPaolo Bonzini bdrv_io_unplugged_begin(bs); 3039dcf8ecdSPaolo Bonzini bdrv_drain_recurse(bs); 30461007b31SStefan Hajnoczi aio_context_release(aio_context); 305f406c03cSAlexander Yarygin 306764ba3aeSAlberto Garcia if (!g_slist_find(aio_ctxs, aio_context)) { 307f406c03cSAlexander Yarygin aio_ctxs = g_slist_prepend(aio_ctxs, aio_context); 308f406c03cSAlexander Yarygin } 30961007b31SStefan Hajnoczi } 31061007b31SStefan Hajnoczi 3117a63f3cdSStefan Hajnoczi /* Note that completion of an asynchronous I/O operation can trigger any 3127a63f3cdSStefan Hajnoczi * number of other I/O operations on other devices---for example a 3137a63f3cdSStefan Hajnoczi * coroutine can submit an I/O request to another device in response to 3147a63f3cdSStefan Hajnoczi * request completion. Therefore we must keep looping until there was no 3157a63f3cdSStefan Hajnoczi * more activity rather than simply draining each device independently. 3167a63f3cdSStefan Hajnoczi */ 31761007b31SStefan Hajnoczi while (busy) { 31861007b31SStefan Hajnoczi busy = false; 319f406c03cSAlexander Yarygin 320f406c03cSAlexander Yarygin for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) { 321f406c03cSAlexander Yarygin AioContext *aio_context = ctx->data; 32261007b31SStefan Hajnoczi 32361007b31SStefan Hajnoczi aio_context_acquire(aio_context); 32488be7b4bSKevin Wolf for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 325f406c03cSAlexander Yarygin if (aio_context == bdrv_get_aio_context(bs)) { 326f406c03cSAlexander Yarygin if (bdrv_requests_pending(bs)) { 327f406c03cSAlexander Yarygin busy = true; 328f406c03cSAlexander Yarygin aio_poll(aio_context, busy); 329f406c03cSAlexander Yarygin } 330f406c03cSAlexander Yarygin } 331f406c03cSAlexander Yarygin } 332f406c03cSAlexander Yarygin busy |= aio_poll(aio_context, false); 33361007b31SStefan Hajnoczi aio_context_release(aio_context); 33461007b31SStefan Hajnoczi } 33561007b31SStefan Hajnoczi } 33661007b31SStefan Hajnoczi 33788be7b4bSKevin Wolf for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 33861007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 33961007b31SStefan Hajnoczi 34061007b31SStefan Hajnoczi aio_context_acquire(aio_context); 3416b98bd64SPaolo Bonzini bdrv_io_unplugged_end(bs); 342c2066af0SKevin Wolf bdrv_parent_drained_end(bs); 34361007b31SStefan Hajnoczi if (bs->job) { 34461007b31SStefan Hajnoczi block_job_resume(bs->job); 34561007b31SStefan Hajnoczi } 34661007b31SStefan Hajnoczi aio_context_release(aio_context); 34761007b31SStefan Hajnoczi } 348f406c03cSAlexander Yarygin g_slist_free(aio_ctxs); 34961007b31SStefan Hajnoczi } 35061007b31SStefan Hajnoczi 35161007b31SStefan Hajnoczi /** 35261007b31SStefan Hajnoczi * Remove an active request from the tracked requests list 35361007b31SStefan Hajnoczi * 35461007b31SStefan Hajnoczi * This function should be called when a tracked request is completing. 35561007b31SStefan Hajnoczi */ 35661007b31SStefan Hajnoczi static void tracked_request_end(BdrvTrackedRequest *req) 35761007b31SStefan Hajnoczi { 35861007b31SStefan Hajnoczi if (req->serialising) { 35961007b31SStefan Hajnoczi req->bs->serialising_in_flight--; 36061007b31SStefan Hajnoczi } 36161007b31SStefan Hajnoczi 36261007b31SStefan Hajnoczi QLIST_REMOVE(req, list); 36361007b31SStefan Hajnoczi qemu_co_queue_restart_all(&req->wait_queue); 36461007b31SStefan Hajnoczi } 36561007b31SStefan Hajnoczi 36661007b31SStefan Hajnoczi /** 36761007b31SStefan Hajnoczi * Add an active request to the tracked requests list 36861007b31SStefan Hajnoczi */ 36961007b31SStefan Hajnoczi static void tracked_request_begin(BdrvTrackedRequest *req, 37061007b31SStefan Hajnoczi BlockDriverState *bs, 37161007b31SStefan Hajnoczi int64_t offset, 372ebde595cSFam Zheng unsigned int bytes, 373ebde595cSFam Zheng enum BdrvTrackedRequestType type) 37461007b31SStefan Hajnoczi { 37561007b31SStefan Hajnoczi *req = (BdrvTrackedRequest){ 37661007b31SStefan Hajnoczi .bs = bs, 37761007b31SStefan Hajnoczi .offset = offset, 37861007b31SStefan Hajnoczi .bytes = bytes, 379ebde595cSFam Zheng .type = type, 38061007b31SStefan Hajnoczi .co = qemu_coroutine_self(), 38161007b31SStefan Hajnoczi .serialising = false, 38261007b31SStefan Hajnoczi .overlap_offset = offset, 38361007b31SStefan Hajnoczi .overlap_bytes = bytes, 38461007b31SStefan Hajnoczi }; 38561007b31SStefan Hajnoczi 38661007b31SStefan Hajnoczi qemu_co_queue_init(&req->wait_queue); 38761007b31SStefan Hajnoczi 38861007b31SStefan Hajnoczi QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 38961007b31SStefan Hajnoczi } 39061007b31SStefan Hajnoczi 39161007b31SStefan Hajnoczi static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) 39261007b31SStefan Hajnoczi { 39361007b31SStefan Hajnoczi int64_t overlap_offset = req->offset & ~(align - 1); 39461007b31SStefan Hajnoczi unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) 39561007b31SStefan Hajnoczi - overlap_offset; 39661007b31SStefan Hajnoczi 39761007b31SStefan Hajnoczi if (!req->serialising) { 39861007b31SStefan Hajnoczi req->bs->serialising_in_flight++; 39961007b31SStefan Hajnoczi req->serialising = true; 40061007b31SStefan Hajnoczi } 40161007b31SStefan Hajnoczi 40261007b31SStefan Hajnoczi req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 40361007b31SStefan Hajnoczi req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 40461007b31SStefan Hajnoczi } 40561007b31SStefan Hajnoczi 40661007b31SStefan Hajnoczi /** 407244483e6SKevin Wolf * Round a region to cluster boundaries (sector-based) 40861007b31SStefan Hajnoczi */ 409244483e6SKevin Wolf void bdrv_round_sectors_to_clusters(BlockDriverState *bs, 41061007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 41161007b31SStefan Hajnoczi int64_t *cluster_sector_num, 41261007b31SStefan Hajnoczi int *cluster_nb_sectors) 41361007b31SStefan Hajnoczi { 41461007b31SStefan Hajnoczi BlockDriverInfo bdi; 41561007b31SStefan Hajnoczi 41661007b31SStefan Hajnoczi if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 41761007b31SStefan Hajnoczi *cluster_sector_num = sector_num; 41861007b31SStefan Hajnoczi *cluster_nb_sectors = nb_sectors; 41961007b31SStefan Hajnoczi } else { 42061007b31SStefan Hajnoczi int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; 42161007b31SStefan Hajnoczi *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); 42261007b31SStefan Hajnoczi *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + 42361007b31SStefan Hajnoczi nb_sectors, c); 42461007b31SStefan Hajnoczi } 42561007b31SStefan Hajnoczi } 42661007b31SStefan Hajnoczi 427244483e6SKevin Wolf /** 428244483e6SKevin Wolf * Round a region to cluster boundaries 429244483e6SKevin Wolf */ 430244483e6SKevin Wolf void bdrv_round_to_clusters(BlockDriverState *bs, 431244483e6SKevin Wolf int64_t offset, unsigned int bytes, 432244483e6SKevin Wolf int64_t *cluster_offset, 433244483e6SKevin Wolf unsigned int *cluster_bytes) 434244483e6SKevin Wolf { 435244483e6SKevin Wolf BlockDriverInfo bdi; 436244483e6SKevin Wolf 437244483e6SKevin Wolf if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 438244483e6SKevin Wolf *cluster_offset = offset; 439244483e6SKevin Wolf *cluster_bytes = bytes; 440244483e6SKevin Wolf } else { 441244483e6SKevin Wolf int64_t c = bdi.cluster_size; 442244483e6SKevin Wolf *cluster_offset = QEMU_ALIGN_DOWN(offset, c); 443244483e6SKevin Wolf *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); 444244483e6SKevin Wolf } 445244483e6SKevin Wolf } 446244483e6SKevin Wolf 44761007b31SStefan Hajnoczi static int bdrv_get_cluster_size(BlockDriverState *bs) 44861007b31SStefan Hajnoczi { 44961007b31SStefan Hajnoczi BlockDriverInfo bdi; 45061007b31SStefan Hajnoczi int ret; 45161007b31SStefan Hajnoczi 45261007b31SStefan Hajnoczi ret = bdrv_get_info(bs, &bdi); 45361007b31SStefan Hajnoczi if (ret < 0 || bdi.cluster_size == 0) { 45461007b31SStefan Hajnoczi return bs->request_alignment; 45561007b31SStefan Hajnoczi } else { 45661007b31SStefan Hajnoczi return bdi.cluster_size; 45761007b31SStefan Hajnoczi } 45861007b31SStefan Hajnoczi } 45961007b31SStefan Hajnoczi 46061007b31SStefan Hajnoczi static bool tracked_request_overlaps(BdrvTrackedRequest *req, 46161007b31SStefan Hajnoczi int64_t offset, unsigned int bytes) 46261007b31SStefan Hajnoczi { 46361007b31SStefan Hajnoczi /* aaaa bbbb */ 46461007b31SStefan Hajnoczi if (offset >= req->overlap_offset + req->overlap_bytes) { 46561007b31SStefan Hajnoczi return false; 46661007b31SStefan Hajnoczi } 46761007b31SStefan Hajnoczi /* bbbb aaaa */ 46861007b31SStefan Hajnoczi if (req->overlap_offset >= offset + bytes) { 46961007b31SStefan Hajnoczi return false; 47061007b31SStefan Hajnoczi } 47161007b31SStefan Hajnoczi return true; 47261007b31SStefan Hajnoczi } 47361007b31SStefan Hajnoczi 47461007b31SStefan Hajnoczi static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) 47561007b31SStefan Hajnoczi { 47661007b31SStefan Hajnoczi BlockDriverState *bs = self->bs; 47761007b31SStefan Hajnoczi BdrvTrackedRequest *req; 47861007b31SStefan Hajnoczi bool retry; 47961007b31SStefan Hajnoczi bool waited = false; 48061007b31SStefan Hajnoczi 48161007b31SStefan Hajnoczi if (!bs->serialising_in_flight) { 48261007b31SStefan Hajnoczi return false; 48361007b31SStefan Hajnoczi } 48461007b31SStefan Hajnoczi 48561007b31SStefan Hajnoczi do { 48661007b31SStefan Hajnoczi retry = false; 48761007b31SStefan Hajnoczi QLIST_FOREACH(req, &bs->tracked_requests, list) { 48861007b31SStefan Hajnoczi if (req == self || (!req->serialising && !self->serialising)) { 48961007b31SStefan Hajnoczi continue; 49061007b31SStefan Hajnoczi } 49161007b31SStefan Hajnoczi if (tracked_request_overlaps(req, self->overlap_offset, 49261007b31SStefan Hajnoczi self->overlap_bytes)) 49361007b31SStefan Hajnoczi { 49461007b31SStefan Hajnoczi /* Hitting this means there was a reentrant request, for 49561007b31SStefan Hajnoczi * example, a block driver issuing nested requests. This must 49661007b31SStefan Hajnoczi * never happen since it means deadlock. 49761007b31SStefan Hajnoczi */ 49861007b31SStefan Hajnoczi assert(qemu_coroutine_self() != req->co); 49961007b31SStefan Hajnoczi 50061007b31SStefan Hajnoczi /* If the request is already (indirectly) waiting for us, or 50161007b31SStefan Hajnoczi * will wait for us as soon as it wakes up, then just go on 50261007b31SStefan Hajnoczi * (instead of producing a deadlock in the former case). */ 50361007b31SStefan Hajnoczi if (!req->waiting_for) { 50461007b31SStefan Hajnoczi self->waiting_for = req; 50561007b31SStefan Hajnoczi qemu_co_queue_wait(&req->wait_queue); 50661007b31SStefan Hajnoczi self->waiting_for = NULL; 50761007b31SStefan Hajnoczi retry = true; 50861007b31SStefan Hajnoczi waited = true; 50961007b31SStefan Hajnoczi break; 51061007b31SStefan Hajnoczi } 51161007b31SStefan Hajnoczi } 51261007b31SStefan Hajnoczi } 51361007b31SStefan Hajnoczi } while (retry); 51461007b31SStefan Hajnoczi 51561007b31SStefan Hajnoczi return waited; 51661007b31SStefan Hajnoczi } 51761007b31SStefan Hajnoczi 51861007b31SStefan Hajnoczi static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, 51961007b31SStefan Hajnoczi size_t size) 52061007b31SStefan Hajnoczi { 52161007b31SStefan Hajnoczi if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) { 52261007b31SStefan Hajnoczi return -EIO; 52361007b31SStefan Hajnoczi } 52461007b31SStefan Hajnoczi 52561007b31SStefan Hajnoczi if (!bdrv_is_inserted(bs)) { 52661007b31SStefan Hajnoczi return -ENOMEDIUM; 52761007b31SStefan Hajnoczi } 52861007b31SStefan Hajnoczi 52961007b31SStefan Hajnoczi if (offset < 0) { 53061007b31SStefan Hajnoczi return -EIO; 53161007b31SStefan Hajnoczi } 53261007b31SStefan Hajnoczi 53361007b31SStefan Hajnoczi return 0; 53461007b31SStefan Hajnoczi } 53561007b31SStefan Hajnoczi 53661007b31SStefan Hajnoczi static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, 53761007b31SStefan Hajnoczi int nb_sectors) 53861007b31SStefan Hajnoczi { 53961007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 54061007b31SStefan Hajnoczi return -EIO; 54161007b31SStefan Hajnoczi } 54261007b31SStefan Hajnoczi 54361007b31SStefan Hajnoczi return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, 54461007b31SStefan Hajnoczi nb_sectors * BDRV_SECTOR_SIZE); 54561007b31SStefan Hajnoczi } 54661007b31SStefan Hajnoczi 54761007b31SStefan Hajnoczi typedef struct RwCo { 54861007b31SStefan Hajnoczi BlockDriverState *bs; 54961007b31SStefan Hajnoczi int64_t offset; 55061007b31SStefan Hajnoczi QEMUIOVector *qiov; 55161007b31SStefan Hajnoczi bool is_write; 55261007b31SStefan Hajnoczi int ret; 55361007b31SStefan Hajnoczi BdrvRequestFlags flags; 55461007b31SStefan Hajnoczi } RwCo; 55561007b31SStefan Hajnoczi 55661007b31SStefan Hajnoczi static void coroutine_fn bdrv_rw_co_entry(void *opaque) 55761007b31SStefan Hajnoczi { 55861007b31SStefan Hajnoczi RwCo *rwco = opaque; 55961007b31SStefan Hajnoczi 56061007b31SStefan Hajnoczi if (!rwco->is_write) { 561cab3a356SKevin Wolf rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset, 56261007b31SStefan Hajnoczi rwco->qiov->size, rwco->qiov, 56361007b31SStefan Hajnoczi rwco->flags); 56461007b31SStefan Hajnoczi } else { 565cab3a356SKevin Wolf rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset, 56661007b31SStefan Hajnoczi rwco->qiov->size, rwco->qiov, 56761007b31SStefan Hajnoczi rwco->flags); 56861007b31SStefan Hajnoczi } 56961007b31SStefan Hajnoczi } 57061007b31SStefan Hajnoczi 57161007b31SStefan Hajnoczi /* 57261007b31SStefan Hajnoczi * Process a vectored synchronous request using coroutines 57361007b31SStefan Hajnoczi */ 57461007b31SStefan Hajnoczi static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, 57561007b31SStefan Hajnoczi QEMUIOVector *qiov, bool is_write, 57661007b31SStefan Hajnoczi BdrvRequestFlags flags) 57761007b31SStefan Hajnoczi { 57861007b31SStefan Hajnoczi Coroutine *co; 57961007b31SStefan Hajnoczi RwCo rwco = { 58061007b31SStefan Hajnoczi .bs = bs, 58161007b31SStefan Hajnoczi .offset = offset, 58261007b31SStefan Hajnoczi .qiov = qiov, 58361007b31SStefan Hajnoczi .is_write = is_write, 58461007b31SStefan Hajnoczi .ret = NOT_DONE, 58561007b31SStefan Hajnoczi .flags = flags, 58661007b31SStefan Hajnoczi }; 58761007b31SStefan Hajnoczi 58861007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 58961007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 59061007b31SStefan Hajnoczi bdrv_rw_co_entry(&rwco); 59161007b31SStefan Hajnoczi } else { 59261007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 59361007b31SStefan Hajnoczi 59461007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_rw_co_entry); 59561007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 59661007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 59761007b31SStefan Hajnoczi aio_poll(aio_context, true); 59861007b31SStefan Hajnoczi } 59961007b31SStefan Hajnoczi } 60061007b31SStefan Hajnoczi return rwco.ret; 60161007b31SStefan Hajnoczi } 60261007b31SStefan Hajnoczi 60361007b31SStefan Hajnoczi /* 60461007b31SStefan Hajnoczi * Process a synchronous request using coroutines 60561007b31SStefan Hajnoczi */ 60661007b31SStefan Hajnoczi static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, 60761007b31SStefan Hajnoczi int nb_sectors, bool is_write, BdrvRequestFlags flags) 60861007b31SStefan Hajnoczi { 60961007b31SStefan Hajnoczi QEMUIOVector qiov; 61061007b31SStefan Hajnoczi struct iovec iov = { 61161007b31SStefan Hajnoczi .iov_base = (void *)buf, 61261007b31SStefan Hajnoczi .iov_len = nb_sectors * BDRV_SECTOR_SIZE, 61361007b31SStefan Hajnoczi }; 61461007b31SStefan Hajnoczi 61561007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 61661007b31SStefan Hajnoczi return -EINVAL; 61761007b31SStefan Hajnoczi } 61861007b31SStefan Hajnoczi 61961007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 62061007b31SStefan Hajnoczi return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, 62161007b31SStefan Hajnoczi &qiov, is_write, flags); 62261007b31SStefan Hajnoczi } 62361007b31SStefan Hajnoczi 62461007b31SStefan Hajnoczi /* return < 0 if error. See bdrv_write() for the return codes */ 62561007b31SStefan Hajnoczi int bdrv_read(BlockDriverState *bs, int64_t sector_num, 62661007b31SStefan Hajnoczi uint8_t *buf, int nb_sectors) 62761007b31SStefan Hajnoczi { 62861007b31SStefan Hajnoczi return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); 62961007b31SStefan Hajnoczi } 63061007b31SStefan Hajnoczi 63161007b31SStefan Hajnoczi /* Return < 0 if error. Important errors are: 63261007b31SStefan Hajnoczi -EIO generic I/O error (may happen for all errors) 63361007b31SStefan Hajnoczi -ENOMEDIUM No media inserted. 63461007b31SStefan Hajnoczi -EINVAL Invalid sector number or nb_sectors 63561007b31SStefan Hajnoczi -EACCES Trying to write a read-only device 63661007b31SStefan Hajnoczi */ 63761007b31SStefan Hajnoczi int bdrv_write(BlockDriverState *bs, int64_t sector_num, 63861007b31SStefan Hajnoczi const uint8_t *buf, int nb_sectors) 63961007b31SStefan Hajnoczi { 64061007b31SStefan Hajnoczi return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); 64161007b31SStefan Hajnoczi } 64261007b31SStefan Hajnoczi 64374021bc4SEric Blake int bdrv_pwrite_zeroes(BlockDriverState *bs, int64_t offset, 64474021bc4SEric Blake int count, BdrvRequestFlags flags) 64561007b31SStefan Hajnoczi { 64674021bc4SEric Blake QEMUIOVector qiov; 64774021bc4SEric Blake struct iovec iov = { 64874021bc4SEric Blake .iov_base = NULL, 64974021bc4SEric Blake .iov_len = count, 65074021bc4SEric Blake }; 65174021bc4SEric Blake 65274021bc4SEric Blake qemu_iovec_init_external(&qiov, &iov, 1); 65374021bc4SEric Blake return bdrv_prwv_co(bs, offset, &qiov, true, 65461007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags); 65561007b31SStefan Hajnoczi } 65661007b31SStefan Hajnoczi 65761007b31SStefan Hajnoczi /* 65874021bc4SEric Blake * Completely zero out a block device with the help of bdrv_pwrite_zeroes. 65961007b31SStefan Hajnoczi * The operation is sped up by checking the block status and only writing 66061007b31SStefan Hajnoczi * zeroes to the device if they currently do not return zeroes. Optional 66174021bc4SEric Blake * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP, 662465fe887SEric Blake * BDRV_REQ_FUA). 66361007b31SStefan Hajnoczi * 66461007b31SStefan Hajnoczi * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 66561007b31SStefan Hajnoczi */ 66661007b31SStefan Hajnoczi int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) 66761007b31SStefan Hajnoczi { 66861007b31SStefan Hajnoczi int64_t target_sectors, ret, nb_sectors, sector_num = 0; 66967a0fd2aSFam Zheng BlockDriverState *file; 67061007b31SStefan Hajnoczi int n; 67161007b31SStefan Hajnoczi 67261007b31SStefan Hajnoczi target_sectors = bdrv_nb_sectors(bs); 67361007b31SStefan Hajnoczi if (target_sectors < 0) { 67461007b31SStefan Hajnoczi return target_sectors; 67561007b31SStefan Hajnoczi } 67661007b31SStefan Hajnoczi 67761007b31SStefan Hajnoczi for (;;) { 67861007b31SStefan Hajnoczi nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS); 67961007b31SStefan Hajnoczi if (nb_sectors <= 0) { 68061007b31SStefan Hajnoczi return 0; 68161007b31SStefan Hajnoczi } 68267a0fd2aSFam Zheng ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file); 68361007b31SStefan Hajnoczi if (ret < 0) { 68461007b31SStefan Hajnoczi error_report("error getting block status at sector %" PRId64 ": %s", 68561007b31SStefan Hajnoczi sector_num, strerror(-ret)); 68661007b31SStefan Hajnoczi return ret; 68761007b31SStefan Hajnoczi } 68861007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_ZERO) { 68961007b31SStefan Hajnoczi sector_num += n; 69061007b31SStefan Hajnoczi continue; 69161007b31SStefan Hajnoczi } 69274021bc4SEric Blake ret = bdrv_pwrite_zeroes(bs, sector_num << BDRV_SECTOR_BITS, 69374021bc4SEric Blake n << BDRV_SECTOR_BITS, flags); 69461007b31SStefan Hajnoczi if (ret < 0) { 69561007b31SStefan Hajnoczi error_report("error writing zeroes at sector %" PRId64 ": %s", 69661007b31SStefan Hajnoczi sector_num, strerror(-ret)); 69761007b31SStefan Hajnoczi return ret; 69861007b31SStefan Hajnoczi } 69961007b31SStefan Hajnoczi sector_num += n; 70061007b31SStefan Hajnoczi } 70161007b31SStefan Hajnoczi } 70261007b31SStefan Hajnoczi 703f1e84741SKevin Wolf int bdrv_preadv(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) 704f1e84741SKevin Wolf { 705f1e84741SKevin Wolf int ret; 706f1e84741SKevin Wolf 707f1e84741SKevin Wolf ret = bdrv_prwv_co(bs, offset, qiov, false, 0); 708f1e84741SKevin Wolf if (ret < 0) { 709f1e84741SKevin Wolf return ret; 710f1e84741SKevin Wolf } 711f1e84741SKevin Wolf 712f1e84741SKevin Wolf return qiov->size; 713f1e84741SKevin Wolf } 714f1e84741SKevin Wolf 71561007b31SStefan Hajnoczi int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) 71661007b31SStefan Hajnoczi { 71761007b31SStefan Hajnoczi QEMUIOVector qiov; 71861007b31SStefan Hajnoczi struct iovec iov = { 71961007b31SStefan Hajnoczi .iov_base = (void *)buf, 72061007b31SStefan Hajnoczi .iov_len = bytes, 72161007b31SStefan Hajnoczi }; 72261007b31SStefan Hajnoczi 72361007b31SStefan Hajnoczi if (bytes < 0) { 72461007b31SStefan Hajnoczi return -EINVAL; 72561007b31SStefan Hajnoczi } 72661007b31SStefan Hajnoczi 72761007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 728f1e84741SKevin Wolf return bdrv_preadv(bs, offset, &qiov); 72961007b31SStefan Hajnoczi } 73061007b31SStefan Hajnoczi 73161007b31SStefan Hajnoczi int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) 73261007b31SStefan Hajnoczi { 73361007b31SStefan Hajnoczi int ret; 73461007b31SStefan Hajnoczi 73561007b31SStefan Hajnoczi ret = bdrv_prwv_co(bs, offset, qiov, true, 0); 73661007b31SStefan Hajnoczi if (ret < 0) { 73761007b31SStefan Hajnoczi return ret; 73861007b31SStefan Hajnoczi } 73961007b31SStefan Hajnoczi 74061007b31SStefan Hajnoczi return qiov->size; 74161007b31SStefan Hajnoczi } 74261007b31SStefan Hajnoczi 74361007b31SStefan Hajnoczi int bdrv_pwrite(BlockDriverState *bs, int64_t offset, 74461007b31SStefan Hajnoczi const void *buf, int bytes) 74561007b31SStefan Hajnoczi { 74661007b31SStefan Hajnoczi QEMUIOVector qiov; 74761007b31SStefan Hajnoczi struct iovec iov = { 74861007b31SStefan Hajnoczi .iov_base = (void *) buf, 74961007b31SStefan Hajnoczi .iov_len = bytes, 75061007b31SStefan Hajnoczi }; 75161007b31SStefan Hajnoczi 75261007b31SStefan Hajnoczi if (bytes < 0) { 75361007b31SStefan Hajnoczi return -EINVAL; 75461007b31SStefan Hajnoczi } 75561007b31SStefan Hajnoczi 75661007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 75761007b31SStefan Hajnoczi return bdrv_pwritev(bs, offset, &qiov); 75861007b31SStefan Hajnoczi } 75961007b31SStefan Hajnoczi 76061007b31SStefan Hajnoczi /* 76161007b31SStefan Hajnoczi * Writes to the file and ensures that no writes are reordered across this 76261007b31SStefan Hajnoczi * request (acts as a barrier) 76361007b31SStefan Hajnoczi * 76461007b31SStefan Hajnoczi * Returns 0 on success, -errno in error cases. 76561007b31SStefan Hajnoczi */ 76661007b31SStefan Hajnoczi int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, 76761007b31SStefan Hajnoczi const void *buf, int count) 76861007b31SStefan Hajnoczi { 76961007b31SStefan Hajnoczi int ret; 77061007b31SStefan Hajnoczi 77161007b31SStefan Hajnoczi ret = bdrv_pwrite(bs, offset, buf, count); 77261007b31SStefan Hajnoczi if (ret < 0) { 77361007b31SStefan Hajnoczi return ret; 77461007b31SStefan Hajnoczi } 77561007b31SStefan Hajnoczi 776855a6a93SKevin Wolf ret = bdrv_flush(bs); 777855a6a93SKevin Wolf if (ret < 0) { 778855a6a93SKevin Wolf return ret; 77961007b31SStefan Hajnoczi } 78061007b31SStefan Hajnoczi 78161007b31SStefan Hajnoczi return 0; 78261007b31SStefan Hajnoczi } 78361007b31SStefan Hajnoczi 78408844473SKevin Wolf typedef struct CoroutineIOCompletion { 78508844473SKevin Wolf Coroutine *coroutine; 78608844473SKevin Wolf int ret; 78708844473SKevin Wolf } CoroutineIOCompletion; 78808844473SKevin Wolf 78908844473SKevin Wolf static void bdrv_co_io_em_complete(void *opaque, int ret) 79008844473SKevin Wolf { 79108844473SKevin Wolf CoroutineIOCompletion *co = opaque; 79208844473SKevin Wolf 79308844473SKevin Wolf co->ret = ret; 79408844473SKevin Wolf qemu_coroutine_enter(co->coroutine, NULL); 79508844473SKevin Wolf } 79608844473SKevin Wolf 797166fe960SKevin Wolf static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs, 798166fe960SKevin Wolf uint64_t offset, uint64_t bytes, 799166fe960SKevin Wolf QEMUIOVector *qiov, int flags) 800166fe960SKevin Wolf { 801166fe960SKevin Wolf BlockDriver *drv = bs->drv; 8023fb06697SKevin Wolf int64_t sector_num; 8033fb06697SKevin Wolf unsigned int nb_sectors; 8043fb06697SKevin Wolf 805fa166538SEric Blake assert(!(flags & ~BDRV_REQ_MASK)); 806fa166538SEric Blake 8073fb06697SKevin Wolf if (drv->bdrv_co_preadv) { 8083fb06697SKevin Wolf return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); 8093fb06697SKevin Wolf } 8103fb06697SKevin Wolf 8113fb06697SKevin Wolf sector_num = offset >> BDRV_SECTOR_BITS; 8123fb06697SKevin Wolf nb_sectors = bytes >> BDRV_SECTOR_BITS; 813166fe960SKevin Wolf 814166fe960SKevin Wolf assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 815166fe960SKevin Wolf assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 816166fe960SKevin Wolf assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS); 817166fe960SKevin Wolf 81808844473SKevin Wolf if (drv->bdrv_co_readv) { 819166fe960SKevin Wolf return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 82008844473SKevin Wolf } else { 82108844473SKevin Wolf BlockAIOCB *acb; 82208844473SKevin Wolf CoroutineIOCompletion co = { 82308844473SKevin Wolf .coroutine = qemu_coroutine_self(), 82408844473SKevin Wolf }; 82508844473SKevin Wolf 82608844473SKevin Wolf acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors, 82708844473SKevin Wolf bdrv_co_io_em_complete, &co); 82808844473SKevin Wolf if (acb == NULL) { 82908844473SKevin Wolf return -EIO; 83008844473SKevin Wolf } else { 83108844473SKevin Wolf qemu_coroutine_yield(); 83208844473SKevin Wolf return co.ret; 83308844473SKevin Wolf } 83408844473SKevin Wolf } 835166fe960SKevin Wolf } 836166fe960SKevin Wolf 83778a07294SKevin Wolf static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, 83878a07294SKevin Wolf uint64_t offset, uint64_t bytes, 83978a07294SKevin Wolf QEMUIOVector *qiov, int flags) 84078a07294SKevin Wolf { 84178a07294SKevin Wolf BlockDriver *drv = bs->drv; 8423fb06697SKevin Wolf int64_t sector_num; 8433fb06697SKevin Wolf unsigned int nb_sectors; 84478a07294SKevin Wolf int ret; 84578a07294SKevin Wolf 846fa166538SEric Blake assert(!(flags & ~BDRV_REQ_MASK)); 847fa166538SEric Blake 8483fb06697SKevin Wolf if (drv->bdrv_co_pwritev) { 849515c2f43SKevin Wolf ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, 850515c2f43SKevin Wolf flags & bs->supported_write_flags); 851515c2f43SKevin Wolf flags &= ~bs->supported_write_flags; 8523fb06697SKevin Wolf goto emulate_flags; 8533fb06697SKevin Wolf } 8543fb06697SKevin Wolf 8553fb06697SKevin Wolf sector_num = offset >> BDRV_SECTOR_BITS; 8563fb06697SKevin Wolf nb_sectors = bytes >> BDRV_SECTOR_BITS; 8573fb06697SKevin Wolf 85878a07294SKevin Wolf assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 85978a07294SKevin Wolf assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 86078a07294SKevin Wolf assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS); 86178a07294SKevin Wolf 86278a07294SKevin Wolf if (drv->bdrv_co_writev_flags) { 86378a07294SKevin Wolf ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov, 8644df863f3SEric Blake flags & bs->supported_write_flags); 8654df863f3SEric Blake flags &= ~bs->supported_write_flags; 86608844473SKevin Wolf } else if (drv->bdrv_co_writev) { 8674df863f3SEric Blake assert(!bs->supported_write_flags); 86878a07294SKevin Wolf ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); 86908844473SKevin Wolf } else { 87008844473SKevin Wolf BlockAIOCB *acb; 87108844473SKevin Wolf CoroutineIOCompletion co = { 87208844473SKevin Wolf .coroutine = qemu_coroutine_self(), 87308844473SKevin Wolf }; 87408844473SKevin Wolf 87508844473SKevin Wolf acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors, 87608844473SKevin Wolf bdrv_co_io_em_complete, &co); 87708844473SKevin Wolf if (acb == NULL) { 8783fb06697SKevin Wolf ret = -EIO; 87908844473SKevin Wolf } else { 88008844473SKevin Wolf qemu_coroutine_yield(); 8813fb06697SKevin Wolf ret = co.ret; 88208844473SKevin Wolf } 88378a07294SKevin Wolf } 88478a07294SKevin Wolf 8853fb06697SKevin Wolf emulate_flags: 8864df863f3SEric Blake if (ret == 0 && (flags & BDRV_REQ_FUA)) { 88778a07294SKevin Wolf ret = bdrv_co_flush(bs); 88878a07294SKevin Wolf } 88978a07294SKevin Wolf 89078a07294SKevin Wolf return ret; 89178a07294SKevin Wolf } 89278a07294SKevin Wolf 89361007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, 894244483e6SKevin Wolf int64_t offset, unsigned int bytes, QEMUIOVector *qiov) 89561007b31SStefan Hajnoczi { 89661007b31SStefan Hajnoczi /* Perform I/O through a temporary buffer so that users who scribble over 89761007b31SStefan Hajnoczi * their read buffer while the operation is in progress do not end up 89861007b31SStefan Hajnoczi * modifying the image file. This is critical for zero-copy guest I/O 89961007b31SStefan Hajnoczi * where anything might happen inside guest memory. 90061007b31SStefan Hajnoczi */ 90161007b31SStefan Hajnoczi void *bounce_buffer; 90261007b31SStefan Hajnoczi 90361007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 90461007b31SStefan Hajnoczi struct iovec iov; 90561007b31SStefan Hajnoczi QEMUIOVector bounce_qiov; 906244483e6SKevin Wolf int64_t cluster_offset; 907244483e6SKevin Wolf unsigned int cluster_bytes; 90861007b31SStefan Hajnoczi size_t skip_bytes; 90961007b31SStefan Hajnoczi int ret; 91061007b31SStefan Hajnoczi 91161007b31SStefan Hajnoczi /* Cover entire cluster so no additional backing file I/O is required when 91261007b31SStefan Hajnoczi * allocating cluster in the image file. 91361007b31SStefan Hajnoczi */ 914244483e6SKevin Wolf bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); 91561007b31SStefan Hajnoczi 916244483e6SKevin Wolf trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, 917244483e6SKevin Wolf cluster_offset, cluster_bytes); 91861007b31SStefan Hajnoczi 919244483e6SKevin Wolf iov.iov_len = cluster_bytes; 92061007b31SStefan Hajnoczi iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); 92161007b31SStefan Hajnoczi if (bounce_buffer == NULL) { 92261007b31SStefan Hajnoczi ret = -ENOMEM; 92361007b31SStefan Hajnoczi goto err; 92461007b31SStefan Hajnoczi } 92561007b31SStefan Hajnoczi 92661007b31SStefan Hajnoczi qemu_iovec_init_external(&bounce_qiov, &iov, 1); 92761007b31SStefan Hajnoczi 928244483e6SKevin Wolf ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes, 929166fe960SKevin Wolf &bounce_qiov, 0); 93061007b31SStefan Hajnoczi if (ret < 0) { 93161007b31SStefan Hajnoczi goto err; 93261007b31SStefan Hajnoczi } 93361007b31SStefan Hajnoczi 934c1499a5eSEric Blake if (drv->bdrv_co_pwrite_zeroes && 93561007b31SStefan Hajnoczi buffer_is_zero(bounce_buffer, iov.iov_len)) { 936244483e6SKevin Wolf ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0); 93761007b31SStefan Hajnoczi } else { 93861007b31SStefan Hajnoczi /* This does not change the data on the disk, it is not necessary 93961007b31SStefan Hajnoczi * to flush even in cache=writethrough mode. 94061007b31SStefan Hajnoczi */ 941244483e6SKevin Wolf ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes, 94278a07294SKevin Wolf &bounce_qiov, 0); 94361007b31SStefan Hajnoczi } 94461007b31SStefan Hajnoczi 94561007b31SStefan Hajnoczi if (ret < 0) { 94661007b31SStefan Hajnoczi /* It might be okay to ignore write errors for guest requests. If this 94761007b31SStefan Hajnoczi * is a deliberate copy-on-read then we don't want to ignore the error. 94861007b31SStefan Hajnoczi * Simply report it in all cases. 94961007b31SStefan Hajnoczi */ 95061007b31SStefan Hajnoczi goto err; 95161007b31SStefan Hajnoczi } 95261007b31SStefan Hajnoczi 953244483e6SKevin Wolf skip_bytes = offset - cluster_offset; 954244483e6SKevin Wolf qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes); 95561007b31SStefan Hajnoczi 95661007b31SStefan Hajnoczi err: 95761007b31SStefan Hajnoczi qemu_vfree(bounce_buffer); 95861007b31SStefan Hajnoczi return ret; 95961007b31SStefan Hajnoczi } 96061007b31SStefan Hajnoczi 96161007b31SStefan Hajnoczi /* 96261007b31SStefan Hajnoczi * Forwards an already correctly aligned request to the BlockDriver. This 96361007b31SStefan Hajnoczi * handles copy on read and zeroing after EOF; any other features must be 96461007b31SStefan Hajnoczi * implemented by the caller. 96561007b31SStefan Hajnoczi */ 96661007b31SStefan Hajnoczi static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, 96761007b31SStefan Hajnoczi BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 96861007b31SStefan Hajnoczi int64_t align, QEMUIOVector *qiov, int flags) 96961007b31SStefan Hajnoczi { 97061007b31SStefan Hajnoczi int ret; 97161007b31SStefan Hajnoczi 97249c07526SKevin Wolf assert(is_power_of_2(align)); 97349c07526SKevin Wolf assert((offset & (align - 1)) == 0); 97449c07526SKevin Wolf assert((bytes & (align - 1)) == 0); 97561007b31SStefan Hajnoczi assert(!qiov || bytes == qiov->size); 976abb06c5aSDaniel P. Berrange assert((bs->open_flags & BDRV_O_NO_IO) == 0); 977fa166538SEric Blake assert(!(flags & ~BDRV_REQ_MASK)); 97861007b31SStefan Hajnoczi 97961007b31SStefan Hajnoczi /* Handle Copy on Read and associated serialisation */ 98061007b31SStefan Hajnoczi if (flags & BDRV_REQ_COPY_ON_READ) { 98161007b31SStefan Hajnoczi /* If we touch the same cluster it counts as an overlap. This 98261007b31SStefan Hajnoczi * guarantees that allocating writes will be serialized and not race 98361007b31SStefan Hajnoczi * with each other for the same cluster. For example, in copy-on-read 98461007b31SStefan Hajnoczi * it ensures that the CoR read and write operations are atomic and 98561007b31SStefan Hajnoczi * guest writes cannot interleave between them. */ 98661007b31SStefan Hajnoczi mark_request_serialising(req, bdrv_get_cluster_size(bs)); 98761007b31SStefan Hajnoczi } 98861007b31SStefan Hajnoczi 98961408b25SFam Zheng if (!(flags & BDRV_REQ_NO_SERIALISING)) { 99061007b31SStefan Hajnoczi wait_serialising_requests(req); 99161408b25SFam Zheng } 99261007b31SStefan Hajnoczi 99361007b31SStefan Hajnoczi if (flags & BDRV_REQ_COPY_ON_READ) { 99449c07526SKevin Wolf int64_t start_sector = offset >> BDRV_SECTOR_BITS; 99549c07526SKevin Wolf int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 99649c07526SKevin Wolf unsigned int nb_sectors = end_sector - start_sector; 99761007b31SStefan Hajnoczi int pnum; 99861007b31SStefan Hajnoczi 99949c07526SKevin Wolf ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum); 100061007b31SStefan Hajnoczi if (ret < 0) { 100161007b31SStefan Hajnoczi goto out; 100261007b31SStefan Hajnoczi } 100361007b31SStefan Hajnoczi 100461007b31SStefan Hajnoczi if (!ret || pnum != nb_sectors) { 1005244483e6SKevin Wolf ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov); 100661007b31SStefan Hajnoczi goto out; 100761007b31SStefan Hajnoczi } 100861007b31SStefan Hajnoczi } 100961007b31SStefan Hajnoczi 101061007b31SStefan Hajnoczi /* Forward the request to the BlockDriver */ 101161007b31SStefan Hajnoczi if (!bs->zero_beyond_eof) { 1012166fe960SKevin Wolf ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0); 101361007b31SStefan Hajnoczi } else { 101461007b31SStefan Hajnoczi /* Read zeros after EOF */ 101549c07526SKevin Wolf int64_t total_bytes, max_bytes; 101661007b31SStefan Hajnoczi 101749c07526SKevin Wolf total_bytes = bdrv_getlength(bs); 101849c07526SKevin Wolf if (total_bytes < 0) { 101949c07526SKevin Wolf ret = total_bytes; 102061007b31SStefan Hajnoczi goto out; 102161007b31SStefan Hajnoczi } 102261007b31SStefan Hajnoczi 102349c07526SKevin Wolf max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); 102449c07526SKevin Wolf if (bytes < max_bytes) { 1025166fe960SKevin Wolf ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0); 102649c07526SKevin Wolf } else if (max_bytes > 0) { 102761007b31SStefan Hajnoczi QEMUIOVector local_qiov; 102861007b31SStefan Hajnoczi 102961007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov); 103049c07526SKevin Wolf qemu_iovec_concat(&local_qiov, qiov, 0, max_bytes); 103161007b31SStefan Hajnoczi 103249c07526SKevin Wolf ret = bdrv_driver_preadv(bs, offset, max_bytes, &local_qiov, 0); 103361007b31SStefan Hajnoczi 103461007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 103561007b31SStefan Hajnoczi } else { 103661007b31SStefan Hajnoczi ret = 0; 103761007b31SStefan Hajnoczi } 103861007b31SStefan Hajnoczi 103961007b31SStefan Hajnoczi /* Reading beyond end of file is supposed to produce zeroes */ 104049c07526SKevin Wolf if (ret == 0 && total_bytes < offset + bytes) { 104149c07526SKevin Wolf uint64_t zero_offset = MAX(0, total_bytes - offset); 104249c07526SKevin Wolf uint64_t zero_bytes = offset + bytes - zero_offset; 104349c07526SKevin Wolf qemu_iovec_memset(qiov, zero_offset, 0, zero_bytes); 104461007b31SStefan Hajnoczi } 104561007b31SStefan Hajnoczi } 104661007b31SStefan Hajnoczi 104761007b31SStefan Hajnoczi out: 104861007b31SStefan Hajnoczi return ret; 104961007b31SStefan Hajnoczi } 105061007b31SStefan Hajnoczi 105161007b31SStefan Hajnoczi /* 105261007b31SStefan Hajnoczi * Handle a read request in coroutine context 105361007b31SStefan Hajnoczi */ 1054cab3a356SKevin Wolf int coroutine_fn bdrv_co_preadv(BlockDriverState *bs, 105561007b31SStefan Hajnoczi int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 105661007b31SStefan Hajnoczi BdrvRequestFlags flags) 105761007b31SStefan Hajnoczi { 105861007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 105961007b31SStefan Hajnoczi BdrvTrackedRequest req; 106061007b31SStefan Hajnoczi 106123b0d9fbSKevin Wolf uint64_t align = bs->request_alignment; 106261007b31SStefan Hajnoczi uint8_t *head_buf = NULL; 106361007b31SStefan Hajnoczi uint8_t *tail_buf = NULL; 106461007b31SStefan Hajnoczi QEMUIOVector local_qiov; 106561007b31SStefan Hajnoczi bool use_local_qiov = false; 106661007b31SStefan Hajnoczi int ret; 106761007b31SStefan Hajnoczi 106861007b31SStefan Hajnoczi if (!drv) { 106961007b31SStefan Hajnoczi return -ENOMEDIUM; 107061007b31SStefan Hajnoczi } 107161007b31SStefan Hajnoczi 107261007b31SStefan Hajnoczi ret = bdrv_check_byte_request(bs, offset, bytes); 107361007b31SStefan Hajnoczi if (ret < 0) { 107461007b31SStefan Hajnoczi return ret; 107561007b31SStefan Hajnoczi } 107661007b31SStefan Hajnoczi 10779568b511SWen Congyang /* Don't do copy-on-read if we read data before write operation */ 107861408b25SFam Zheng if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) { 107961007b31SStefan Hajnoczi flags |= BDRV_REQ_COPY_ON_READ; 108061007b31SStefan Hajnoczi } 108161007b31SStefan Hajnoczi 108261007b31SStefan Hajnoczi /* Align read if necessary by padding qiov */ 108361007b31SStefan Hajnoczi if (offset & (align - 1)) { 108461007b31SStefan Hajnoczi head_buf = qemu_blockalign(bs, align); 108561007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 2); 108661007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 108761007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 108861007b31SStefan Hajnoczi use_local_qiov = true; 108961007b31SStefan Hajnoczi 109061007b31SStefan Hajnoczi bytes += offset & (align - 1); 109161007b31SStefan Hajnoczi offset = offset & ~(align - 1); 109261007b31SStefan Hajnoczi } 109361007b31SStefan Hajnoczi 109461007b31SStefan Hajnoczi if ((offset + bytes) & (align - 1)) { 109561007b31SStefan Hajnoczi if (!use_local_qiov) { 109661007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 1); 109761007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 109861007b31SStefan Hajnoczi use_local_qiov = true; 109961007b31SStefan Hajnoczi } 110061007b31SStefan Hajnoczi tail_buf = qemu_blockalign(bs, align); 110161007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, tail_buf, 110261007b31SStefan Hajnoczi align - ((offset + bytes) & (align - 1))); 110361007b31SStefan Hajnoczi 110461007b31SStefan Hajnoczi bytes = ROUND_UP(bytes, align); 110561007b31SStefan Hajnoczi } 110661007b31SStefan Hajnoczi 1107ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); 110861007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, 110961007b31SStefan Hajnoczi use_local_qiov ? &local_qiov : qiov, 111061007b31SStefan Hajnoczi flags); 111161007b31SStefan Hajnoczi tracked_request_end(&req); 111261007b31SStefan Hajnoczi 111361007b31SStefan Hajnoczi if (use_local_qiov) { 111461007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 111561007b31SStefan Hajnoczi qemu_vfree(head_buf); 111661007b31SStefan Hajnoczi qemu_vfree(tail_buf); 111761007b31SStefan Hajnoczi } 111861007b31SStefan Hajnoczi 111961007b31SStefan Hajnoczi return ret; 112061007b31SStefan Hajnoczi } 112161007b31SStefan Hajnoczi 112261007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, 112361007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 112461007b31SStefan Hajnoczi BdrvRequestFlags flags) 112561007b31SStefan Hajnoczi { 112661007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 112761007b31SStefan Hajnoczi return -EINVAL; 112861007b31SStefan Hajnoczi } 112961007b31SStefan Hajnoczi 1130cab3a356SKevin Wolf return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS, 113161007b31SStefan Hajnoczi nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 113261007b31SStefan Hajnoczi } 113361007b31SStefan Hajnoczi 113461007b31SStefan Hajnoczi int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, 113561007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *qiov) 113661007b31SStefan Hajnoczi { 113761007b31SStefan Hajnoczi trace_bdrv_co_readv(bs, sector_num, nb_sectors); 113861007b31SStefan Hajnoczi 113961007b31SStefan Hajnoczi return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0); 114061007b31SStefan Hajnoczi } 114161007b31SStefan Hajnoczi 114261007b31SStefan Hajnoczi #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768 114361007b31SStefan Hajnoczi 1144d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 1145d05aa8bbSEric Blake int64_t offset, int count, BdrvRequestFlags flags) 114661007b31SStefan Hajnoczi { 114761007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 114861007b31SStefan Hajnoczi QEMUIOVector qiov; 114961007b31SStefan Hajnoczi struct iovec iov = {0}; 115061007b31SStefan Hajnoczi int ret = 0; 1151465fe887SEric Blake bool need_flush = false; 1152443668caSDenis V. Lunev int head = 0; 1153443668caSDenis V. Lunev int tail = 0; 115461007b31SStefan Hajnoczi 1155cf081fcaSEric Blake int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); 1156d05aa8bbSEric Blake int alignment = MAX(bs->bl.pwrite_zeroes_alignment ?: 1, 1157d05aa8bbSEric Blake bs->request_alignment); 1158cf081fcaSEric Blake 1159d05aa8bbSEric Blake assert(is_power_of_2(alignment)); 1160d05aa8bbSEric Blake head = offset & (alignment - 1); 1161d05aa8bbSEric Blake tail = (offset + count) & (alignment - 1); 1162d05aa8bbSEric Blake max_write_zeroes &= ~(alignment - 1); 116361007b31SStefan Hajnoczi 1164d05aa8bbSEric Blake while (count > 0 && !ret) { 1165d05aa8bbSEric Blake int num = count; 116661007b31SStefan Hajnoczi 116761007b31SStefan Hajnoczi /* Align request. Block drivers can expect the "bulk" of the request 1168443668caSDenis V. Lunev * to be aligned, and that unaligned requests do not cross cluster 1169443668caSDenis V. Lunev * boundaries. 117061007b31SStefan Hajnoczi */ 1171443668caSDenis V. Lunev if (head) { 117261007b31SStefan Hajnoczi /* Make a small request up to the first aligned sector. */ 1173d05aa8bbSEric Blake num = MIN(count, alignment - head); 1174443668caSDenis V. Lunev head = 0; 1175d05aa8bbSEric Blake } else if (tail && num > alignment) { 1176443668caSDenis V. Lunev /* Shorten the request to the last aligned sector. */ 1177443668caSDenis V. Lunev num -= tail; 117861007b31SStefan Hajnoczi } 117961007b31SStefan Hajnoczi 118061007b31SStefan Hajnoczi /* limit request size */ 118161007b31SStefan Hajnoczi if (num > max_write_zeroes) { 118261007b31SStefan Hajnoczi num = max_write_zeroes; 118361007b31SStefan Hajnoczi } 118461007b31SStefan Hajnoczi 118561007b31SStefan Hajnoczi ret = -ENOTSUP; 118661007b31SStefan Hajnoczi /* First try the efficient write zeroes operation */ 1187d05aa8bbSEric Blake if (drv->bdrv_co_pwrite_zeroes) { 1188d05aa8bbSEric Blake ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1189d05aa8bbSEric Blake flags & bs->supported_zero_flags); 1190d05aa8bbSEric Blake if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1191d05aa8bbSEric Blake !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1192d05aa8bbSEric Blake need_flush = true; 1193d05aa8bbSEric Blake } 1194465fe887SEric Blake } else { 1195465fe887SEric Blake assert(!bs->supported_zero_flags); 119661007b31SStefan Hajnoczi } 119761007b31SStefan Hajnoczi 119861007b31SStefan Hajnoczi if (ret == -ENOTSUP) { 119961007b31SStefan Hajnoczi /* Fall back to bounce buffer if write zeroes is unsupported */ 120061007b31SStefan Hajnoczi int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length, 120161007b31SStefan Hajnoczi MAX_WRITE_ZEROES_BOUNCE_BUFFER); 1202465fe887SEric Blake BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1203465fe887SEric Blake 1204465fe887SEric Blake if ((flags & BDRV_REQ_FUA) && 1205465fe887SEric Blake !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1206465fe887SEric Blake /* No need for bdrv_driver_pwrite() to do a fallback 1207465fe887SEric Blake * flush on each chunk; use just one at the end */ 1208465fe887SEric Blake write_flags &= ~BDRV_REQ_FUA; 1209465fe887SEric Blake need_flush = true; 1210465fe887SEric Blake } 1211d05aa8bbSEric Blake num = MIN(num, max_xfer_len << BDRV_SECTOR_BITS); 1212d05aa8bbSEric Blake iov.iov_len = num; 121361007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 1214d05aa8bbSEric Blake iov.iov_base = qemu_try_blockalign(bs, num); 121561007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 121661007b31SStefan Hajnoczi ret = -ENOMEM; 121761007b31SStefan Hajnoczi goto fail; 121861007b31SStefan Hajnoczi } 1219d05aa8bbSEric Blake memset(iov.iov_base, 0, num); 122061007b31SStefan Hajnoczi } 122161007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 122261007b31SStefan Hajnoczi 1223d05aa8bbSEric Blake ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); 122461007b31SStefan Hajnoczi 122561007b31SStefan Hajnoczi /* Keep bounce buffer around if it is big enough for all 122661007b31SStefan Hajnoczi * all future requests. 122761007b31SStefan Hajnoczi */ 1228d05aa8bbSEric Blake if (num < max_xfer_len << BDRV_SECTOR_BITS) { 122961007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 123061007b31SStefan Hajnoczi iov.iov_base = NULL; 123161007b31SStefan Hajnoczi } 123261007b31SStefan Hajnoczi } 123361007b31SStefan Hajnoczi 1234d05aa8bbSEric Blake offset += num; 1235d05aa8bbSEric Blake count -= num; 123661007b31SStefan Hajnoczi } 123761007b31SStefan Hajnoczi 123861007b31SStefan Hajnoczi fail: 1239465fe887SEric Blake if (ret == 0 && need_flush) { 1240465fe887SEric Blake ret = bdrv_co_flush(bs); 1241465fe887SEric Blake } 124261007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 124361007b31SStefan Hajnoczi return ret; 124461007b31SStefan Hajnoczi } 124561007b31SStefan Hajnoczi 124661007b31SStefan Hajnoczi /* 124761007b31SStefan Hajnoczi * Forwards an already correctly aligned write request to the BlockDriver. 124861007b31SStefan Hajnoczi */ 124961007b31SStefan Hajnoczi static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 125061007b31SStefan Hajnoczi BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 125161007b31SStefan Hajnoczi QEMUIOVector *qiov, int flags) 125261007b31SStefan Hajnoczi { 125361007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 125461007b31SStefan Hajnoczi bool waited; 125561007b31SStefan Hajnoczi int ret; 125661007b31SStefan Hajnoczi 12579896c876SKevin Wolf int64_t start_sector = offset >> BDRV_SECTOR_BITS; 12589896c876SKevin Wolf int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 125961007b31SStefan Hajnoczi 126061007b31SStefan Hajnoczi assert(!qiov || bytes == qiov->size); 1261abb06c5aSDaniel P. Berrange assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1262fa166538SEric Blake assert(!(flags & ~BDRV_REQ_MASK)); 126361007b31SStefan Hajnoczi 126461007b31SStefan Hajnoczi waited = wait_serialising_requests(req); 126561007b31SStefan Hajnoczi assert(!waited || !req->serialising); 126661007b31SStefan Hajnoczi assert(req->overlap_offset <= offset); 126761007b31SStefan Hajnoczi assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 126861007b31SStefan Hajnoczi 126961007b31SStefan Hajnoczi ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 127061007b31SStefan Hajnoczi 127161007b31SStefan Hajnoczi if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1272c1499a5eSEric Blake !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 127361007b31SStefan Hajnoczi qemu_iovec_is_zero(qiov)) { 127461007b31SStefan Hajnoczi flags |= BDRV_REQ_ZERO_WRITE; 127561007b31SStefan Hajnoczi if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 127661007b31SStefan Hajnoczi flags |= BDRV_REQ_MAY_UNMAP; 127761007b31SStefan Hajnoczi } 127861007b31SStefan Hajnoczi } 127961007b31SStefan Hajnoczi 128061007b31SStefan Hajnoczi if (ret < 0) { 128161007b31SStefan Hajnoczi /* Do nothing, write notifier decided to fail this request */ 128261007b31SStefan Hajnoczi } else if (flags & BDRV_REQ_ZERO_WRITE) { 12839a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 12849896c876SKevin Wolf ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 128561007b31SStefan Hajnoczi } else { 12869a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV); 128778a07294SKevin Wolf ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags); 128861007b31SStefan Hajnoczi } 12899a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 129061007b31SStefan Hajnoczi 12919896c876SKevin Wolf bdrv_set_dirty(bs, start_sector, end_sector - start_sector); 129261007b31SStefan Hajnoczi 129353d8f9d8SMax Reitz if (bs->wr_highest_offset < offset + bytes) { 129453d8f9d8SMax Reitz bs->wr_highest_offset = offset + bytes; 129553d8f9d8SMax Reitz } 129661007b31SStefan Hajnoczi 129761007b31SStefan Hajnoczi if (ret >= 0) { 12989896c876SKevin Wolf bs->total_sectors = MAX(bs->total_sectors, end_sector); 129961007b31SStefan Hajnoczi } 130061007b31SStefan Hajnoczi 130161007b31SStefan Hajnoczi return ret; 130261007b31SStefan Hajnoczi } 130361007b31SStefan Hajnoczi 13049eeb6dd1SFam Zheng static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs, 13059eeb6dd1SFam Zheng int64_t offset, 13069eeb6dd1SFam Zheng unsigned int bytes, 13079eeb6dd1SFam Zheng BdrvRequestFlags flags, 13089eeb6dd1SFam Zheng BdrvTrackedRequest *req) 13099eeb6dd1SFam Zheng { 13109eeb6dd1SFam Zheng uint8_t *buf = NULL; 13119eeb6dd1SFam Zheng QEMUIOVector local_qiov; 13129eeb6dd1SFam Zheng struct iovec iov; 131323b0d9fbSKevin Wolf uint64_t align = bs->request_alignment; 13149eeb6dd1SFam Zheng unsigned int head_padding_bytes, tail_padding_bytes; 13159eeb6dd1SFam Zheng int ret = 0; 13169eeb6dd1SFam Zheng 13179eeb6dd1SFam Zheng head_padding_bytes = offset & (align - 1); 13189eeb6dd1SFam Zheng tail_padding_bytes = align - ((offset + bytes) & (align - 1)); 13199eeb6dd1SFam Zheng 13209eeb6dd1SFam Zheng 13219eeb6dd1SFam Zheng assert(flags & BDRV_REQ_ZERO_WRITE); 13229eeb6dd1SFam Zheng if (head_padding_bytes || tail_padding_bytes) { 13239eeb6dd1SFam Zheng buf = qemu_blockalign(bs, align); 13249eeb6dd1SFam Zheng iov = (struct iovec) { 13259eeb6dd1SFam Zheng .iov_base = buf, 13269eeb6dd1SFam Zheng .iov_len = align, 13279eeb6dd1SFam Zheng }; 13289eeb6dd1SFam Zheng qemu_iovec_init_external(&local_qiov, &iov, 1); 13299eeb6dd1SFam Zheng } 13309eeb6dd1SFam Zheng if (head_padding_bytes) { 13319eeb6dd1SFam Zheng uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 13329eeb6dd1SFam Zheng 13339eeb6dd1SFam Zheng /* RMW the unaligned part before head. */ 13349eeb6dd1SFam Zheng mark_request_serialising(req, align); 13359eeb6dd1SFam Zheng wait_serialising_requests(req); 13369a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 13379eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align, 13389eeb6dd1SFam Zheng align, &local_qiov, 0); 13399eeb6dd1SFam Zheng if (ret < 0) { 13409eeb6dd1SFam Zheng goto fail; 13419eeb6dd1SFam Zheng } 13429a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 13439eeb6dd1SFam Zheng 13449eeb6dd1SFam Zheng memset(buf + head_padding_bytes, 0, zero_bytes); 13459eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align, 13469eeb6dd1SFam Zheng &local_qiov, 13479eeb6dd1SFam Zheng flags & ~BDRV_REQ_ZERO_WRITE); 13489eeb6dd1SFam Zheng if (ret < 0) { 13499eeb6dd1SFam Zheng goto fail; 13509eeb6dd1SFam Zheng } 13519eeb6dd1SFam Zheng offset += zero_bytes; 13529eeb6dd1SFam Zheng bytes -= zero_bytes; 13539eeb6dd1SFam Zheng } 13549eeb6dd1SFam Zheng 13559eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 13569eeb6dd1SFam Zheng if (bytes >= align) { 13579eeb6dd1SFam Zheng /* Write the aligned part in the middle. */ 13589eeb6dd1SFam Zheng uint64_t aligned_bytes = bytes & ~(align - 1); 13599eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, 13609eeb6dd1SFam Zheng NULL, flags); 13619eeb6dd1SFam Zheng if (ret < 0) { 13629eeb6dd1SFam Zheng goto fail; 13639eeb6dd1SFam Zheng } 13649eeb6dd1SFam Zheng bytes -= aligned_bytes; 13659eeb6dd1SFam Zheng offset += aligned_bytes; 13669eeb6dd1SFam Zheng } 13679eeb6dd1SFam Zheng 13689eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 13699eeb6dd1SFam Zheng if (bytes) { 13709eeb6dd1SFam Zheng assert(align == tail_padding_bytes + bytes); 13719eeb6dd1SFam Zheng /* RMW the unaligned part after tail. */ 13729eeb6dd1SFam Zheng mark_request_serialising(req, align); 13739eeb6dd1SFam Zheng wait_serialising_requests(req); 13749a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 13759eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset, align, 13769eeb6dd1SFam Zheng align, &local_qiov, 0); 13779eeb6dd1SFam Zheng if (ret < 0) { 13789eeb6dd1SFam Zheng goto fail; 13799eeb6dd1SFam Zheng } 13809a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 13819eeb6dd1SFam Zheng 13829eeb6dd1SFam Zheng memset(buf, 0, bytes); 13839eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, align, 13849eeb6dd1SFam Zheng &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 13859eeb6dd1SFam Zheng } 13869eeb6dd1SFam Zheng fail: 13879eeb6dd1SFam Zheng qemu_vfree(buf); 13889eeb6dd1SFam Zheng return ret; 13899eeb6dd1SFam Zheng 13909eeb6dd1SFam Zheng } 13919eeb6dd1SFam Zheng 139261007b31SStefan Hajnoczi /* 139361007b31SStefan Hajnoczi * Handle a write request in coroutine context 139461007b31SStefan Hajnoczi */ 1395cab3a356SKevin Wolf int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs, 139661007b31SStefan Hajnoczi int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 139761007b31SStefan Hajnoczi BdrvRequestFlags flags) 139861007b31SStefan Hajnoczi { 139961007b31SStefan Hajnoczi BdrvTrackedRequest req; 140023b0d9fbSKevin Wolf uint64_t align = bs->request_alignment; 140161007b31SStefan Hajnoczi uint8_t *head_buf = NULL; 140261007b31SStefan Hajnoczi uint8_t *tail_buf = NULL; 140361007b31SStefan Hajnoczi QEMUIOVector local_qiov; 140461007b31SStefan Hajnoczi bool use_local_qiov = false; 140561007b31SStefan Hajnoczi int ret; 140661007b31SStefan Hajnoczi 140761007b31SStefan Hajnoczi if (!bs->drv) { 140861007b31SStefan Hajnoczi return -ENOMEDIUM; 140961007b31SStefan Hajnoczi } 141061007b31SStefan Hajnoczi if (bs->read_only) { 1411eaf5fe2dSPaolo Bonzini return -EPERM; 141261007b31SStefan Hajnoczi } 141304c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 141461007b31SStefan Hajnoczi 141561007b31SStefan Hajnoczi ret = bdrv_check_byte_request(bs, offset, bytes); 141661007b31SStefan Hajnoczi if (ret < 0) { 141761007b31SStefan Hajnoczi return ret; 141861007b31SStefan Hajnoczi } 141961007b31SStefan Hajnoczi 142061007b31SStefan Hajnoczi /* 142161007b31SStefan Hajnoczi * Align write if necessary by performing a read-modify-write cycle. 142261007b31SStefan Hajnoczi * Pad qiov with the read parts and be sure to have a tracked request not 142361007b31SStefan Hajnoczi * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 142461007b31SStefan Hajnoczi */ 1425ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 142661007b31SStefan Hajnoczi 14279eeb6dd1SFam Zheng if (!qiov) { 14289eeb6dd1SFam Zheng ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); 14299eeb6dd1SFam Zheng goto out; 14309eeb6dd1SFam Zheng } 14319eeb6dd1SFam Zheng 143261007b31SStefan Hajnoczi if (offset & (align - 1)) { 143361007b31SStefan Hajnoczi QEMUIOVector head_qiov; 143461007b31SStefan Hajnoczi struct iovec head_iov; 143561007b31SStefan Hajnoczi 143661007b31SStefan Hajnoczi mark_request_serialising(&req, align); 143761007b31SStefan Hajnoczi wait_serialising_requests(&req); 143861007b31SStefan Hajnoczi 143961007b31SStefan Hajnoczi head_buf = qemu_blockalign(bs, align); 144061007b31SStefan Hajnoczi head_iov = (struct iovec) { 144161007b31SStefan Hajnoczi .iov_base = head_buf, 144261007b31SStefan Hajnoczi .iov_len = align, 144361007b31SStefan Hajnoczi }; 144461007b31SStefan Hajnoczi qemu_iovec_init_external(&head_qiov, &head_iov, 1); 144561007b31SStefan Hajnoczi 14469a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 144761007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 144861007b31SStefan Hajnoczi align, &head_qiov, 0); 144961007b31SStefan Hajnoczi if (ret < 0) { 145061007b31SStefan Hajnoczi goto fail; 145161007b31SStefan Hajnoczi } 14529a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 145361007b31SStefan Hajnoczi 145461007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 2); 145561007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 145661007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 145761007b31SStefan Hajnoczi use_local_qiov = true; 145861007b31SStefan Hajnoczi 145961007b31SStefan Hajnoczi bytes += offset & (align - 1); 146061007b31SStefan Hajnoczi offset = offset & ~(align - 1); 1461117bc3faSPeter Lieven 1462117bc3faSPeter Lieven /* We have read the tail already if the request is smaller 1463117bc3faSPeter Lieven * than one aligned block. 1464117bc3faSPeter Lieven */ 1465117bc3faSPeter Lieven if (bytes < align) { 1466117bc3faSPeter Lieven qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes); 1467117bc3faSPeter Lieven bytes = align; 1468117bc3faSPeter Lieven } 146961007b31SStefan Hajnoczi } 147061007b31SStefan Hajnoczi 147161007b31SStefan Hajnoczi if ((offset + bytes) & (align - 1)) { 147261007b31SStefan Hajnoczi QEMUIOVector tail_qiov; 147361007b31SStefan Hajnoczi struct iovec tail_iov; 147461007b31SStefan Hajnoczi size_t tail_bytes; 147561007b31SStefan Hajnoczi bool waited; 147661007b31SStefan Hajnoczi 147761007b31SStefan Hajnoczi mark_request_serialising(&req, align); 147861007b31SStefan Hajnoczi waited = wait_serialising_requests(&req); 147961007b31SStefan Hajnoczi assert(!waited || !use_local_qiov); 148061007b31SStefan Hajnoczi 148161007b31SStefan Hajnoczi tail_buf = qemu_blockalign(bs, align); 148261007b31SStefan Hajnoczi tail_iov = (struct iovec) { 148361007b31SStefan Hajnoczi .iov_base = tail_buf, 148461007b31SStefan Hajnoczi .iov_len = align, 148561007b31SStefan Hajnoczi }; 148661007b31SStefan Hajnoczi qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 148761007b31SStefan Hajnoczi 14889a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 148961007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 149061007b31SStefan Hajnoczi align, &tail_qiov, 0); 149161007b31SStefan Hajnoczi if (ret < 0) { 149261007b31SStefan Hajnoczi goto fail; 149361007b31SStefan Hajnoczi } 14949a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 149561007b31SStefan Hajnoczi 149661007b31SStefan Hajnoczi if (!use_local_qiov) { 149761007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 1); 149861007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 149961007b31SStefan Hajnoczi use_local_qiov = true; 150061007b31SStefan Hajnoczi } 150161007b31SStefan Hajnoczi 150261007b31SStefan Hajnoczi tail_bytes = (offset + bytes) & (align - 1); 150361007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 150461007b31SStefan Hajnoczi 150561007b31SStefan Hajnoczi bytes = ROUND_UP(bytes, align); 150661007b31SStefan Hajnoczi } 150761007b31SStefan Hajnoczi 150861007b31SStefan Hajnoczi ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, 150961007b31SStefan Hajnoczi use_local_qiov ? &local_qiov : qiov, 151061007b31SStefan Hajnoczi flags); 151161007b31SStefan Hajnoczi 151261007b31SStefan Hajnoczi fail: 151361007b31SStefan Hajnoczi 151461007b31SStefan Hajnoczi if (use_local_qiov) { 151561007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 151661007b31SStefan Hajnoczi } 151761007b31SStefan Hajnoczi qemu_vfree(head_buf); 151861007b31SStefan Hajnoczi qemu_vfree(tail_buf); 15199eeb6dd1SFam Zheng out: 15209eeb6dd1SFam Zheng tracked_request_end(&req); 152161007b31SStefan Hajnoczi return ret; 152261007b31SStefan Hajnoczi } 152361007b31SStefan Hajnoczi 152461007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, 152561007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 152661007b31SStefan Hajnoczi BdrvRequestFlags flags) 152761007b31SStefan Hajnoczi { 152861007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 152961007b31SStefan Hajnoczi return -EINVAL; 153061007b31SStefan Hajnoczi } 153161007b31SStefan Hajnoczi 1532cab3a356SKevin Wolf return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS, 153361007b31SStefan Hajnoczi nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 153461007b31SStefan Hajnoczi } 153561007b31SStefan Hajnoczi 153661007b31SStefan Hajnoczi int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, 153761007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *qiov) 153861007b31SStefan Hajnoczi { 153961007b31SStefan Hajnoczi trace_bdrv_co_writev(bs, sector_num, nb_sectors); 154061007b31SStefan Hajnoczi 154161007b31SStefan Hajnoczi return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); 154261007b31SStefan Hajnoczi } 154361007b31SStefan Hajnoczi 154474021bc4SEric Blake int coroutine_fn bdrv_co_pwrite_zeroes(BlockDriverState *bs, 154574021bc4SEric Blake int64_t offset, int count, 154661007b31SStefan Hajnoczi BdrvRequestFlags flags) 154761007b31SStefan Hajnoczi { 154874021bc4SEric Blake trace_bdrv_co_pwrite_zeroes(bs, offset, count, flags); 154961007b31SStefan Hajnoczi 155061007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 155161007b31SStefan Hajnoczi flags &= ~BDRV_REQ_MAY_UNMAP; 155261007b31SStefan Hajnoczi } 155361007b31SStefan Hajnoczi 155474021bc4SEric Blake return bdrv_co_pwritev(bs, offset, count, NULL, 155561007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags); 155661007b31SStefan Hajnoczi } 155761007b31SStefan Hajnoczi 155861007b31SStefan Hajnoczi typedef struct BdrvCoGetBlockStatusData { 155961007b31SStefan Hajnoczi BlockDriverState *bs; 156061007b31SStefan Hajnoczi BlockDriverState *base; 156167a0fd2aSFam Zheng BlockDriverState **file; 156261007b31SStefan Hajnoczi int64_t sector_num; 156361007b31SStefan Hajnoczi int nb_sectors; 156461007b31SStefan Hajnoczi int *pnum; 156561007b31SStefan Hajnoczi int64_t ret; 156661007b31SStefan Hajnoczi bool done; 156761007b31SStefan Hajnoczi } BdrvCoGetBlockStatusData; 156861007b31SStefan Hajnoczi 156961007b31SStefan Hajnoczi /* 157061007b31SStefan Hajnoczi * Returns the allocation status of the specified sectors. 157161007b31SStefan Hajnoczi * Drivers not implementing the functionality are assumed to not support 157261007b31SStefan Hajnoczi * backing files, hence all their sectors are reported as allocated. 157361007b31SStefan Hajnoczi * 157461007b31SStefan Hajnoczi * If 'sector_num' is beyond the end of the disk image the return value is 0 157561007b31SStefan Hajnoczi * and 'pnum' is set to 0. 157661007b31SStefan Hajnoczi * 157761007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 157861007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 157961007b31SStefan Hajnoczi * allocated/unallocated state. 158061007b31SStefan Hajnoczi * 158161007b31SStefan Hajnoczi * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 158261007b31SStefan Hajnoczi * beyond the end of the disk image it will be clamped. 158367a0fd2aSFam Zheng * 158467a0fd2aSFam Zheng * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file' 158567a0fd2aSFam Zheng * points to the BDS which the sector range is allocated in. 158661007b31SStefan Hajnoczi */ 158761007b31SStefan Hajnoczi static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 158861007b31SStefan Hajnoczi int64_t sector_num, 158967a0fd2aSFam Zheng int nb_sectors, int *pnum, 159067a0fd2aSFam Zheng BlockDriverState **file) 159161007b31SStefan Hajnoczi { 159261007b31SStefan Hajnoczi int64_t total_sectors; 159361007b31SStefan Hajnoczi int64_t n; 159461007b31SStefan Hajnoczi int64_t ret, ret2; 159561007b31SStefan Hajnoczi 159661007b31SStefan Hajnoczi total_sectors = bdrv_nb_sectors(bs); 159761007b31SStefan Hajnoczi if (total_sectors < 0) { 159861007b31SStefan Hajnoczi return total_sectors; 159961007b31SStefan Hajnoczi } 160061007b31SStefan Hajnoczi 160161007b31SStefan Hajnoczi if (sector_num >= total_sectors) { 160261007b31SStefan Hajnoczi *pnum = 0; 160361007b31SStefan Hajnoczi return 0; 160461007b31SStefan Hajnoczi } 160561007b31SStefan Hajnoczi 160661007b31SStefan Hajnoczi n = total_sectors - sector_num; 160761007b31SStefan Hajnoczi if (n < nb_sectors) { 160861007b31SStefan Hajnoczi nb_sectors = n; 160961007b31SStefan Hajnoczi } 161061007b31SStefan Hajnoczi 161161007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_get_block_status) { 161261007b31SStefan Hajnoczi *pnum = nb_sectors; 161361007b31SStefan Hajnoczi ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 161461007b31SStefan Hajnoczi if (bs->drv->protocol_name) { 161561007b31SStefan Hajnoczi ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 161661007b31SStefan Hajnoczi } 161761007b31SStefan Hajnoczi return ret; 161861007b31SStefan Hajnoczi } 161961007b31SStefan Hajnoczi 162067a0fd2aSFam Zheng *file = NULL; 162167a0fd2aSFam Zheng ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum, 162267a0fd2aSFam Zheng file); 162361007b31SStefan Hajnoczi if (ret < 0) { 162461007b31SStefan Hajnoczi *pnum = 0; 162561007b31SStefan Hajnoczi return ret; 162661007b31SStefan Hajnoczi } 162761007b31SStefan Hajnoczi 162861007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_RAW) { 162961007b31SStefan Hajnoczi assert(ret & BDRV_BLOCK_OFFSET_VALID); 16309a4f4c31SKevin Wolf return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS, 163167a0fd2aSFam Zheng *pnum, pnum, file); 163261007b31SStefan Hajnoczi } 163361007b31SStefan Hajnoczi 163461007b31SStefan Hajnoczi if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 163561007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ALLOCATED; 1636a53f1a95SPaolo Bonzini } else { 163761007b31SStefan Hajnoczi if (bdrv_unallocated_blocks_are_zero(bs)) { 163861007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 1639760e0063SKevin Wolf } else if (bs->backing) { 1640760e0063SKevin Wolf BlockDriverState *bs2 = bs->backing->bs; 164161007b31SStefan Hajnoczi int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 164261007b31SStefan Hajnoczi if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { 164361007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 164461007b31SStefan Hajnoczi } 164561007b31SStefan Hajnoczi } 164661007b31SStefan Hajnoczi } 164761007b31SStefan Hajnoczi 1648ac987b30SFam Zheng if (*file && *file != bs && 164961007b31SStefan Hajnoczi (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 165061007b31SStefan Hajnoczi (ret & BDRV_BLOCK_OFFSET_VALID)) { 165167a0fd2aSFam Zheng BlockDriverState *file2; 165261007b31SStefan Hajnoczi int file_pnum; 165361007b31SStefan Hajnoczi 1654ac987b30SFam Zheng ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS, 165567a0fd2aSFam Zheng *pnum, &file_pnum, &file2); 165661007b31SStefan Hajnoczi if (ret2 >= 0) { 165761007b31SStefan Hajnoczi /* Ignore errors. This is just providing extra information, it 165861007b31SStefan Hajnoczi * is useful but not necessary. 165961007b31SStefan Hajnoczi */ 166061007b31SStefan Hajnoczi if (!file_pnum) { 166161007b31SStefan Hajnoczi /* !file_pnum indicates an offset at or beyond the EOF; it is 166261007b31SStefan Hajnoczi * perfectly valid for the format block driver to point to such 166361007b31SStefan Hajnoczi * offsets, so catch it and mark everything as zero */ 166461007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 166561007b31SStefan Hajnoczi } else { 166661007b31SStefan Hajnoczi /* Limit request to the range reported by the protocol driver */ 166761007b31SStefan Hajnoczi *pnum = file_pnum; 166861007b31SStefan Hajnoczi ret |= (ret2 & BDRV_BLOCK_ZERO); 166961007b31SStefan Hajnoczi } 167061007b31SStefan Hajnoczi } 167161007b31SStefan Hajnoczi } 167261007b31SStefan Hajnoczi 167361007b31SStefan Hajnoczi return ret; 167461007b31SStefan Hajnoczi } 167561007b31SStefan Hajnoczi 1676ba3f0e25SFam Zheng static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs, 1677ba3f0e25SFam Zheng BlockDriverState *base, 1678ba3f0e25SFam Zheng int64_t sector_num, 1679ba3f0e25SFam Zheng int nb_sectors, 168067a0fd2aSFam Zheng int *pnum, 168167a0fd2aSFam Zheng BlockDriverState **file) 1682ba3f0e25SFam Zheng { 1683ba3f0e25SFam Zheng BlockDriverState *p; 1684ba3f0e25SFam Zheng int64_t ret = 0; 1685ba3f0e25SFam Zheng 1686ba3f0e25SFam Zheng assert(bs != base); 1687760e0063SKevin Wolf for (p = bs; p != base; p = backing_bs(p)) { 168867a0fd2aSFam Zheng ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file); 1689ba3f0e25SFam Zheng if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) { 1690ba3f0e25SFam Zheng break; 1691ba3f0e25SFam Zheng } 1692ba3f0e25SFam Zheng /* [sector_num, pnum] unallocated on this layer, which could be only 1693ba3f0e25SFam Zheng * the first part of [sector_num, nb_sectors]. */ 1694ba3f0e25SFam Zheng nb_sectors = MIN(nb_sectors, *pnum); 1695ba3f0e25SFam Zheng } 1696ba3f0e25SFam Zheng return ret; 1697ba3f0e25SFam Zheng } 1698ba3f0e25SFam Zheng 1699ba3f0e25SFam Zheng /* Coroutine wrapper for bdrv_get_block_status_above() */ 1700ba3f0e25SFam Zheng static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque) 170161007b31SStefan Hajnoczi { 170261007b31SStefan Hajnoczi BdrvCoGetBlockStatusData *data = opaque; 170361007b31SStefan Hajnoczi 1704ba3f0e25SFam Zheng data->ret = bdrv_co_get_block_status_above(data->bs, data->base, 1705ba3f0e25SFam Zheng data->sector_num, 1706ba3f0e25SFam Zheng data->nb_sectors, 170767a0fd2aSFam Zheng data->pnum, 170867a0fd2aSFam Zheng data->file); 170961007b31SStefan Hajnoczi data->done = true; 171061007b31SStefan Hajnoczi } 171161007b31SStefan Hajnoczi 171261007b31SStefan Hajnoczi /* 1713ba3f0e25SFam Zheng * Synchronous wrapper around bdrv_co_get_block_status_above(). 171461007b31SStefan Hajnoczi * 1715ba3f0e25SFam Zheng * See bdrv_co_get_block_status_above() for details. 171661007b31SStefan Hajnoczi */ 1717ba3f0e25SFam Zheng int64_t bdrv_get_block_status_above(BlockDriverState *bs, 1718ba3f0e25SFam Zheng BlockDriverState *base, 1719ba3f0e25SFam Zheng int64_t sector_num, 172067a0fd2aSFam Zheng int nb_sectors, int *pnum, 172167a0fd2aSFam Zheng BlockDriverState **file) 172261007b31SStefan Hajnoczi { 172361007b31SStefan Hajnoczi Coroutine *co; 172461007b31SStefan Hajnoczi BdrvCoGetBlockStatusData data = { 172561007b31SStefan Hajnoczi .bs = bs, 1726ba3f0e25SFam Zheng .base = base, 172767a0fd2aSFam Zheng .file = file, 172861007b31SStefan Hajnoczi .sector_num = sector_num, 172961007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 173061007b31SStefan Hajnoczi .pnum = pnum, 173161007b31SStefan Hajnoczi .done = false, 173261007b31SStefan Hajnoczi }; 173361007b31SStefan Hajnoczi 173461007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 173561007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 1736ba3f0e25SFam Zheng bdrv_get_block_status_above_co_entry(&data); 173761007b31SStefan Hajnoczi } else { 173861007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 173961007b31SStefan Hajnoczi 1740ba3f0e25SFam Zheng co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry); 174161007b31SStefan Hajnoczi qemu_coroutine_enter(co, &data); 174261007b31SStefan Hajnoczi while (!data.done) { 174361007b31SStefan Hajnoczi aio_poll(aio_context, true); 174461007b31SStefan Hajnoczi } 174561007b31SStefan Hajnoczi } 174661007b31SStefan Hajnoczi return data.ret; 174761007b31SStefan Hajnoczi } 174861007b31SStefan Hajnoczi 1749ba3f0e25SFam Zheng int64_t bdrv_get_block_status(BlockDriverState *bs, 1750ba3f0e25SFam Zheng int64_t sector_num, 175167a0fd2aSFam Zheng int nb_sectors, int *pnum, 175267a0fd2aSFam Zheng BlockDriverState **file) 1753ba3f0e25SFam Zheng { 1754760e0063SKevin Wolf return bdrv_get_block_status_above(bs, backing_bs(bs), 175567a0fd2aSFam Zheng sector_num, nb_sectors, pnum, file); 1756ba3f0e25SFam Zheng } 1757ba3f0e25SFam Zheng 175861007b31SStefan Hajnoczi int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 175961007b31SStefan Hajnoczi int nb_sectors, int *pnum) 176061007b31SStefan Hajnoczi { 176167a0fd2aSFam Zheng BlockDriverState *file; 176267a0fd2aSFam Zheng int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum, 176367a0fd2aSFam Zheng &file); 176461007b31SStefan Hajnoczi if (ret < 0) { 176561007b31SStefan Hajnoczi return ret; 176661007b31SStefan Hajnoczi } 176761007b31SStefan Hajnoczi return !!(ret & BDRV_BLOCK_ALLOCATED); 176861007b31SStefan Hajnoczi } 176961007b31SStefan Hajnoczi 177061007b31SStefan Hajnoczi /* 177161007b31SStefan Hajnoczi * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 177261007b31SStefan Hajnoczi * 177361007b31SStefan Hajnoczi * Return true if the given sector is allocated in any image between 177461007b31SStefan Hajnoczi * BASE and TOP (inclusive). BASE can be NULL to check if the given 177561007b31SStefan Hajnoczi * sector is allocated in any image of the chain. Return false otherwise. 177661007b31SStefan Hajnoczi * 177761007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 177861007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 177961007b31SStefan Hajnoczi * allocated/unallocated state. 178061007b31SStefan Hajnoczi * 178161007b31SStefan Hajnoczi */ 178261007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top, 178361007b31SStefan Hajnoczi BlockDriverState *base, 178461007b31SStefan Hajnoczi int64_t sector_num, 178561007b31SStefan Hajnoczi int nb_sectors, int *pnum) 178661007b31SStefan Hajnoczi { 178761007b31SStefan Hajnoczi BlockDriverState *intermediate; 178861007b31SStefan Hajnoczi int ret, n = nb_sectors; 178961007b31SStefan Hajnoczi 179061007b31SStefan Hajnoczi intermediate = top; 179161007b31SStefan Hajnoczi while (intermediate && intermediate != base) { 179261007b31SStefan Hajnoczi int pnum_inter; 179361007b31SStefan Hajnoczi ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 179461007b31SStefan Hajnoczi &pnum_inter); 179561007b31SStefan Hajnoczi if (ret < 0) { 179661007b31SStefan Hajnoczi return ret; 179761007b31SStefan Hajnoczi } else if (ret) { 179861007b31SStefan Hajnoczi *pnum = pnum_inter; 179961007b31SStefan Hajnoczi return 1; 180061007b31SStefan Hajnoczi } 180161007b31SStefan Hajnoczi 180261007b31SStefan Hajnoczi /* 180361007b31SStefan Hajnoczi * [sector_num, nb_sectors] is unallocated on top but intermediate 180461007b31SStefan Hajnoczi * might have 180561007b31SStefan Hajnoczi * 180661007b31SStefan Hajnoczi * [sector_num+x, nr_sectors] allocated. 180761007b31SStefan Hajnoczi */ 180861007b31SStefan Hajnoczi if (n > pnum_inter && 180961007b31SStefan Hajnoczi (intermediate == top || 181061007b31SStefan Hajnoczi sector_num + pnum_inter < intermediate->total_sectors)) { 181161007b31SStefan Hajnoczi n = pnum_inter; 181261007b31SStefan Hajnoczi } 181361007b31SStefan Hajnoczi 1814760e0063SKevin Wolf intermediate = backing_bs(intermediate); 181561007b31SStefan Hajnoczi } 181661007b31SStefan Hajnoczi 181761007b31SStefan Hajnoczi *pnum = n; 181861007b31SStefan Hajnoczi return 0; 181961007b31SStefan Hajnoczi } 182061007b31SStefan Hajnoczi 182161007b31SStefan Hajnoczi int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 182261007b31SStefan Hajnoczi const uint8_t *buf, int nb_sectors) 182361007b31SStefan Hajnoczi { 182461007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 182561007b31SStefan Hajnoczi int ret; 182661007b31SStefan Hajnoczi 182761007b31SStefan Hajnoczi if (!drv) { 182861007b31SStefan Hajnoczi return -ENOMEDIUM; 182961007b31SStefan Hajnoczi } 183061007b31SStefan Hajnoczi if (!drv->bdrv_write_compressed) { 183161007b31SStefan Hajnoczi return -ENOTSUP; 183261007b31SStefan Hajnoczi } 183361007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 183461007b31SStefan Hajnoczi if (ret < 0) { 183561007b31SStefan Hajnoczi return ret; 183661007b31SStefan Hajnoczi } 183761007b31SStefan Hajnoczi 183861007b31SStefan Hajnoczi assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 183961007b31SStefan Hajnoczi 184061007b31SStefan Hajnoczi return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 184161007b31SStefan Hajnoczi } 184261007b31SStefan Hajnoczi 184361007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 184461007b31SStefan Hajnoczi int64_t pos, int size) 184561007b31SStefan Hajnoczi { 184661007b31SStefan Hajnoczi QEMUIOVector qiov; 184761007b31SStefan Hajnoczi struct iovec iov = { 184861007b31SStefan Hajnoczi .iov_base = (void *) buf, 184961007b31SStefan Hajnoczi .iov_len = size, 185061007b31SStefan Hajnoczi }; 1851*b433d942SKevin Wolf int ret; 185261007b31SStefan Hajnoczi 185361007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 1854*b433d942SKevin Wolf 1855*b433d942SKevin Wolf ret = bdrv_writev_vmstate(bs, &qiov, pos); 1856*b433d942SKevin Wolf if (ret < 0) { 1857*b433d942SKevin Wolf return ret; 1858*b433d942SKevin Wolf } 1859*b433d942SKevin Wolf 1860*b433d942SKevin Wolf return size; 186161007b31SStefan Hajnoczi } 186261007b31SStefan Hajnoczi 186361007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 186461007b31SStefan Hajnoczi { 186561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 186661007b31SStefan Hajnoczi 186761007b31SStefan Hajnoczi if (!drv) { 186861007b31SStefan Hajnoczi return -ENOMEDIUM; 186961007b31SStefan Hajnoczi } else if (drv->bdrv_save_vmstate) { 187061007b31SStefan Hajnoczi return drv->bdrv_save_vmstate(bs, qiov, pos); 187161007b31SStefan Hajnoczi } else if (bs->file) { 18729a4f4c31SKevin Wolf return bdrv_writev_vmstate(bs->file->bs, qiov, pos); 187361007b31SStefan Hajnoczi } 187461007b31SStefan Hajnoczi 187561007b31SStefan Hajnoczi return -ENOTSUP; 187661007b31SStefan Hajnoczi } 187761007b31SStefan Hajnoczi 187861007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 187961007b31SStefan Hajnoczi int64_t pos, int size) 188061007b31SStefan Hajnoczi { 18815ddda0b8SKevin Wolf QEMUIOVector qiov; 18825ddda0b8SKevin Wolf struct iovec iov = { 18835ddda0b8SKevin Wolf .iov_base = buf, 18845ddda0b8SKevin Wolf .iov_len = size, 18855ddda0b8SKevin Wolf }; 1886*b433d942SKevin Wolf int ret; 18875ddda0b8SKevin Wolf 18885ddda0b8SKevin Wolf qemu_iovec_init_external(&qiov, &iov, 1); 1889*b433d942SKevin Wolf ret = bdrv_readv_vmstate(bs, &qiov, pos); 1890*b433d942SKevin Wolf if (ret < 0) { 1891*b433d942SKevin Wolf return ret; 1892*b433d942SKevin Wolf } 1893*b433d942SKevin Wolf 1894*b433d942SKevin Wolf return size; 18955ddda0b8SKevin Wolf } 18965ddda0b8SKevin Wolf 18975ddda0b8SKevin Wolf int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 18985ddda0b8SKevin Wolf { 189961007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 19005ddda0b8SKevin Wolf 19015ddda0b8SKevin Wolf if (!drv) { 190261007b31SStefan Hajnoczi return -ENOMEDIUM; 19035ddda0b8SKevin Wolf } else if (drv->bdrv_load_vmstate) { 19045ddda0b8SKevin Wolf return drv->bdrv_load_vmstate(bs, qiov, pos); 19055ddda0b8SKevin Wolf } else if (bs->file) { 19065ddda0b8SKevin Wolf return bdrv_readv_vmstate(bs->file->bs, qiov, pos); 19075ddda0b8SKevin Wolf } 19085ddda0b8SKevin Wolf 190961007b31SStefan Hajnoczi return -ENOTSUP; 191061007b31SStefan Hajnoczi } 191161007b31SStefan Hajnoczi 191261007b31SStefan Hajnoczi /**************************************************************/ 191361007b31SStefan Hajnoczi /* async I/Os */ 191461007b31SStefan Hajnoczi 191561007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 191661007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 191761007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 191861007b31SStefan Hajnoczi { 191961007b31SStefan Hajnoczi trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 192061007b31SStefan Hajnoczi 192161007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 192261007b31SStefan Hajnoczi cb, opaque, false); 192361007b31SStefan Hajnoczi } 192461007b31SStefan Hajnoczi 192561007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 192661007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 192761007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 192861007b31SStefan Hajnoczi { 192961007b31SStefan Hajnoczi trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 193061007b31SStefan Hajnoczi 193161007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 193261007b31SStefan Hajnoczi cb, opaque, true); 193361007b31SStefan Hajnoczi } 193461007b31SStefan Hajnoczi 193561007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb) 193661007b31SStefan Hajnoczi { 193761007b31SStefan Hajnoczi qemu_aio_ref(acb); 193861007b31SStefan Hajnoczi bdrv_aio_cancel_async(acb); 193961007b31SStefan Hajnoczi while (acb->refcnt > 1) { 194061007b31SStefan Hajnoczi if (acb->aiocb_info->get_aio_context) { 194161007b31SStefan Hajnoczi aio_poll(acb->aiocb_info->get_aio_context(acb), true); 194261007b31SStefan Hajnoczi } else if (acb->bs) { 194361007b31SStefan Hajnoczi aio_poll(bdrv_get_aio_context(acb->bs), true); 194461007b31SStefan Hajnoczi } else { 194561007b31SStefan Hajnoczi abort(); 194661007b31SStefan Hajnoczi } 194761007b31SStefan Hajnoczi } 194861007b31SStefan Hajnoczi qemu_aio_unref(acb); 194961007b31SStefan Hajnoczi } 195061007b31SStefan Hajnoczi 195161007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements 195261007b31SStefan Hajnoczi * cancel_async, otherwise we do nothing and let the request normally complete. 195361007b31SStefan Hajnoczi * In either case the completion callback must be called. */ 195461007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb) 195561007b31SStefan Hajnoczi { 195661007b31SStefan Hajnoczi if (acb->aiocb_info->cancel_async) { 195761007b31SStefan Hajnoczi acb->aiocb_info->cancel_async(acb); 195861007b31SStefan Hajnoczi } 195961007b31SStefan Hajnoczi } 196061007b31SStefan Hajnoczi 196161007b31SStefan Hajnoczi /**************************************************************/ 196261007b31SStefan Hajnoczi /* async block device emulation */ 196361007b31SStefan Hajnoczi 196441574268SEric Blake typedef struct BlockRequest { 196541574268SEric Blake union { 196641574268SEric Blake /* Used during read, write, trim */ 196741574268SEric Blake struct { 196841574268SEric Blake int64_t sector; 196941574268SEric Blake int nb_sectors; 197041574268SEric Blake int flags; 197141574268SEric Blake QEMUIOVector *qiov; 197241574268SEric Blake }; 197341574268SEric Blake /* Used during ioctl */ 197441574268SEric Blake struct { 197541574268SEric Blake int req; 197641574268SEric Blake void *buf; 197741574268SEric Blake }; 197841574268SEric Blake }; 197941574268SEric Blake BlockCompletionFunc *cb; 198041574268SEric Blake void *opaque; 198141574268SEric Blake 198241574268SEric Blake int error; 198341574268SEric Blake } BlockRequest; 198441574268SEric Blake 198561007b31SStefan Hajnoczi typedef struct BlockAIOCBCoroutine { 198661007b31SStefan Hajnoczi BlockAIOCB common; 198761007b31SStefan Hajnoczi BlockRequest req; 198861007b31SStefan Hajnoczi bool is_write; 198961007b31SStefan Hajnoczi bool need_bh; 199061007b31SStefan Hajnoczi bool *done; 199161007b31SStefan Hajnoczi QEMUBH* bh; 199261007b31SStefan Hajnoczi } BlockAIOCBCoroutine; 199361007b31SStefan Hajnoczi 199461007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_co_aiocb_info = { 199561007b31SStefan Hajnoczi .aiocb_size = sizeof(BlockAIOCBCoroutine), 199661007b31SStefan Hajnoczi }; 199761007b31SStefan Hajnoczi 199861007b31SStefan Hajnoczi static void bdrv_co_complete(BlockAIOCBCoroutine *acb) 199961007b31SStefan Hajnoczi { 200061007b31SStefan Hajnoczi if (!acb->need_bh) { 200161007b31SStefan Hajnoczi acb->common.cb(acb->common.opaque, acb->req.error); 200261007b31SStefan Hajnoczi qemu_aio_unref(acb); 200361007b31SStefan Hajnoczi } 200461007b31SStefan Hajnoczi } 200561007b31SStefan Hajnoczi 200661007b31SStefan Hajnoczi static void bdrv_co_em_bh(void *opaque) 200761007b31SStefan Hajnoczi { 200861007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 200961007b31SStefan Hajnoczi 201061007b31SStefan Hajnoczi assert(!acb->need_bh); 201161007b31SStefan Hajnoczi qemu_bh_delete(acb->bh); 201261007b31SStefan Hajnoczi bdrv_co_complete(acb); 201361007b31SStefan Hajnoczi } 201461007b31SStefan Hajnoczi 201561007b31SStefan Hajnoczi static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) 201661007b31SStefan Hajnoczi { 201761007b31SStefan Hajnoczi acb->need_bh = false; 201861007b31SStefan Hajnoczi if (acb->req.error != -EINPROGRESS) { 201961007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 202061007b31SStefan Hajnoczi 202161007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 202261007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 202361007b31SStefan Hajnoczi } 202461007b31SStefan Hajnoczi } 202561007b31SStefan Hajnoczi 202661007b31SStefan Hajnoczi /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 202761007b31SStefan Hajnoczi static void coroutine_fn bdrv_co_do_rw(void *opaque) 202861007b31SStefan Hajnoczi { 202961007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 203061007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 203161007b31SStefan Hajnoczi 203261007b31SStefan Hajnoczi if (!acb->is_write) { 203361007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 203461007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 203561007b31SStefan Hajnoczi } else { 203661007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 203761007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 203861007b31SStefan Hajnoczi } 203961007b31SStefan Hajnoczi 204061007b31SStefan Hajnoczi bdrv_co_complete(acb); 204161007b31SStefan Hajnoczi } 204261007b31SStefan Hajnoczi 204361007b31SStefan Hajnoczi static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 204461007b31SStefan Hajnoczi int64_t sector_num, 204561007b31SStefan Hajnoczi QEMUIOVector *qiov, 204661007b31SStefan Hajnoczi int nb_sectors, 204761007b31SStefan Hajnoczi BdrvRequestFlags flags, 204861007b31SStefan Hajnoczi BlockCompletionFunc *cb, 204961007b31SStefan Hajnoczi void *opaque, 205061007b31SStefan Hajnoczi bool is_write) 205161007b31SStefan Hajnoczi { 205261007b31SStefan Hajnoczi Coroutine *co; 205361007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 205461007b31SStefan Hajnoczi 205561007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 205661007b31SStefan Hajnoczi acb->need_bh = true; 205761007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 205861007b31SStefan Hajnoczi acb->req.sector = sector_num; 205961007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 206061007b31SStefan Hajnoczi acb->req.qiov = qiov; 206161007b31SStefan Hajnoczi acb->req.flags = flags; 206261007b31SStefan Hajnoczi acb->is_write = is_write; 206361007b31SStefan Hajnoczi 206461007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_co_do_rw); 206561007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 206661007b31SStefan Hajnoczi 206761007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 206861007b31SStefan Hajnoczi return &acb->common; 206961007b31SStefan Hajnoczi } 207061007b31SStefan Hajnoczi 207161007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 207261007b31SStefan Hajnoczi { 207361007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 207461007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 207561007b31SStefan Hajnoczi 207661007b31SStefan Hajnoczi acb->req.error = bdrv_co_flush(bs); 207761007b31SStefan Hajnoczi bdrv_co_complete(acb); 207861007b31SStefan Hajnoczi } 207961007b31SStefan Hajnoczi 208061007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, 208161007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 208261007b31SStefan Hajnoczi { 208361007b31SStefan Hajnoczi trace_bdrv_aio_flush(bs, opaque); 208461007b31SStefan Hajnoczi 208561007b31SStefan Hajnoczi Coroutine *co; 208661007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 208761007b31SStefan Hajnoczi 208861007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 208961007b31SStefan Hajnoczi acb->need_bh = true; 209061007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 209161007b31SStefan Hajnoczi 209261007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 209361007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 209461007b31SStefan Hajnoczi 209561007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 209661007b31SStefan Hajnoczi return &acb->common; 209761007b31SStefan Hajnoczi } 209861007b31SStefan Hajnoczi 209961007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 210061007b31SStefan Hajnoczi { 210161007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 210261007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 210361007b31SStefan Hajnoczi 210461007b31SStefan Hajnoczi acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 210561007b31SStefan Hajnoczi bdrv_co_complete(acb); 210661007b31SStefan Hajnoczi } 210761007b31SStefan Hajnoczi 210861007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, 210961007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 211061007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 211161007b31SStefan Hajnoczi { 211261007b31SStefan Hajnoczi Coroutine *co; 211361007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 211461007b31SStefan Hajnoczi 211561007b31SStefan Hajnoczi trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 211661007b31SStefan Hajnoczi 211761007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 211861007b31SStefan Hajnoczi acb->need_bh = true; 211961007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 212061007b31SStefan Hajnoczi acb->req.sector = sector_num; 212161007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 212261007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 212361007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 212461007b31SStefan Hajnoczi 212561007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 212661007b31SStefan Hajnoczi return &acb->common; 212761007b31SStefan Hajnoczi } 212861007b31SStefan Hajnoczi 212961007b31SStefan Hajnoczi void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 213061007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 213161007b31SStefan Hajnoczi { 213261007b31SStefan Hajnoczi BlockAIOCB *acb; 213361007b31SStefan Hajnoczi 2134c84b3192SPaolo Bonzini acb = g_malloc(aiocb_info->aiocb_size); 213561007b31SStefan Hajnoczi acb->aiocb_info = aiocb_info; 213661007b31SStefan Hajnoczi acb->bs = bs; 213761007b31SStefan Hajnoczi acb->cb = cb; 213861007b31SStefan Hajnoczi acb->opaque = opaque; 213961007b31SStefan Hajnoczi acb->refcnt = 1; 214061007b31SStefan Hajnoczi return acb; 214161007b31SStefan Hajnoczi } 214261007b31SStefan Hajnoczi 214361007b31SStefan Hajnoczi void qemu_aio_ref(void *p) 214461007b31SStefan Hajnoczi { 214561007b31SStefan Hajnoczi BlockAIOCB *acb = p; 214661007b31SStefan Hajnoczi acb->refcnt++; 214761007b31SStefan Hajnoczi } 214861007b31SStefan Hajnoczi 214961007b31SStefan Hajnoczi void qemu_aio_unref(void *p) 215061007b31SStefan Hajnoczi { 215161007b31SStefan Hajnoczi BlockAIOCB *acb = p; 215261007b31SStefan Hajnoczi assert(acb->refcnt > 0); 215361007b31SStefan Hajnoczi if (--acb->refcnt == 0) { 2154c84b3192SPaolo Bonzini g_free(acb); 215561007b31SStefan Hajnoczi } 215661007b31SStefan Hajnoczi } 215761007b31SStefan Hajnoczi 215861007b31SStefan Hajnoczi /**************************************************************/ 215961007b31SStefan Hajnoczi /* Coroutine block device emulation */ 216061007b31SStefan Hajnoczi 216161007b31SStefan Hajnoczi static void coroutine_fn bdrv_flush_co_entry(void *opaque) 216261007b31SStefan Hajnoczi { 216361007b31SStefan Hajnoczi RwCo *rwco = opaque; 216461007b31SStefan Hajnoczi 216561007b31SStefan Hajnoczi rwco->ret = bdrv_co_flush(rwco->bs); 216661007b31SStefan Hajnoczi } 216761007b31SStefan Hajnoczi 216861007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 216961007b31SStefan Hajnoczi { 217061007b31SStefan Hajnoczi int ret; 2171cdb5e315SFam Zheng BdrvTrackedRequest req; 217261007b31SStefan Hajnoczi 21731b6bc94dSDimitris Aragiorgis if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 21741b6bc94dSDimitris Aragiorgis bdrv_is_sg(bs)) { 217561007b31SStefan Hajnoczi return 0; 217661007b31SStefan Hajnoczi } 217761007b31SStefan Hajnoczi 2178cdb5e315SFam Zheng tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH); 2179c32b82afSPavel Dovgalyuk 2180c32b82afSPavel Dovgalyuk /* Write back all layers by calling one driver function */ 2181c32b82afSPavel Dovgalyuk if (bs->drv->bdrv_co_flush) { 2182c32b82afSPavel Dovgalyuk ret = bs->drv->bdrv_co_flush(bs); 2183c32b82afSPavel Dovgalyuk goto out; 2184c32b82afSPavel Dovgalyuk } 2185c32b82afSPavel Dovgalyuk 218661007b31SStefan Hajnoczi /* Write back cached data to the OS even with cache=unsafe */ 218761007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 218861007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_os) { 218961007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_os(bs); 219061007b31SStefan Hajnoczi if (ret < 0) { 2191cdb5e315SFam Zheng goto out; 219261007b31SStefan Hajnoczi } 219361007b31SStefan Hajnoczi } 219461007b31SStefan Hajnoczi 219561007b31SStefan Hajnoczi /* But don't actually force it to the disk with cache=unsafe */ 219661007b31SStefan Hajnoczi if (bs->open_flags & BDRV_O_NO_FLUSH) { 219761007b31SStefan Hajnoczi goto flush_parent; 219861007b31SStefan Hajnoczi } 219961007b31SStefan Hajnoczi 220061007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 220161007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_disk) { 220261007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_disk(bs); 220361007b31SStefan Hajnoczi } else if (bs->drv->bdrv_aio_flush) { 220461007b31SStefan Hajnoczi BlockAIOCB *acb; 220561007b31SStefan Hajnoczi CoroutineIOCompletion co = { 220661007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 220761007b31SStefan Hajnoczi }; 220861007b31SStefan Hajnoczi 220961007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 221061007b31SStefan Hajnoczi if (acb == NULL) { 221161007b31SStefan Hajnoczi ret = -EIO; 221261007b31SStefan Hajnoczi } else { 221361007b31SStefan Hajnoczi qemu_coroutine_yield(); 221461007b31SStefan Hajnoczi ret = co.ret; 221561007b31SStefan Hajnoczi } 221661007b31SStefan Hajnoczi } else { 221761007b31SStefan Hajnoczi /* 221861007b31SStefan Hajnoczi * Some block drivers always operate in either writethrough or unsafe 221961007b31SStefan Hajnoczi * mode and don't support bdrv_flush therefore. Usually qemu doesn't 222061007b31SStefan Hajnoczi * know how the server works (because the behaviour is hardcoded or 222161007b31SStefan Hajnoczi * depends on server-side configuration), so we can't ensure that 222261007b31SStefan Hajnoczi * everything is safe on disk. Returning an error doesn't work because 222361007b31SStefan Hajnoczi * that would break guests even if the server operates in writethrough 222461007b31SStefan Hajnoczi * mode. 222561007b31SStefan Hajnoczi * 222661007b31SStefan Hajnoczi * Let's hope the user knows what he's doing. 222761007b31SStefan Hajnoczi */ 222861007b31SStefan Hajnoczi ret = 0; 222961007b31SStefan Hajnoczi } 223061007b31SStefan Hajnoczi if (ret < 0) { 2231cdb5e315SFam Zheng goto out; 223261007b31SStefan Hajnoczi } 223361007b31SStefan Hajnoczi 223461007b31SStefan Hajnoczi /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 223561007b31SStefan Hajnoczi * in the case of cache=unsafe, so there are no useless flushes. 223661007b31SStefan Hajnoczi */ 223761007b31SStefan Hajnoczi flush_parent: 2238cdb5e315SFam Zheng ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0; 2239cdb5e315SFam Zheng out: 2240cdb5e315SFam Zheng tracked_request_end(&req); 2241cdb5e315SFam Zheng return ret; 224261007b31SStefan Hajnoczi } 224361007b31SStefan Hajnoczi 224461007b31SStefan Hajnoczi int bdrv_flush(BlockDriverState *bs) 224561007b31SStefan Hajnoczi { 224661007b31SStefan Hajnoczi Coroutine *co; 224761007b31SStefan Hajnoczi RwCo rwco = { 224861007b31SStefan Hajnoczi .bs = bs, 224961007b31SStefan Hajnoczi .ret = NOT_DONE, 225061007b31SStefan Hajnoczi }; 225161007b31SStefan Hajnoczi 225261007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 225361007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 225461007b31SStefan Hajnoczi bdrv_flush_co_entry(&rwco); 225561007b31SStefan Hajnoczi } else { 225661007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 225761007b31SStefan Hajnoczi 225861007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_flush_co_entry); 225961007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 226061007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 226161007b31SStefan Hajnoczi aio_poll(aio_context, true); 226261007b31SStefan Hajnoczi } 226361007b31SStefan Hajnoczi } 226461007b31SStefan Hajnoczi 226561007b31SStefan Hajnoczi return rwco.ret; 226661007b31SStefan Hajnoczi } 226761007b31SStefan Hajnoczi 226861007b31SStefan Hajnoczi typedef struct DiscardCo { 226961007b31SStefan Hajnoczi BlockDriverState *bs; 227061007b31SStefan Hajnoczi int64_t sector_num; 227161007b31SStefan Hajnoczi int nb_sectors; 227261007b31SStefan Hajnoczi int ret; 227361007b31SStefan Hajnoczi } DiscardCo; 227461007b31SStefan Hajnoczi static void coroutine_fn bdrv_discard_co_entry(void *opaque) 227561007b31SStefan Hajnoczi { 227661007b31SStefan Hajnoczi DiscardCo *rwco = opaque; 227761007b31SStefan Hajnoczi 227861007b31SStefan Hajnoczi rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 227961007b31SStefan Hajnoczi } 228061007b31SStefan Hajnoczi 228161007b31SStefan Hajnoczi int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, 228261007b31SStefan Hajnoczi int nb_sectors) 228361007b31SStefan Hajnoczi { 2284b1066c87SFam Zheng BdrvTrackedRequest req; 228561007b31SStefan Hajnoczi int max_discard, ret; 228661007b31SStefan Hajnoczi 228761007b31SStefan Hajnoczi if (!bs->drv) { 228861007b31SStefan Hajnoczi return -ENOMEDIUM; 228961007b31SStefan Hajnoczi } 229061007b31SStefan Hajnoczi 229161007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 229261007b31SStefan Hajnoczi if (ret < 0) { 229361007b31SStefan Hajnoczi return ret; 229461007b31SStefan Hajnoczi } else if (bs->read_only) { 2295eaf5fe2dSPaolo Bonzini return -EPERM; 229661007b31SStefan Hajnoczi } 229704c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 229861007b31SStefan Hajnoczi 229961007b31SStefan Hajnoczi /* Do nothing if disabled. */ 230061007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 230161007b31SStefan Hajnoczi return 0; 230261007b31SStefan Hajnoczi } 230361007b31SStefan Hajnoczi 230461007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { 230561007b31SStefan Hajnoczi return 0; 230661007b31SStefan Hajnoczi } 230761007b31SStefan Hajnoczi 2308b1066c87SFam Zheng tracked_request_begin(&req, bs, sector_num, nb_sectors, 2309b1066c87SFam Zheng BDRV_TRACKED_DISCARD); 231050824995SFam Zheng bdrv_set_dirty(bs, sector_num, nb_sectors); 231150824995SFam Zheng 231261007b31SStefan Hajnoczi max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS); 231361007b31SStefan Hajnoczi while (nb_sectors > 0) { 231461007b31SStefan Hajnoczi int ret; 231561007b31SStefan Hajnoczi int num = nb_sectors; 231661007b31SStefan Hajnoczi 231761007b31SStefan Hajnoczi /* align request */ 231861007b31SStefan Hajnoczi if (bs->bl.discard_alignment && 231961007b31SStefan Hajnoczi num >= bs->bl.discard_alignment && 232061007b31SStefan Hajnoczi sector_num % bs->bl.discard_alignment) { 232161007b31SStefan Hajnoczi if (num > bs->bl.discard_alignment) { 232261007b31SStefan Hajnoczi num = bs->bl.discard_alignment; 232361007b31SStefan Hajnoczi } 232461007b31SStefan Hajnoczi num -= sector_num % bs->bl.discard_alignment; 232561007b31SStefan Hajnoczi } 232661007b31SStefan Hajnoczi 232761007b31SStefan Hajnoczi /* limit request size */ 232861007b31SStefan Hajnoczi if (num > max_discard) { 232961007b31SStefan Hajnoczi num = max_discard; 233061007b31SStefan Hajnoczi } 233161007b31SStefan Hajnoczi 233261007b31SStefan Hajnoczi if (bs->drv->bdrv_co_discard) { 233361007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_discard(bs, sector_num, num); 233461007b31SStefan Hajnoczi } else { 233561007b31SStefan Hajnoczi BlockAIOCB *acb; 233661007b31SStefan Hajnoczi CoroutineIOCompletion co = { 233761007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 233861007b31SStefan Hajnoczi }; 233961007b31SStefan Hajnoczi 234061007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, 234161007b31SStefan Hajnoczi bdrv_co_io_em_complete, &co); 234261007b31SStefan Hajnoczi if (acb == NULL) { 2343b1066c87SFam Zheng ret = -EIO; 2344b1066c87SFam Zheng goto out; 234561007b31SStefan Hajnoczi } else { 234661007b31SStefan Hajnoczi qemu_coroutine_yield(); 234761007b31SStefan Hajnoczi ret = co.ret; 234861007b31SStefan Hajnoczi } 234961007b31SStefan Hajnoczi } 235061007b31SStefan Hajnoczi if (ret && ret != -ENOTSUP) { 2351b1066c87SFam Zheng goto out; 235261007b31SStefan Hajnoczi } 235361007b31SStefan Hajnoczi 235461007b31SStefan Hajnoczi sector_num += num; 235561007b31SStefan Hajnoczi nb_sectors -= num; 235661007b31SStefan Hajnoczi } 2357b1066c87SFam Zheng ret = 0; 2358b1066c87SFam Zheng out: 2359b1066c87SFam Zheng tracked_request_end(&req); 2360b1066c87SFam Zheng return ret; 236161007b31SStefan Hajnoczi } 236261007b31SStefan Hajnoczi 236361007b31SStefan Hajnoczi int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) 236461007b31SStefan Hajnoczi { 236561007b31SStefan Hajnoczi Coroutine *co; 236661007b31SStefan Hajnoczi DiscardCo rwco = { 236761007b31SStefan Hajnoczi .bs = bs, 236861007b31SStefan Hajnoczi .sector_num = sector_num, 236961007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 237061007b31SStefan Hajnoczi .ret = NOT_DONE, 237161007b31SStefan Hajnoczi }; 237261007b31SStefan Hajnoczi 237361007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 237461007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 237561007b31SStefan Hajnoczi bdrv_discard_co_entry(&rwco); 237661007b31SStefan Hajnoczi } else { 237761007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 237861007b31SStefan Hajnoczi 237961007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_discard_co_entry); 238061007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 238161007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 238261007b31SStefan Hajnoczi aio_poll(aio_context, true); 238361007b31SStefan Hajnoczi } 238461007b31SStefan Hajnoczi } 238561007b31SStefan Hajnoczi 238661007b31SStefan Hajnoczi return rwco.ret; 238761007b31SStefan Hajnoczi } 238861007b31SStefan Hajnoczi 23895c5ae76aSFam Zheng static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf) 239061007b31SStefan Hajnoczi { 239161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 23925c5ae76aSFam Zheng BdrvTrackedRequest tracked_req; 23935c5ae76aSFam Zheng CoroutineIOCompletion co = { 23945c5ae76aSFam Zheng .coroutine = qemu_coroutine_self(), 23955c5ae76aSFam Zheng }; 23965c5ae76aSFam Zheng BlockAIOCB *acb; 239761007b31SStefan Hajnoczi 23985c5ae76aSFam Zheng tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL); 23995c5ae76aSFam Zheng if (!drv || !drv->bdrv_aio_ioctl) { 24005c5ae76aSFam Zheng co.ret = -ENOTSUP; 24015c5ae76aSFam Zheng goto out; 24025c5ae76aSFam Zheng } 24035c5ae76aSFam Zheng 24045c5ae76aSFam Zheng acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 24055c5ae76aSFam Zheng if (!acb) { 2406c8a9fd80SFam Zheng co.ret = -ENOTSUP; 2407c8a9fd80SFam Zheng goto out; 24085c5ae76aSFam Zheng } 24095c5ae76aSFam Zheng qemu_coroutine_yield(); 24105c5ae76aSFam Zheng out: 24115c5ae76aSFam Zheng tracked_request_end(&tracked_req); 24125c5ae76aSFam Zheng return co.ret; 24135c5ae76aSFam Zheng } 24145c5ae76aSFam Zheng 24155c5ae76aSFam Zheng typedef struct { 24165c5ae76aSFam Zheng BlockDriverState *bs; 24175c5ae76aSFam Zheng int req; 24185c5ae76aSFam Zheng void *buf; 24195c5ae76aSFam Zheng int ret; 24205c5ae76aSFam Zheng } BdrvIoctlCoData; 24215c5ae76aSFam Zheng 24225c5ae76aSFam Zheng static void coroutine_fn bdrv_co_ioctl_entry(void *opaque) 24235c5ae76aSFam Zheng { 24245c5ae76aSFam Zheng BdrvIoctlCoData *data = opaque; 24255c5ae76aSFam Zheng data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf); 24265c5ae76aSFam Zheng } 24275c5ae76aSFam Zheng 24285c5ae76aSFam Zheng /* needed for generic scsi interface */ 24295c5ae76aSFam Zheng int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 24305c5ae76aSFam Zheng { 24315c5ae76aSFam Zheng BdrvIoctlCoData data = { 24325c5ae76aSFam Zheng .bs = bs, 24335c5ae76aSFam Zheng .req = req, 24345c5ae76aSFam Zheng .buf = buf, 24355c5ae76aSFam Zheng .ret = -EINPROGRESS, 24365c5ae76aSFam Zheng }; 24375c5ae76aSFam Zheng 24385c5ae76aSFam Zheng if (qemu_in_coroutine()) { 24395c5ae76aSFam Zheng /* Fast-path if already in coroutine context */ 24405c5ae76aSFam Zheng bdrv_co_ioctl_entry(&data); 24415c5ae76aSFam Zheng } else { 24425c5ae76aSFam Zheng Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry); 2443ba889444SPaolo Bonzini 24445c5ae76aSFam Zheng qemu_coroutine_enter(co, &data); 24455c5ae76aSFam Zheng while (data.ret == -EINPROGRESS) { 24465c5ae76aSFam Zheng aio_poll(bdrv_get_aio_context(bs), true); 24475c5ae76aSFam Zheng } 2448ba889444SPaolo Bonzini } 24495c5ae76aSFam Zheng return data.ret; 24505c5ae76aSFam Zheng } 24515c5ae76aSFam Zheng 24525c5ae76aSFam Zheng static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque) 24535c5ae76aSFam Zheng { 24545c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = opaque; 24555c5ae76aSFam Zheng acb->req.error = bdrv_co_do_ioctl(acb->common.bs, 24565c5ae76aSFam Zheng acb->req.req, acb->req.buf); 24575c5ae76aSFam Zheng bdrv_co_complete(acb); 245861007b31SStefan Hajnoczi } 245961007b31SStefan Hajnoczi 246061007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 246161007b31SStefan Hajnoczi unsigned long int req, void *buf, 246261007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 246361007b31SStefan Hajnoczi { 24645c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info, 24655c5ae76aSFam Zheng bs, cb, opaque); 24665c5ae76aSFam Zheng Coroutine *co; 246761007b31SStefan Hajnoczi 24685c5ae76aSFam Zheng acb->need_bh = true; 24695c5ae76aSFam Zheng acb->req.error = -EINPROGRESS; 24705c5ae76aSFam Zheng acb->req.req = req; 24715c5ae76aSFam Zheng acb->req.buf = buf; 24725c5ae76aSFam Zheng co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry); 24735c5ae76aSFam Zheng qemu_coroutine_enter(co, acb); 24745c5ae76aSFam Zheng 24755c5ae76aSFam Zheng bdrv_co_maybe_schedule_bh(acb); 24765c5ae76aSFam Zheng return &acb->common; 247761007b31SStefan Hajnoczi } 247861007b31SStefan Hajnoczi 247961007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size) 248061007b31SStefan Hajnoczi { 248161007b31SStefan Hajnoczi return qemu_memalign(bdrv_opt_mem_align(bs), size); 248261007b31SStefan Hajnoczi } 248361007b31SStefan Hajnoczi 248461007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size) 248561007b31SStefan Hajnoczi { 248661007b31SStefan Hajnoczi return memset(qemu_blockalign(bs, size), 0, size); 248761007b31SStefan Hajnoczi } 248861007b31SStefan Hajnoczi 248961007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 249061007b31SStefan Hajnoczi { 249161007b31SStefan Hajnoczi size_t align = bdrv_opt_mem_align(bs); 249261007b31SStefan Hajnoczi 249361007b31SStefan Hajnoczi /* Ensure that NULL is never returned on success */ 249461007b31SStefan Hajnoczi assert(align > 0); 249561007b31SStefan Hajnoczi if (size == 0) { 249661007b31SStefan Hajnoczi size = align; 249761007b31SStefan Hajnoczi } 249861007b31SStefan Hajnoczi 249961007b31SStefan Hajnoczi return qemu_try_memalign(align, size); 250061007b31SStefan Hajnoczi } 250161007b31SStefan Hajnoczi 250261007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 250361007b31SStefan Hajnoczi { 250461007b31SStefan Hajnoczi void *mem = qemu_try_blockalign(bs, size); 250561007b31SStefan Hajnoczi 250661007b31SStefan Hajnoczi if (mem) { 250761007b31SStefan Hajnoczi memset(mem, 0, size); 250861007b31SStefan Hajnoczi } 250961007b31SStefan Hajnoczi 251061007b31SStefan Hajnoczi return mem; 251161007b31SStefan Hajnoczi } 251261007b31SStefan Hajnoczi 251361007b31SStefan Hajnoczi /* 251461007b31SStefan Hajnoczi * Check if all memory in this vector is sector aligned. 251561007b31SStefan Hajnoczi */ 251661007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 251761007b31SStefan Hajnoczi { 251861007b31SStefan Hajnoczi int i; 25194196d2f0SDenis V. Lunev size_t alignment = bdrv_min_mem_align(bs); 252061007b31SStefan Hajnoczi 252161007b31SStefan Hajnoczi for (i = 0; i < qiov->niov; i++) { 252261007b31SStefan Hajnoczi if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 252361007b31SStefan Hajnoczi return false; 252461007b31SStefan Hajnoczi } 252561007b31SStefan Hajnoczi if (qiov->iov[i].iov_len % alignment) { 252661007b31SStefan Hajnoczi return false; 252761007b31SStefan Hajnoczi } 252861007b31SStefan Hajnoczi } 252961007b31SStefan Hajnoczi 253061007b31SStefan Hajnoczi return true; 253161007b31SStefan Hajnoczi } 253261007b31SStefan Hajnoczi 253361007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs, 253461007b31SStefan Hajnoczi NotifierWithReturn *notifier) 253561007b31SStefan Hajnoczi { 253661007b31SStefan Hajnoczi notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 253761007b31SStefan Hajnoczi } 253861007b31SStefan Hajnoczi 253961007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs) 254061007b31SStefan Hajnoczi { 25416b98bd64SPaolo Bonzini BdrvChild *child; 25426b98bd64SPaolo Bonzini 25436b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 25446b98bd64SPaolo Bonzini bdrv_io_plug(child->bs); 25456b98bd64SPaolo Bonzini } 25466b98bd64SPaolo Bonzini 25476b98bd64SPaolo Bonzini if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) { 254861007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 254961007b31SStefan Hajnoczi if (drv && drv->bdrv_io_plug) { 255061007b31SStefan Hajnoczi drv->bdrv_io_plug(bs); 25516b98bd64SPaolo Bonzini } 255261007b31SStefan Hajnoczi } 255361007b31SStefan Hajnoczi } 255461007b31SStefan Hajnoczi 255561007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs) 255661007b31SStefan Hajnoczi { 25576b98bd64SPaolo Bonzini BdrvChild *child; 25586b98bd64SPaolo Bonzini 25596b98bd64SPaolo Bonzini assert(bs->io_plugged); 25606b98bd64SPaolo Bonzini if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) { 256161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 256261007b31SStefan Hajnoczi if (drv && drv->bdrv_io_unplug) { 256361007b31SStefan Hajnoczi drv->bdrv_io_unplug(bs); 256461007b31SStefan Hajnoczi } 256561007b31SStefan Hajnoczi } 256661007b31SStefan Hajnoczi 25676b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 25686b98bd64SPaolo Bonzini bdrv_io_unplug(child->bs); 25696b98bd64SPaolo Bonzini } 25706b98bd64SPaolo Bonzini } 25716b98bd64SPaolo Bonzini 25726b98bd64SPaolo Bonzini void bdrv_io_unplugged_begin(BlockDriverState *bs) 257361007b31SStefan Hajnoczi { 25746b98bd64SPaolo Bonzini BdrvChild *child; 25756b98bd64SPaolo Bonzini 25766b98bd64SPaolo Bonzini if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) { 257761007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 25786b98bd64SPaolo Bonzini if (drv && drv->bdrv_io_unplug) { 25796b98bd64SPaolo Bonzini drv->bdrv_io_unplug(bs); 25806b98bd64SPaolo Bonzini } 25816b98bd64SPaolo Bonzini } 25826b98bd64SPaolo Bonzini 25836b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 25846b98bd64SPaolo Bonzini bdrv_io_unplugged_begin(child->bs); 25856b98bd64SPaolo Bonzini } 25866b98bd64SPaolo Bonzini } 25876b98bd64SPaolo Bonzini 25886b98bd64SPaolo Bonzini void bdrv_io_unplugged_end(BlockDriverState *bs) 25896b98bd64SPaolo Bonzini { 25906b98bd64SPaolo Bonzini BdrvChild *child; 25916b98bd64SPaolo Bonzini 25926b98bd64SPaolo Bonzini assert(bs->io_plug_disabled); 25936b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 25946b98bd64SPaolo Bonzini bdrv_io_unplugged_end(child->bs); 25956b98bd64SPaolo Bonzini } 25966b98bd64SPaolo Bonzini 25976b98bd64SPaolo Bonzini if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) { 25986b98bd64SPaolo Bonzini BlockDriver *drv = bs->drv; 25996b98bd64SPaolo Bonzini if (drv && drv->bdrv_io_plug) { 26006b98bd64SPaolo Bonzini drv->bdrv_io_plug(bs); 26016b98bd64SPaolo Bonzini } 260261007b31SStefan Hajnoczi } 260361007b31SStefan Hajnoczi } 2604