/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}
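/* bdrv_parent_drained_end() below is the counterpart of
 * bdrv_parent_drained_begin() above: both walk bs->parents and invoke the
 * BdrvChildRole drained_begin/drained_end callbacks so that parents
 * (typically a BlockBackend) stop submitting new requests while the node is
 * drained and resume afterwards.
 */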
static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
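/* Note on the limit merging above: "optimum" values (opt_transfer, the
 * memory alignments) take the maximum of parent and child, while hard limits
 * (max_transfer, max_iov) take the smallest non-zero value, so a child that
 * reports 0 ("no limit") never relaxes a limit another layer has imposed.
 */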
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
    bool done;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
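/* Typical usage of the drained-section API above (illustrative sketch):
 *
 *     bdrv_drained_begin(bs);
 *     ... operate on bs while no new external I/O can be submitted ...
 *     bdrv_drained_end(bs);
 *
 * The begin/end calls nest via bs->quiesce_counter, so callers do not need to
 * know whether the node is already drained.
 */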
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);

    job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}
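/* Request tracking: every in-flight read/write is entered into
 * bs->tracked_requests for its duration.  This list is what makes
 * bdrv_requests_pending() and the overlap checks for serialising requests
 * (see wait_serialising_requests() further down) possible.
 */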
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
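/* Example for the cluster rounding helpers below (illustrative numbers,
 * assuming a 64 KiB cluster size): a request covering bytes [65540, 65548)
 * is widened to the containing cluster, i.e. cluster_offset = 65536 and
 * cluster_bytes = 65536.
 */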
/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(child->bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        qemu_coroutine_enter(co);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}
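/* bdrv_driver_preadv() above and bdrv_driver_pwritev() below dispatch to the
 * "best" interface the driver implements: the byte-based .bdrv_co_preadv/
 * .bdrv_co_pwritev callbacks if available, otherwise the sector-based
 * coroutine callbacks, and finally the AIO callbacks bridged back into
 * coroutine context via bdrv_co_io_em_complete().
 */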
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
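/* Copy-on-read helper: bdrv_co_do_copy_on_readv() below reads the whole
 * cluster containing the request into a bounce buffer, writes it back into
 * this image (as zeroes when the buffer reads as all-zero), and only then
 * copies the requested bytes into the caller's QEMUIOVector.
 */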
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
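/* Example for the alignment handling in bdrv_co_preadv() below (illustrative
 * numbers, assuming request_alignment = 512): a guest read of bytes
 * [1000, 3000) is padded with a 488-byte head buffer and a 72-byte tail
 * buffer and issued to bdrv_aligned_preadv() as the aligned range
 * [512, 3072); only the middle 2000 bytes end up in the guest's QEMUIOVector.
 */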
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
&local_qiov : qiov, 113461007b31SStefan Hajnoczi flags); 113561007b31SStefan Hajnoczi tracked_request_end(&req); 113661007b31SStefan Hajnoczi 113761007b31SStefan Hajnoczi if (use_local_qiov) { 113861007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 113961007b31SStefan Hajnoczi qemu_vfree(head_buf); 114061007b31SStefan Hajnoczi qemu_vfree(tail_buf); 114161007b31SStefan Hajnoczi } 114261007b31SStefan Hajnoczi 114361007b31SStefan Hajnoczi return ret; 114461007b31SStefan Hajnoczi } 114561007b31SStefan Hajnoczi 1146adad6496SKevin Wolf static int coroutine_fn bdrv_co_do_readv(BdrvChild *child, 114761007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 114861007b31SStefan Hajnoczi BdrvRequestFlags flags) 114961007b31SStefan Hajnoczi { 115061007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 115161007b31SStefan Hajnoczi return -EINVAL; 115261007b31SStefan Hajnoczi } 115361007b31SStefan Hajnoczi 1154a03ef88fSKevin Wolf return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS, 115561007b31SStefan Hajnoczi nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 115661007b31SStefan Hajnoczi } 115761007b31SStefan Hajnoczi 115828b04a8fSKevin Wolf int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num, 115961007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *qiov) 116061007b31SStefan Hajnoczi { 116128b04a8fSKevin Wolf trace_bdrv_co_readv(child->bs, sector_num, nb_sectors); 116261007b31SStefan Hajnoczi 1163adad6496SKevin Wolf return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0); 116461007b31SStefan Hajnoczi } 116561007b31SStefan Hajnoczi 11665def6b80SEric Blake /* Maximum buffer for write zeroes fallback, in bytes */ 11675def6b80SEric Blake #define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) 116861007b31SStefan Hajnoczi 1169d05aa8bbSEric Blake static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 1170d05aa8bbSEric Blake int64_t offset, int count, BdrvRequestFlags flags) 117161007b31SStefan Hajnoczi { 117261007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 117361007b31SStefan Hajnoczi QEMUIOVector qiov; 117461007b31SStefan Hajnoczi struct iovec iov = {0}; 117561007b31SStefan Hajnoczi int ret = 0; 1176465fe887SEric Blake bool need_flush = false; 1177443668caSDenis V. Lunev int head = 0; 1178443668caSDenis V. Lunev int tail = 0; 117961007b31SStefan Hajnoczi 1180cf081fcaSEric Blake int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); 1181a5b8dd2cSEric Blake int alignment = MAX(bs->bl.pwrite_zeroes_alignment, 1182a5b8dd2cSEric Blake bs->bl.request_alignment); 1183cf081fcaSEric Blake 1184d05aa8bbSEric Blake assert(is_power_of_2(alignment)); 1185d05aa8bbSEric Blake head = offset & (alignment - 1); 1186d05aa8bbSEric Blake tail = (offset + count) & (alignment - 1); 1187d05aa8bbSEric Blake max_write_zeroes &= ~(alignment - 1); 118861007b31SStefan Hajnoczi 1189d05aa8bbSEric Blake while (count > 0 && !ret) { 1190d05aa8bbSEric Blake int num = count; 119161007b31SStefan Hajnoczi 119261007b31SStefan Hajnoczi /* Align request. Block drivers can expect the "bulk" of the request 1193443668caSDenis V. Lunev * to be aligned, and that unaligned requests do not cross cluster 1194443668caSDenis V. Lunev * boundaries. 119561007b31SStefan Hajnoczi */ 1196443668caSDenis V. Lunev if (head) { 119761007b31SStefan Hajnoczi /* Make a small request up to the first aligned sector. */ 1198d05aa8bbSEric Blake num = MIN(count, alignment - head); 1199443668caSDenis V. 
Lunev head = 0; 1200d05aa8bbSEric Blake } else if (tail && num > alignment) { 1201443668caSDenis V. Lunev /* Shorten the request to the last aligned sector. */ 1202443668caSDenis V. Lunev num -= tail; 120361007b31SStefan Hajnoczi } 120461007b31SStefan Hajnoczi 120561007b31SStefan Hajnoczi /* limit request size */ 120661007b31SStefan Hajnoczi if (num > max_write_zeroes) { 120761007b31SStefan Hajnoczi num = max_write_zeroes; 120861007b31SStefan Hajnoczi } 120961007b31SStefan Hajnoczi 121061007b31SStefan Hajnoczi ret = -ENOTSUP; 121161007b31SStefan Hajnoczi /* First try the efficient write zeroes operation */ 1212d05aa8bbSEric Blake if (drv->bdrv_co_pwrite_zeroes) { 1213d05aa8bbSEric Blake ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1214d05aa8bbSEric Blake flags & bs->supported_zero_flags); 1215d05aa8bbSEric Blake if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1216d05aa8bbSEric Blake !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1217d05aa8bbSEric Blake need_flush = true; 1218d05aa8bbSEric Blake } 1219465fe887SEric Blake } else { 1220465fe887SEric Blake assert(!bs->supported_zero_flags); 122161007b31SStefan Hajnoczi } 122261007b31SStefan Hajnoczi 122361007b31SStefan Hajnoczi if (ret == -ENOTSUP) { 122461007b31SStefan Hajnoczi /* Fall back to bounce buffer if write zeroes is unsupported */ 12255def6b80SEric Blake int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, 122661007b31SStefan Hajnoczi MAX_WRITE_ZEROES_BOUNCE_BUFFER); 1227465fe887SEric Blake BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1228465fe887SEric Blake 1229465fe887SEric Blake if ((flags & BDRV_REQ_FUA) && 1230465fe887SEric Blake !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1231465fe887SEric Blake /* No need for bdrv_driver_pwrite() to do a fallback 1232465fe887SEric Blake * flush on each chunk; use just one at the end */ 1233465fe887SEric Blake write_flags &= ~BDRV_REQ_FUA; 1234465fe887SEric Blake need_flush = true; 1235465fe887SEric Blake } 12365def6b80SEric Blake num = MIN(num, max_transfer); 1237d05aa8bbSEric Blake iov.iov_len = num; 123861007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 1239d05aa8bbSEric Blake iov.iov_base = qemu_try_blockalign(bs, num); 124061007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 124161007b31SStefan Hajnoczi ret = -ENOMEM; 124261007b31SStefan Hajnoczi goto fail; 124361007b31SStefan Hajnoczi } 1244d05aa8bbSEric Blake memset(iov.iov_base, 0, num); 124561007b31SStefan Hajnoczi } 124661007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 124761007b31SStefan Hajnoczi 1248d05aa8bbSEric Blake ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); 124961007b31SStefan Hajnoczi 125061007b31SStefan Hajnoczi /* Keep bounce buffer around if it is big enough for all 125161007b31SStefan Hajnoczi * future requests.
125261007b31SStefan Hajnoczi */ 12535def6b80SEric Blake if (num < max_transfer) { 125461007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 125561007b31SStefan Hajnoczi iov.iov_base = NULL; 125661007b31SStefan Hajnoczi } 125761007b31SStefan Hajnoczi } 125861007b31SStefan Hajnoczi 1259d05aa8bbSEric Blake offset += num; 1260d05aa8bbSEric Blake count -= num; 126161007b31SStefan Hajnoczi } 126261007b31SStefan Hajnoczi 126361007b31SStefan Hajnoczi fail: 1264465fe887SEric Blake if (ret == 0 && need_flush) { 1265465fe887SEric Blake ret = bdrv_co_flush(bs); 1266465fe887SEric Blake } 126761007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 126861007b31SStefan Hajnoczi return ret; 126961007b31SStefan Hajnoczi } 127061007b31SStefan Hajnoczi 127161007b31SStefan Hajnoczi /* 127204ed95f4SEric Blake * Forwards an already correctly aligned write request to the BlockDriver, 127304ed95f4SEric Blake * after possibly fragmenting it. 127461007b31SStefan Hajnoczi */ 127561007b31SStefan Hajnoczi static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 127661007b31SStefan Hajnoczi BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1277cff86b38SEric Blake int64_t align, QEMUIOVector *qiov, int flags) 127861007b31SStefan Hajnoczi { 127961007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 128061007b31SStefan Hajnoczi bool waited; 128161007b31SStefan Hajnoczi int ret; 128261007b31SStefan Hajnoczi 12839896c876SKevin Wolf int64_t start_sector = offset >> BDRV_SECTOR_BITS; 12849896c876SKevin Wolf int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 128504ed95f4SEric Blake uint64_t bytes_remaining = bytes; 128604ed95f4SEric Blake int max_transfer; 128761007b31SStefan Hajnoczi 1288cff86b38SEric Blake assert(is_power_of_2(align)); 1289cff86b38SEric Blake assert((offset & (align - 1)) == 0); 1290cff86b38SEric Blake assert((bytes & (align - 1)) == 0); 129161007b31SStefan Hajnoczi assert(!qiov || bytes == qiov->size); 1292abb06c5aSDaniel P. 
Berrange assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1293fa166538SEric Blake assert(!(flags & ~BDRV_REQ_MASK)); 129404ed95f4SEric Blake max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 129504ed95f4SEric Blake align); 129661007b31SStefan Hajnoczi 129761007b31SStefan Hajnoczi waited = wait_serialising_requests(req); 129861007b31SStefan Hajnoczi assert(!waited || !req->serialising); 129961007b31SStefan Hajnoczi assert(req->overlap_offset <= offset); 130061007b31SStefan Hajnoczi assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 130161007b31SStefan Hajnoczi 130261007b31SStefan Hajnoczi ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 130361007b31SStefan Hajnoczi 130461007b31SStefan Hajnoczi if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1305c1499a5eSEric Blake !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 130661007b31SStefan Hajnoczi qemu_iovec_is_zero(qiov)) { 130761007b31SStefan Hajnoczi flags |= BDRV_REQ_ZERO_WRITE; 130861007b31SStefan Hajnoczi if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 130961007b31SStefan Hajnoczi flags |= BDRV_REQ_MAY_UNMAP; 131061007b31SStefan Hajnoczi } 131161007b31SStefan Hajnoczi } 131261007b31SStefan Hajnoczi 131361007b31SStefan Hajnoczi if (ret < 0) { 131461007b31SStefan Hajnoczi /* Do nothing, write notifier decided to fail this request */ 131561007b31SStefan Hajnoczi } else if (flags & BDRV_REQ_ZERO_WRITE) { 13169a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 13179896c876SKevin Wolf ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 131804ed95f4SEric Blake } else if (bytes <= max_transfer) { 13199a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV); 132078a07294SKevin Wolf ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags); 132104ed95f4SEric Blake } else { 132204ed95f4SEric Blake bdrv_debug_event(bs, BLKDBG_PWRITEV); 132304ed95f4SEric Blake while (bytes_remaining) { 132404ed95f4SEric Blake int num = MIN(bytes_remaining, max_transfer); 132504ed95f4SEric Blake QEMUIOVector local_qiov; 132604ed95f4SEric Blake int local_flags = flags; 132704ed95f4SEric Blake 132804ed95f4SEric Blake assert(num); 132904ed95f4SEric Blake if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 133004ed95f4SEric Blake !(bs->supported_write_flags & BDRV_REQ_FUA)) { 133104ed95f4SEric Blake /* If FUA is going to be emulated by flush, we only 133204ed95f4SEric Blake * need to flush on the last iteration */ 133304ed95f4SEric Blake local_flags &= ~BDRV_REQ_FUA; 133404ed95f4SEric Blake } 133504ed95f4SEric Blake qemu_iovec_init(&local_qiov, qiov->niov); 133604ed95f4SEric Blake qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num); 133704ed95f4SEric Blake 133804ed95f4SEric Blake ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 133904ed95f4SEric Blake num, &local_qiov, local_flags); 134004ed95f4SEric Blake qemu_iovec_destroy(&local_qiov); 134104ed95f4SEric Blake if (ret < 0) { 134204ed95f4SEric Blake break; 134304ed95f4SEric Blake } 134404ed95f4SEric Blake bytes_remaining -= num; 134504ed95f4SEric Blake } 134661007b31SStefan Hajnoczi } 13479a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 134861007b31SStefan Hajnoczi 13493ff2f67aSEvgeny Yakovlev ++bs->write_gen; 13509896c876SKevin Wolf bdrv_set_dirty(bs, start_sector, end_sector - start_sector); 135161007b31SStefan Hajnoczi 135253d8f9d8SMax Reitz if (bs->wr_highest_offset < offset + bytes) { 135353d8f9d8SMax Reitz bs->wr_highest_offset = offset + 
bytes; 135453d8f9d8SMax Reitz } 135561007b31SStefan Hajnoczi 135661007b31SStefan Hajnoczi if (ret >= 0) { 13579896c876SKevin Wolf bs->total_sectors = MAX(bs->total_sectors, end_sector); 135804ed95f4SEric Blake ret = 0; 135961007b31SStefan Hajnoczi } 136061007b31SStefan Hajnoczi 136161007b31SStefan Hajnoczi return ret; 136261007b31SStefan Hajnoczi } 136361007b31SStefan Hajnoczi 13649eeb6dd1SFam Zheng static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs, 13659eeb6dd1SFam Zheng int64_t offset, 13669eeb6dd1SFam Zheng unsigned int bytes, 13679eeb6dd1SFam Zheng BdrvRequestFlags flags, 13689eeb6dd1SFam Zheng BdrvTrackedRequest *req) 13699eeb6dd1SFam Zheng { 13709eeb6dd1SFam Zheng uint8_t *buf = NULL; 13719eeb6dd1SFam Zheng QEMUIOVector local_qiov; 13729eeb6dd1SFam Zheng struct iovec iov; 1373a5b8dd2cSEric Blake uint64_t align = bs->bl.request_alignment; 13749eeb6dd1SFam Zheng unsigned int head_padding_bytes, tail_padding_bytes; 13759eeb6dd1SFam Zheng int ret = 0; 13769eeb6dd1SFam Zheng 13779eeb6dd1SFam Zheng head_padding_bytes = offset & (align - 1); 13789eeb6dd1SFam Zheng tail_padding_bytes = align - ((offset + bytes) & (align - 1)); 13799eeb6dd1SFam Zheng 13809eeb6dd1SFam Zheng 13819eeb6dd1SFam Zheng assert(flags & BDRV_REQ_ZERO_WRITE); 13829eeb6dd1SFam Zheng if (head_padding_bytes || tail_padding_bytes) { 13839eeb6dd1SFam Zheng buf = qemu_blockalign(bs, align); 13849eeb6dd1SFam Zheng iov = (struct iovec) { 13859eeb6dd1SFam Zheng .iov_base = buf, 13869eeb6dd1SFam Zheng .iov_len = align, 13879eeb6dd1SFam Zheng }; 13889eeb6dd1SFam Zheng qemu_iovec_init_external(&local_qiov, &iov, 1); 13899eeb6dd1SFam Zheng } 13909eeb6dd1SFam Zheng if (head_padding_bytes) { 13919eeb6dd1SFam Zheng uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 13929eeb6dd1SFam Zheng 13939eeb6dd1SFam Zheng /* RMW the unaligned part before head. */ 13949eeb6dd1SFam Zheng mark_request_serialising(req, align); 13959eeb6dd1SFam Zheng wait_serialising_requests(req); 13969a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 13979eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align, 13989eeb6dd1SFam Zheng align, &local_qiov, 0); 13999eeb6dd1SFam Zheng if (ret < 0) { 14009eeb6dd1SFam Zheng goto fail; 14019eeb6dd1SFam Zheng } 14029a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 14039eeb6dd1SFam Zheng 14049eeb6dd1SFam Zheng memset(buf + head_padding_bytes, 0, zero_bytes); 14059eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align, 1406cff86b38SEric Blake align, &local_qiov, 14079eeb6dd1SFam Zheng flags & ~BDRV_REQ_ZERO_WRITE); 14089eeb6dd1SFam Zheng if (ret < 0) { 14099eeb6dd1SFam Zheng goto fail; 14109eeb6dd1SFam Zheng } 14119eeb6dd1SFam Zheng offset += zero_bytes; 14129eeb6dd1SFam Zheng bytes -= zero_bytes; 14139eeb6dd1SFam Zheng } 14149eeb6dd1SFam Zheng 14159eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 14169eeb6dd1SFam Zheng if (bytes >= align) { 14179eeb6dd1SFam Zheng /* Write the aligned part in the middle. 
*/ 14189eeb6dd1SFam Zheng uint64_t aligned_bytes = bytes & ~(align - 1); 1419cff86b38SEric Blake ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align, 14209eeb6dd1SFam Zheng NULL, flags); 14219eeb6dd1SFam Zheng if (ret < 0) { 14229eeb6dd1SFam Zheng goto fail; 14239eeb6dd1SFam Zheng } 14249eeb6dd1SFam Zheng bytes -= aligned_bytes; 14259eeb6dd1SFam Zheng offset += aligned_bytes; 14269eeb6dd1SFam Zheng } 14279eeb6dd1SFam Zheng 14289eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 14299eeb6dd1SFam Zheng if (bytes) { 14309eeb6dd1SFam Zheng assert(align == tail_padding_bytes + bytes); 14319eeb6dd1SFam Zheng /* RMW the unaligned part after tail. */ 14329eeb6dd1SFam Zheng mark_request_serialising(req, align); 14339eeb6dd1SFam Zheng wait_serialising_requests(req); 14349a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 14359eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset, align, 14369eeb6dd1SFam Zheng align, &local_qiov, 0); 14379eeb6dd1SFam Zheng if (ret < 0) { 14389eeb6dd1SFam Zheng goto fail; 14399eeb6dd1SFam Zheng } 14409a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 14419eeb6dd1SFam Zheng 14429eeb6dd1SFam Zheng memset(buf, 0, bytes); 1443cff86b38SEric Blake ret = bdrv_aligned_pwritev(bs, req, offset, align, align, 14449eeb6dd1SFam Zheng &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 14459eeb6dd1SFam Zheng } 14469eeb6dd1SFam Zheng fail: 14479eeb6dd1SFam Zheng qemu_vfree(buf); 14489eeb6dd1SFam Zheng return ret; 14499eeb6dd1SFam Zheng 14509eeb6dd1SFam Zheng } 14519eeb6dd1SFam Zheng 145261007b31SStefan Hajnoczi /* 145361007b31SStefan Hajnoczi * Handle a write request in coroutine context 145461007b31SStefan Hajnoczi */ 1455a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 145661007b31SStefan Hajnoczi int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 145761007b31SStefan Hajnoczi BdrvRequestFlags flags) 145861007b31SStefan Hajnoczi { 1459a03ef88fSKevin Wolf BlockDriverState *bs = child->bs; 146061007b31SStefan Hajnoczi BdrvTrackedRequest req; 1461a5b8dd2cSEric Blake uint64_t align = bs->bl.request_alignment; 146261007b31SStefan Hajnoczi uint8_t *head_buf = NULL; 146361007b31SStefan Hajnoczi uint8_t *tail_buf = NULL; 146461007b31SStefan Hajnoczi QEMUIOVector local_qiov; 146561007b31SStefan Hajnoczi bool use_local_qiov = false; 146661007b31SStefan Hajnoczi int ret; 146761007b31SStefan Hajnoczi 146861007b31SStefan Hajnoczi if (!bs->drv) { 146961007b31SStefan Hajnoczi return -ENOMEDIUM; 147061007b31SStefan Hajnoczi } 147161007b31SStefan Hajnoczi if (bs->read_only) { 1472eaf5fe2dSPaolo Bonzini return -EPERM; 147361007b31SStefan Hajnoczi } 147404c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 147561007b31SStefan Hajnoczi 147661007b31SStefan Hajnoczi ret = bdrv_check_byte_request(bs, offset, bytes); 147761007b31SStefan Hajnoczi if (ret < 0) { 147861007b31SStefan Hajnoczi return ret; 147961007b31SStefan Hajnoczi } 148061007b31SStefan Hajnoczi 148161007b31SStefan Hajnoczi /* 148261007b31SStefan Hajnoczi * Align write if necessary by performing a read-modify-write cycle. 148361007b31SStefan Hajnoczi * Pad qiov with the read parts and be sure to have a tracked request not 148461007b31SStefan Hajnoczi * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
148561007b31SStefan Hajnoczi */ 1486ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 148761007b31SStefan Hajnoczi 14889eeb6dd1SFam Zheng if (!qiov) { 14899eeb6dd1SFam Zheng ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); 14909eeb6dd1SFam Zheng goto out; 14919eeb6dd1SFam Zheng } 14929eeb6dd1SFam Zheng 149361007b31SStefan Hajnoczi if (offset & (align - 1)) { 149461007b31SStefan Hajnoczi QEMUIOVector head_qiov; 149561007b31SStefan Hajnoczi struct iovec head_iov; 149661007b31SStefan Hajnoczi 149761007b31SStefan Hajnoczi mark_request_serialising(&req, align); 149861007b31SStefan Hajnoczi wait_serialising_requests(&req); 149961007b31SStefan Hajnoczi 150061007b31SStefan Hajnoczi head_buf = qemu_blockalign(bs, align); 150161007b31SStefan Hajnoczi head_iov = (struct iovec) { 150261007b31SStefan Hajnoczi .iov_base = head_buf, 150361007b31SStefan Hajnoczi .iov_len = align, 150461007b31SStefan Hajnoczi }; 150561007b31SStefan Hajnoczi qemu_iovec_init_external(&head_qiov, &head_iov, 1); 150661007b31SStefan Hajnoczi 15079a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 150861007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 150961007b31SStefan Hajnoczi align, &head_qiov, 0); 151061007b31SStefan Hajnoczi if (ret < 0) { 151161007b31SStefan Hajnoczi goto fail; 151261007b31SStefan Hajnoczi } 15139a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 151461007b31SStefan Hajnoczi 151561007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 2); 151661007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 151761007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 151861007b31SStefan Hajnoczi use_local_qiov = true; 151961007b31SStefan Hajnoczi 152061007b31SStefan Hajnoczi bytes += offset & (align - 1); 152161007b31SStefan Hajnoczi offset = offset & ~(align - 1); 1522117bc3faSPeter Lieven 1523117bc3faSPeter Lieven /* We have read the tail already if the request is smaller 1524117bc3faSPeter Lieven * than one aligned block. 
1525117bc3faSPeter Lieven */ 1526117bc3faSPeter Lieven if (bytes < align) { 1527117bc3faSPeter Lieven qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes); 1528117bc3faSPeter Lieven bytes = align; 1529117bc3faSPeter Lieven } 153061007b31SStefan Hajnoczi } 153161007b31SStefan Hajnoczi 153261007b31SStefan Hajnoczi if ((offset + bytes) & (align - 1)) { 153361007b31SStefan Hajnoczi QEMUIOVector tail_qiov; 153461007b31SStefan Hajnoczi struct iovec tail_iov; 153561007b31SStefan Hajnoczi size_t tail_bytes; 153661007b31SStefan Hajnoczi bool waited; 153761007b31SStefan Hajnoczi 153861007b31SStefan Hajnoczi mark_request_serialising(&req, align); 153961007b31SStefan Hajnoczi waited = wait_serialising_requests(&req); 154061007b31SStefan Hajnoczi assert(!waited || !use_local_qiov); 154161007b31SStefan Hajnoczi 154261007b31SStefan Hajnoczi tail_buf = qemu_blockalign(bs, align); 154361007b31SStefan Hajnoczi tail_iov = (struct iovec) { 154461007b31SStefan Hajnoczi .iov_base = tail_buf, 154561007b31SStefan Hajnoczi .iov_len = align, 154661007b31SStefan Hajnoczi }; 154761007b31SStefan Hajnoczi qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 154861007b31SStefan Hajnoczi 15499a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 155061007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 155161007b31SStefan Hajnoczi align, &tail_qiov, 0); 155261007b31SStefan Hajnoczi if (ret < 0) { 155361007b31SStefan Hajnoczi goto fail; 155461007b31SStefan Hajnoczi } 15559a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 155661007b31SStefan Hajnoczi 155761007b31SStefan Hajnoczi if (!use_local_qiov) { 155861007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 1); 155961007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 156061007b31SStefan Hajnoczi use_local_qiov = true; 156161007b31SStefan Hajnoczi } 156261007b31SStefan Hajnoczi 156361007b31SStefan Hajnoczi tail_bytes = (offset + bytes) & (align - 1); 156461007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 156561007b31SStefan Hajnoczi 156661007b31SStefan Hajnoczi bytes = ROUND_UP(bytes, align); 156761007b31SStefan Hajnoczi } 156861007b31SStefan Hajnoczi 1569cff86b38SEric Blake ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align, 157061007b31SStefan Hajnoczi use_local_qiov ? 
&local_qiov : qiov, 157161007b31SStefan Hajnoczi flags); 157261007b31SStefan Hajnoczi 157361007b31SStefan Hajnoczi fail: 157461007b31SStefan Hajnoczi 157561007b31SStefan Hajnoczi if (use_local_qiov) { 157661007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 157761007b31SStefan Hajnoczi } 157861007b31SStefan Hajnoczi qemu_vfree(head_buf); 157961007b31SStefan Hajnoczi qemu_vfree(tail_buf); 15809eeb6dd1SFam Zheng out: 15819eeb6dd1SFam Zheng tracked_request_end(&req); 158261007b31SStefan Hajnoczi return ret; 158361007b31SStefan Hajnoczi } 158461007b31SStefan Hajnoczi 1585adad6496SKevin Wolf static int coroutine_fn bdrv_co_do_writev(BdrvChild *child, 158661007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 158761007b31SStefan Hajnoczi BdrvRequestFlags flags) 158861007b31SStefan Hajnoczi { 158961007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 159061007b31SStefan Hajnoczi return -EINVAL; 159161007b31SStefan Hajnoczi } 159261007b31SStefan Hajnoczi 1593a03ef88fSKevin Wolf return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS, 159461007b31SStefan Hajnoczi nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 159561007b31SStefan Hajnoczi } 159661007b31SStefan Hajnoczi 159725ec177dSKevin Wolf int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num, 159861007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *qiov) 159961007b31SStefan Hajnoczi { 160025ec177dSKevin Wolf trace_bdrv_co_writev(child->bs, sector_num, nb_sectors); 160161007b31SStefan Hajnoczi 1602adad6496SKevin Wolf return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0); 160361007b31SStefan Hajnoczi } 160461007b31SStefan Hajnoczi 1605a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, 1606a03ef88fSKevin Wolf int count, BdrvRequestFlags flags) 160761007b31SStefan Hajnoczi { 1608a03ef88fSKevin Wolf trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags); 160961007b31SStefan Hajnoczi 1610a03ef88fSKevin Wolf if (!(child->bs->open_flags & BDRV_O_UNMAP)) { 161161007b31SStefan Hajnoczi flags &= ~BDRV_REQ_MAY_UNMAP; 161261007b31SStefan Hajnoczi } 161361007b31SStefan Hajnoczi 1614a03ef88fSKevin Wolf return bdrv_co_pwritev(child, offset, count, NULL, 161561007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags); 161661007b31SStefan Hajnoczi } 161761007b31SStefan Hajnoczi 161861007b31SStefan Hajnoczi typedef struct BdrvCoGetBlockStatusData { 161961007b31SStefan Hajnoczi BlockDriverState *bs; 162061007b31SStefan Hajnoczi BlockDriverState *base; 162167a0fd2aSFam Zheng BlockDriverState **file; 162261007b31SStefan Hajnoczi int64_t sector_num; 162361007b31SStefan Hajnoczi int nb_sectors; 162461007b31SStefan Hajnoczi int *pnum; 162561007b31SStefan Hajnoczi int64_t ret; 162661007b31SStefan Hajnoczi bool done; 162761007b31SStefan Hajnoczi } BdrvCoGetBlockStatusData; 162861007b31SStefan Hajnoczi 162961007b31SStefan Hajnoczi /* 163061007b31SStefan Hajnoczi * Returns the allocation status of the specified sectors. 163161007b31SStefan Hajnoczi * Drivers not implementing the functionality are assumed to not support 163261007b31SStefan Hajnoczi * backing files, hence all their sectors are reported as allocated. 163361007b31SStefan Hajnoczi * 163461007b31SStefan Hajnoczi * If 'sector_num' is beyond the end of the disk image the return value is 0 163561007b31SStefan Hajnoczi * and 'pnum' is set to 0. 
163661007b31SStefan Hajnoczi * 163761007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 163861007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 163961007b31SStefan Hajnoczi * allocated/unallocated state. 164061007b31SStefan Hajnoczi * 164161007b31SStefan Hajnoczi * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 164261007b31SStefan Hajnoczi * beyond the end of the disk image it will be clamped. 164367a0fd2aSFam Zheng * 164467a0fd2aSFam Zheng * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file' 164567a0fd2aSFam Zheng * points to the BDS which the sector range is allocated in. 164661007b31SStefan Hajnoczi */ 164761007b31SStefan Hajnoczi static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 164861007b31SStefan Hajnoczi int64_t sector_num, 164967a0fd2aSFam Zheng int nb_sectors, int *pnum, 165067a0fd2aSFam Zheng BlockDriverState **file) 165161007b31SStefan Hajnoczi { 165261007b31SStefan Hajnoczi int64_t total_sectors; 165361007b31SStefan Hajnoczi int64_t n; 165461007b31SStefan Hajnoczi int64_t ret, ret2; 165561007b31SStefan Hajnoczi 165661007b31SStefan Hajnoczi total_sectors = bdrv_nb_sectors(bs); 165761007b31SStefan Hajnoczi if (total_sectors < 0) { 165861007b31SStefan Hajnoczi return total_sectors; 165961007b31SStefan Hajnoczi } 166061007b31SStefan Hajnoczi 166161007b31SStefan Hajnoczi if (sector_num >= total_sectors) { 166261007b31SStefan Hajnoczi *pnum = 0; 166361007b31SStefan Hajnoczi return 0; 166461007b31SStefan Hajnoczi } 166561007b31SStefan Hajnoczi 166661007b31SStefan Hajnoczi n = total_sectors - sector_num; 166761007b31SStefan Hajnoczi if (n < nb_sectors) { 166861007b31SStefan Hajnoczi nb_sectors = n; 166961007b31SStefan Hajnoczi } 167061007b31SStefan Hajnoczi 167161007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_get_block_status) { 167261007b31SStefan Hajnoczi *pnum = nb_sectors; 167361007b31SStefan Hajnoczi ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 167461007b31SStefan Hajnoczi if (bs->drv->protocol_name) { 167561007b31SStefan Hajnoczi ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 167661007b31SStefan Hajnoczi } 167761007b31SStefan Hajnoczi return ret; 167861007b31SStefan Hajnoczi } 167961007b31SStefan Hajnoczi 168067a0fd2aSFam Zheng *file = NULL; 168167a0fd2aSFam Zheng ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum, 168267a0fd2aSFam Zheng file); 168361007b31SStefan Hajnoczi if (ret < 0) { 168461007b31SStefan Hajnoczi *pnum = 0; 168561007b31SStefan Hajnoczi return ret; 168661007b31SStefan Hajnoczi } 168761007b31SStefan Hajnoczi 168861007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_RAW) { 168961007b31SStefan Hajnoczi assert(ret & BDRV_BLOCK_OFFSET_VALID); 16909a4f4c31SKevin Wolf return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS, 169167a0fd2aSFam Zheng *pnum, pnum, file); 169261007b31SStefan Hajnoczi } 169361007b31SStefan Hajnoczi 169461007b31SStefan Hajnoczi if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 169561007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ALLOCATED; 1696a53f1a95SPaolo Bonzini } else { 169761007b31SStefan Hajnoczi if (bdrv_unallocated_blocks_are_zero(bs)) { 169861007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 1699760e0063SKevin Wolf } else if (bs->backing) { 1700760e0063SKevin Wolf BlockDriverState *bs2 = bs->backing->bs; 170161007b31SStefan Hajnoczi int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 170261007b31SStefan Hajnoczi if (nb_sectors2 >= 0 && sector_num >= 
nb_sectors2) { 170361007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 170461007b31SStefan Hajnoczi } 170561007b31SStefan Hajnoczi } 170661007b31SStefan Hajnoczi } 170761007b31SStefan Hajnoczi 1708ac987b30SFam Zheng if (*file && *file != bs && 170961007b31SStefan Hajnoczi (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 171061007b31SStefan Hajnoczi (ret & BDRV_BLOCK_OFFSET_VALID)) { 171167a0fd2aSFam Zheng BlockDriverState *file2; 171261007b31SStefan Hajnoczi int file_pnum; 171361007b31SStefan Hajnoczi 1714ac987b30SFam Zheng ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS, 171567a0fd2aSFam Zheng *pnum, &file_pnum, &file2); 171661007b31SStefan Hajnoczi if (ret2 >= 0) { 171761007b31SStefan Hajnoczi /* Ignore errors. This is just providing extra information, it 171861007b31SStefan Hajnoczi * is useful but not necessary. 171961007b31SStefan Hajnoczi */ 172061007b31SStefan Hajnoczi if (!file_pnum) { 172161007b31SStefan Hajnoczi /* !file_pnum indicates an offset at or beyond the EOF; it is 172261007b31SStefan Hajnoczi * perfectly valid for the format block driver to point to such 172361007b31SStefan Hajnoczi * offsets, so catch it and mark everything as zero */ 172461007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 172561007b31SStefan Hajnoczi } else { 172661007b31SStefan Hajnoczi /* Limit request to the range reported by the protocol driver */ 172761007b31SStefan Hajnoczi *pnum = file_pnum; 172861007b31SStefan Hajnoczi ret |= (ret2 & BDRV_BLOCK_ZERO); 172961007b31SStefan Hajnoczi } 173061007b31SStefan Hajnoczi } 173161007b31SStefan Hajnoczi } 173261007b31SStefan Hajnoczi 173361007b31SStefan Hajnoczi return ret; 173461007b31SStefan Hajnoczi } 173561007b31SStefan Hajnoczi 1736ba3f0e25SFam Zheng static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs, 1737ba3f0e25SFam Zheng BlockDriverState *base, 1738ba3f0e25SFam Zheng int64_t sector_num, 1739ba3f0e25SFam Zheng int nb_sectors, 174067a0fd2aSFam Zheng int *pnum, 174167a0fd2aSFam Zheng BlockDriverState **file) 1742ba3f0e25SFam Zheng { 1743ba3f0e25SFam Zheng BlockDriverState *p; 1744ba3f0e25SFam Zheng int64_t ret = 0; 1745ba3f0e25SFam Zheng 1746ba3f0e25SFam Zheng assert(bs != base); 1747760e0063SKevin Wolf for (p = bs; p != base; p = backing_bs(p)) { 174867a0fd2aSFam Zheng ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file); 1749ba3f0e25SFam Zheng if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) { 1750ba3f0e25SFam Zheng break; 1751ba3f0e25SFam Zheng } 1752ba3f0e25SFam Zheng /* [sector_num, pnum] unallocated on this layer, which could be only 1753ba3f0e25SFam Zheng * the first part of [sector_num, nb_sectors]. 
*/ 1754ba3f0e25SFam Zheng nb_sectors = MIN(nb_sectors, *pnum); 1755ba3f0e25SFam Zheng } 1756ba3f0e25SFam Zheng return ret; 1757ba3f0e25SFam Zheng } 1758ba3f0e25SFam Zheng 1759ba3f0e25SFam Zheng /* Coroutine wrapper for bdrv_get_block_status_above() */ 1760ba3f0e25SFam Zheng static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque) 176161007b31SStefan Hajnoczi { 176261007b31SStefan Hajnoczi BdrvCoGetBlockStatusData *data = opaque; 176361007b31SStefan Hajnoczi 1764ba3f0e25SFam Zheng data->ret = bdrv_co_get_block_status_above(data->bs, data->base, 1765ba3f0e25SFam Zheng data->sector_num, 1766ba3f0e25SFam Zheng data->nb_sectors, 176767a0fd2aSFam Zheng data->pnum, 176867a0fd2aSFam Zheng data->file); 176961007b31SStefan Hajnoczi data->done = true; 177061007b31SStefan Hajnoczi } 177161007b31SStefan Hajnoczi 177261007b31SStefan Hajnoczi /* 1773ba3f0e25SFam Zheng * Synchronous wrapper around bdrv_co_get_block_status_above(). 177461007b31SStefan Hajnoczi * 1775ba3f0e25SFam Zheng * See bdrv_co_get_block_status_above() for details. 177661007b31SStefan Hajnoczi */ 1777ba3f0e25SFam Zheng int64_t bdrv_get_block_status_above(BlockDriverState *bs, 1778ba3f0e25SFam Zheng BlockDriverState *base, 1779ba3f0e25SFam Zheng int64_t sector_num, 178067a0fd2aSFam Zheng int nb_sectors, int *pnum, 178167a0fd2aSFam Zheng BlockDriverState **file) 178261007b31SStefan Hajnoczi { 178361007b31SStefan Hajnoczi Coroutine *co; 178461007b31SStefan Hajnoczi BdrvCoGetBlockStatusData data = { 178561007b31SStefan Hajnoczi .bs = bs, 1786ba3f0e25SFam Zheng .base = base, 178767a0fd2aSFam Zheng .file = file, 178861007b31SStefan Hajnoczi .sector_num = sector_num, 178961007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 179061007b31SStefan Hajnoczi .pnum = pnum, 179161007b31SStefan Hajnoczi .done = false, 179261007b31SStefan Hajnoczi }; 179361007b31SStefan Hajnoczi 179461007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 179561007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 1796ba3f0e25SFam Zheng bdrv_get_block_status_above_co_entry(&data); 179761007b31SStefan Hajnoczi } else { 179861007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 179961007b31SStefan Hajnoczi 18000b8b8753SPaolo Bonzini co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry, 18010b8b8753SPaolo Bonzini &data); 18020b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 180361007b31SStefan Hajnoczi while (!data.done) { 180461007b31SStefan Hajnoczi aio_poll(aio_context, true); 180561007b31SStefan Hajnoczi } 180661007b31SStefan Hajnoczi } 180761007b31SStefan Hajnoczi return data.ret; 180861007b31SStefan Hajnoczi } 180961007b31SStefan Hajnoczi 1810ba3f0e25SFam Zheng int64_t bdrv_get_block_status(BlockDriverState *bs, 1811ba3f0e25SFam Zheng int64_t sector_num, 181267a0fd2aSFam Zheng int nb_sectors, int *pnum, 181367a0fd2aSFam Zheng BlockDriverState **file) 1814ba3f0e25SFam Zheng { 1815760e0063SKevin Wolf return bdrv_get_block_status_above(bs, backing_bs(bs), 181667a0fd2aSFam Zheng sector_num, nb_sectors, pnum, file); 1817ba3f0e25SFam Zheng } 1818ba3f0e25SFam Zheng 181961007b31SStefan Hajnoczi int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 182061007b31SStefan Hajnoczi int nb_sectors, int *pnum) 182161007b31SStefan Hajnoczi { 182267a0fd2aSFam Zheng BlockDriverState *file; 182367a0fd2aSFam Zheng int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum, 182467a0fd2aSFam Zheng &file); 182561007b31SStefan Hajnoczi if (ret < 0) { 182661007b31SStefan Hajnoczi return ret; 
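/* A negative 'ret' here is an errno value propagated unchanged from bdrv_get_block_status(). */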
182761007b31SStefan Hajnoczi } 182861007b31SStefan Hajnoczi return !!(ret & BDRV_BLOCK_ALLOCATED); 182961007b31SStefan Hajnoczi } 183061007b31SStefan Hajnoczi 183161007b31SStefan Hajnoczi /* 183261007b31SStefan Hajnoczi * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 183361007b31SStefan Hajnoczi * 183461007b31SStefan Hajnoczi * Return true if the given sector is allocated in any image between 183561007b31SStefan Hajnoczi * BASE and TOP (inclusive). BASE can be NULL to check if the given 183661007b31SStefan Hajnoczi * sector is allocated in any image of the chain. Return false otherwise. 183761007b31SStefan Hajnoczi * 183861007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 183961007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 184061007b31SStefan Hajnoczi * allocated/unallocated state. 184161007b31SStefan Hajnoczi * 184261007b31SStefan Hajnoczi */ 184361007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top, 184461007b31SStefan Hajnoczi BlockDriverState *base, 184561007b31SStefan Hajnoczi int64_t sector_num, 184661007b31SStefan Hajnoczi int nb_sectors, int *pnum) 184761007b31SStefan Hajnoczi { 184861007b31SStefan Hajnoczi BlockDriverState *intermediate; 184961007b31SStefan Hajnoczi int ret, n = nb_sectors; 185061007b31SStefan Hajnoczi 185161007b31SStefan Hajnoczi intermediate = top; 185261007b31SStefan Hajnoczi while (intermediate && intermediate != base) { 185361007b31SStefan Hajnoczi int pnum_inter; 185461007b31SStefan Hajnoczi ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 185561007b31SStefan Hajnoczi &pnum_inter); 185661007b31SStefan Hajnoczi if (ret < 0) { 185761007b31SStefan Hajnoczi return ret; 185861007b31SStefan Hajnoczi } else if (ret) { 185961007b31SStefan Hajnoczi *pnum = pnum_inter; 186061007b31SStefan Hajnoczi return 1; 186161007b31SStefan Hajnoczi } 186261007b31SStefan Hajnoczi 186361007b31SStefan Hajnoczi /* 186461007b31SStefan Hajnoczi * [sector_num, nb_sectors] is unallocated on top but intermediate 186561007b31SStefan Hajnoczi * might have 186661007b31SStefan Hajnoczi * 186761007b31SStefan Hajnoczi * [sector_num+x, nb_sectors] allocated.
186861007b31SStefan Hajnoczi */ 186961007b31SStefan Hajnoczi if (n > pnum_inter && 187061007b31SStefan Hajnoczi (intermediate == top || 187161007b31SStefan Hajnoczi sector_num + pnum_inter < intermediate->total_sectors)) { 187261007b31SStefan Hajnoczi n = pnum_inter; 187361007b31SStefan Hajnoczi } 187461007b31SStefan Hajnoczi 1875760e0063SKevin Wolf intermediate = backing_bs(intermediate); 187661007b31SStefan Hajnoczi } 187761007b31SStefan Hajnoczi 187861007b31SStefan Hajnoczi *pnum = n; 187961007b31SStefan Hajnoczi return 0; 188061007b31SStefan Hajnoczi } 188161007b31SStefan Hajnoczi 188261007b31SStefan Hajnoczi int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 188361007b31SStefan Hajnoczi const uint8_t *buf, int nb_sectors) 188461007b31SStefan Hajnoczi { 188561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 188661007b31SStefan Hajnoczi int ret; 188761007b31SStefan Hajnoczi 188861007b31SStefan Hajnoczi if (!drv) { 188961007b31SStefan Hajnoczi return -ENOMEDIUM; 189061007b31SStefan Hajnoczi } 189161007b31SStefan Hajnoczi if (!drv->bdrv_write_compressed) { 189261007b31SStefan Hajnoczi return -ENOTSUP; 189361007b31SStefan Hajnoczi } 189461007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 189561007b31SStefan Hajnoczi if (ret < 0) { 189661007b31SStefan Hajnoczi return ret; 189761007b31SStefan Hajnoczi } 189861007b31SStefan Hajnoczi 189961007b31SStefan Hajnoczi assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 190061007b31SStefan Hajnoczi 190161007b31SStefan Hajnoczi return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 190261007b31SStefan Hajnoczi } 190361007b31SStefan Hajnoczi 19041a8ae822SKevin Wolf typedef struct BdrvVmstateCo { 19051a8ae822SKevin Wolf BlockDriverState *bs; 19061a8ae822SKevin Wolf QEMUIOVector *qiov; 19071a8ae822SKevin Wolf int64_t pos; 19081a8ae822SKevin Wolf bool is_read; 19091a8ae822SKevin Wolf int ret; 19101a8ae822SKevin Wolf } BdrvVmstateCo; 19111a8ae822SKevin Wolf 19121a8ae822SKevin Wolf static int coroutine_fn 19131a8ae822SKevin Wolf bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 19141a8ae822SKevin Wolf bool is_read) 19151a8ae822SKevin Wolf { 19161a8ae822SKevin Wolf BlockDriver *drv = bs->drv; 19171a8ae822SKevin Wolf 19181a8ae822SKevin Wolf if (!drv) { 19191a8ae822SKevin Wolf return -ENOMEDIUM; 19201a8ae822SKevin Wolf } else if (drv->bdrv_load_vmstate) { 19211a8ae822SKevin Wolf return is_read ? 
drv->bdrv_load_vmstate(bs, qiov, pos) 19221a8ae822SKevin Wolf : drv->bdrv_save_vmstate(bs, qiov, pos); 19231a8ae822SKevin Wolf } else if (bs->file) { 19241a8ae822SKevin Wolf return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read); 19251a8ae822SKevin Wolf } 19261a8ae822SKevin Wolf 19271a8ae822SKevin Wolf return -ENOTSUP; 19281a8ae822SKevin Wolf } 19291a8ae822SKevin Wolf 19301a8ae822SKevin Wolf static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque) 19311a8ae822SKevin Wolf { 19321a8ae822SKevin Wolf BdrvVmstateCo *co = opaque; 19331a8ae822SKevin Wolf co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read); 19341a8ae822SKevin Wolf } 19351a8ae822SKevin Wolf 19361a8ae822SKevin Wolf static inline int 19371a8ae822SKevin Wolf bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 19381a8ae822SKevin Wolf bool is_read) 19391a8ae822SKevin Wolf { 19401a8ae822SKevin Wolf if (qemu_in_coroutine()) { 19411a8ae822SKevin Wolf return bdrv_co_rw_vmstate(bs, qiov, pos, is_read); 19421a8ae822SKevin Wolf } else { 19431a8ae822SKevin Wolf BdrvVmstateCo data = { 19441a8ae822SKevin Wolf .bs = bs, 19451a8ae822SKevin Wolf .qiov = qiov, 19461a8ae822SKevin Wolf .pos = pos, 19471a8ae822SKevin Wolf .is_read = is_read, 19481a8ae822SKevin Wolf .ret = -EINPROGRESS, 19491a8ae822SKevin Wolf }; 19500b8b8753SPaolo Bonzini Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data); 19511a8ae822SKevin Wolf 19520b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 19531a8ae822SKevin Wolf while (data.ret == -EINPROGRESS) { 19541a8ae822SKevin Wolf aio_poll(bdrv_get_aio_context(bs), true); 19551a8ae822SKevin Wolf } 19561a8ae822SKevin Wolf return data.ret; 19571a8ae822SKevin Wolf } 19581a8ae822SKevin Wolf } 19591a8ae822SKevin Wolf 196061007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 196161007b31SStefan Hajnoczi int64_t pos, int size) 196261007b31SStefan Hajnoczi { 196361007b31SStefan Hajnoczi QEMUIOVector qiov; 196461007b31SStefan Hajnoczi struct iovec iov = { 196561007b31SStefan Hajnoczi .iov_base = (void *) buf, 196661007b31SStefan Hajnoczi .iov_len = size, 196761007b31SStefan Hajnoczi }; 1968b433d942SKevin Wolf int ret; 196961007b31SStefan Hajnoczi 197061007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 1971b433d942SKevin Wolf 1972b433d942SKevin Wolf ret = bdrv_writev_vmstate(bs, &qiov, pos); 1973b433d942SKevin Wolf if (ret < 0) { 1974b433d942SKevin Wolf return ret; 1975b433d942SKevin Wolf } 1976b433d942SKevin Wolf 1977b433d942SKevin Wolf return size; 197861007b31SStefan Hajnoczi } 197961007b31SStefan Hajnoczi 198061007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 198161007b31SStefan Hajnoczi { 19821a8ae822SKevin Wolf return bdrv_rw_vmstate(bs, qiov, pos, false); 198361007b31SStefan Hajnoczi } 198461007b31SStefan Hajnoczi 198561007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 198661007b31SStefan Hajnoczi int64_t pos, int size) 198761007b31SStefan Hajnoczi { 19885ddda0b8SKevin Wolf QEMUIOVector qiov; 19895ddda0b8SKevin Wolf struct iovec iov = { 19905ddda0b8SKevin Wolf .iov_base = buf, 19915ddda0b8SKevin Wolf .iov_len = size, 19925ddda0b8SKevin Wolf }; 1993b433d942SKevin Wolf int ret; 19945ddda0b8SKevin Wolf 19955ddda0b8SKevin Wolf qemu_iovec_init_external(&qiov, &iov, 1); 1996b433d942SKevin Wolf ret = bdrv_readv_vmstate(bs, &qiov, pos); 1997b433d942SKevin Wolf if (ret < 0) { 1998b433d942SKevin Wolf return ret; 1999b433d942SKevin Wolf } 
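/* On success, report the requested size to the caller, mirroring bdrv_save_vmstate() above. */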
2000b433d942SKevin Wolf 2001b433d942SKevin Wolf return size; 20025ddda0b8SKevin Wolf } 20035ddda0b8SKevin Wolf 20045ddda0b8SKevin Wolf int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 20055ddda0b8SKevin Wolf { 20061a8ae822SKevin Wolf return bdrv_rw_vmstate(bs, qiov, pos, true); 200761007b31SStefan Hajnoczi } 200861007b31SStefan Hajnoczi 200961007b31SStefan Hajnoczi /**************************************************************/ 201061007b31SStefan Hajnoczi /* async I/Os */ 201161007b31SStefan Hajnoczi 2012ebb7af21SKevin Wolf BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num, 201361007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 201461007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 201561007b31SStefan Hajnoczi { 2016ebb7af21SKevin Wolf trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque); 201761007b31SStefan Hajnoczi 2018adad6496SKevin Wolf return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0, 201961007b31SStefan Hajnoczi cb, opaque, false); 202061007b31SStefan Hajnoczi } 202161007b31SStefan Hajnoczi 20220d1049c7SKevin Wolf BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num, 202361007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 202461007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 202561007b31SStefan Hajnoczi { 20260d1049c7SKevin Wolf trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque); 202761007b31SStefan Hajnoczi 2028adad6496SKevin Wolf return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0, 202961007b31SStefan Hajnoczi cb, opaque, true); 203061007b31SStefan Hajnoczi } 203161007b31SStefan Hajnoczi 203261007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb) 203361007b31SStefan Hajnoczi { 203461007b31SStefan Hajnoczi qemu_aio_ref(acb); 203561007b31SStefan Hajnoczi bdrv_aio_cancel_async(acb); 203661007b31SStefan Hajnoczi while (acb->refcnt > 1) { 203761007b31SStefan Hajnoczi if (acb->aiocb_info->get_aio_context) { 203861007b31SStefan Hajnoczi aio_poll(acb->aiocb_info->get_aio_context(acb), true); 203961007b31SStefan Hajnoczi } else if (acb->bs) { 204061007b31SStefan Hajnoczi aio_poll(bdrv_get_aio_context(acb->bs), true); 204161007b31SStefan Hajnoczi } else { 204261007b31SStefan Hajnoczi abort(); 204361007b31SStefan Hajnoczi } 204461007b31SStefan Hajnoczi } 204561007b31SStefan Hajnoczi qemu_aio_unref(acb); 204661007b31SStefan Hajnoczi } 204761007b31SStefan Hajnoczi 204861007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements 204961007b31SStefan Hajnoczi * cancel_async, otherwise we do nothing and let the request normally complete. 205061007b31SStefan Hajnoczi * In either case the completion callback must be called. 
*/ 205161007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb) 205261007b31SStefan Hajnoczi { 205361007b31SStefan Hajnoczi if (acb->aiocb_info->cancel_async) { 205461007b31SStefan Hajnoczi acb->aiocb_info->cancel_async(acb); 205561007b31SStefan Hajnoczi } 205661007b31SStefan Hajnoczi } 205761007b31SStefan Hajnoczi 205861007b31SStefan Hajnoczi /**************************************************************/ 205961007b31SStefan Hajnoczi /* async block device emulation */ 206061007b31SStefan Hajnoczi 206141574268SEric Blake typedef struct BlockRequest { 206241574268SEric Blake union { 206341574268SEric Blake /* Used during read, write, trim */ 206441574268SEric Blake struct { 206541574268SEric Blake int64_t sector; 206641574268SEric Blake int nb_sectors; 206741574268SEric Blake int flags; 206841574268SEric Blake QEMUIOVector *qiov; 206941574268SEric Blake }; 207041574268SEric Blake /* Used during ioctl */ 207141574268SEric Blake struct { 207241574268SEric Blake int req; 207341574268SEric Blake void *buf; 207441574268SEric Blake }; 207541574268SEric Blake }; 207641574268SEric Blake BlockCompletionFunc *cb; 207741574268SEric Blake void *opaque; 207841574268SEric Blake 207941574268SEric Blake int error; 208041574268SEric Blake } BlockRequest; 208141574268SEric Blake 208261007b31SStefan Hajnoczi typedef struct BlockAIOCBCoroutine { 208361007b31SStefan Hajnoczi BlockAIOCB common; 2084adad6496SKevin Wolf BdrvChild *child; 208561007b31SStefan Hajnoczi BlockRequest req; 208661007b31SStefan Hajnoczi bool is_write; 208761007b31SStefan Hajnoczi bool need_bh; 208861007b31SStefan Hajnoczi bool *done; 208961007b31SStefan Hajnoczi QEMUBH* bh; 209061007b31SStefan Hajnoczi } BlockAIOCBCoroutine; 209161007b31SStefan Hajnoczi 209261007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_co_aiocb_info = { 209361007b31SStefan Hajnoczi .aiocb_size = sizeof(BlockAIOCBCoroutine), 209461007b31SStefan Hajnoczi }; 209561007b31SStefan Hajnoczi 209661007b31SStefan Hajnoczi static void bdrv_co_complete(BlockAIOCBCoroutine *acb) 209761007b31SStefan Hajnoczi { 209861007b31SStefan Hajnoczi if (!acb->need_bh) { 209961007b31SStefan Hajnoczi acb->common.cb(acb->common.opaque, acb->req.error); 210061007b31SStefan Hajnoczi qemu_aio_unref(acb); 210161007b31SStefan Hajnoczi } 210261007b31SStefan Hajnoczi } 210361007b31SStefan Hajnoczi 210461007b31SStefan Hajnoczi static void bdrv_co_em_bh(void *opaque) 210561007b31SStefan Hajnoczi { 210661007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 210761007b31SStefan Hajnoczi 210861007b31SStefan Hajnoczi assert(!acb->need_bh); 210961007b31SStefan Hajnoczi qemu_bh_delete(acb->bh); 211061007b31SStefan Hajnoczi bdrv_co_complete(acb); 211161007b31SStefan Hajnoczi } 211261007b31SStefan Hajnoczi 211361007b31SStefan Hajnoczi static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) 211461007b31SStefan Hajnoczi { 211561007b31SStefan Hajnoczi acb->need_bh = false; 211661007b31SStefan Hajnoczi if (acb->req.error != -EINPROGRESS) { 211761007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 211861007b31SStefan Hajnoczi 211961007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 212061007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 212161007b31SStefan Hajnoczi } 212261007b31SStefan Hajnoczi } 212361007b31SStefan Hajnoczi 212461007b31SStefan Hajnoczi /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 212561007b31SStefan Hajnoczi static void coroutine_fn bdrv_co_do_rw(void *opaque) 212661007b31SStefan Hajnoczi { 212761007b31SStefan 
Hajnoczi BlockAIOCBCoroutine *acb = opaque; 212861007b31SStefan Hajnoczi 212961007b31SStefan Hajnoczi if (!acb->is_write) { 2130adad6496SKevin Wolf acb->req.error = bdrv_co_do_readv(acb->child, acb->req.sector, 213161007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 213261007b31SStefan Hajnoczi } else { 2133adad6496SKevin Wolf acb->req.error = bdrv_co_do_writev(acb->child, acb->req.sector, 213461007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 213561007b31SStefan Hajnoczi } 213661007b31SStefan Hajnoczi 213761007b31SStefan Hajnoczi bdrv_co_complete(acb); 213861007b31SStefan Hajnoczi } 213961007b31SStefan Hajnoczi 2140adad6496SKevin Wolf static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child, 214161007b31SStefan Hajnoczi int64_t sector_num, 214261007b31SStefan Hajnoczi QEMUIOVector *qiov, 214361007b31SStefan Hajnoczi int nb_sectors, 214461007b31SStefan Hajnoczi BdrvRequestFlags flags, 214561007b31SStefan Hajnoczi BlockCompletionFunc *cb, 214661007b31SStefan Hajnoczi void *opaque, 214761007b31SStefan Hajnoczi bool is_write) 214861007b31SStefan Hajnoczi { 214961007b31SStefan Hajnoczi Coroutine *co; 215061007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 215161007b31SStefan Hajnoczi 2152adad6496SKevin Wolf acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque); 2153adad6496SKevin Wolf acb->child = child; 215461007b31SStefan Hajnoczi acb->need_bh = true; 215561007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 215661007b31SStefan Hajnoczi acb->req.sector = sector_num; 215761007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 215861007b31SStefan Hajnoczi acb->req.qiov = qiov; 215961007b31SStefan Hajnoczi acb->req.flags = flags; 216061007b31SStefan Hajnoczi acb->is_write = is_write; 216161007b31SStefan Hajnoczi 21620b8b8753SPaolo Bonzini co = qemu_coroutine_create(bdrv_co_do_rw, acb); 21630b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 216461007b31SStefan Hajnoczi 216561007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 216661007b31SStefan Hajnoczi return &acb->common; 216761007b31SStefan Hajnoczi } 216861007b31SStefan Hajnoczi 216961007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 217061007b31SStefan Hajnoczi { 217161007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 217261007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 217361007b31SStefan Hajnoczi 217461007b31SStefan Hajnoczi acb->req.error = bdrv_co_flush(bs); 217561007b31SStefan Hajnoczi bdrv_co_complete(acb); 217661007b31SStefan Hajnoczi } 217761007b31SStefan Hajnoczi 217861007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, 217961007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 218061007b31SStefan Hajnoczi { 218161007b31SStefan Hajnoczi trace_bdrv_aio_flush(bs, opaque); 218261007b31SStefan Hajnoczi 218361007b31SStefan Hajnoczi Coroutine *co; 218461007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 218561007b31SStefan Hajnoczi 218661007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 218761007b31SStefan Hajnoczi acb->need_bh = true; 218861007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 218961007b31SStefan Hajnoczi 21900b8b8753SPaolo Bonzini co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb); 21910b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 219261007b31SStefan Hajnoczi 219361007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 219461007b31SStefan Hajnoczi return &acb->common; 219561007b31SStefan Hajnoczi } 219661007b31SStefan 
Hajnoczi 219761007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 219861007b31SStefan Hajnoczi { 219961007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 220061007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 220161007b31SStefan Hajnoczi 22029f1963b3SEric Blake acb->req.error = bdrv_co_pdiscard(bs, acb->req.sector << BDRV_SECTOR_BITS, 22039f1963b3SEric Blake acb->req.nb_sectors << BDRV_SECTOR_BITS); 220461007b31SStefan Hajnoczi bdrv_co_complete(acb); 220561007b31SStefan Hajnoczi } 220661007b31SStefan Hajnoczi 220761007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, 220861007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 220961007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 221061007b31SStefan Hajnoczi { 221161007b31SStefan Hajnoczi Coroutine *co; 221261007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 221361007b31SStefan Hajnoczi 221461007b31SStefan Hajnoczi trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 221561007b31SStefan Hajnoczi 221661007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 221761007b31SStefan Hajnoczi acb->need_bh = true; 221861007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 221961007b31SStefan Hajnoczi acb->req.sector = sector_num; 222061007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 22210b8b8753SPaolo Bonzini co = qemu_coroutine_create(bdrv_aio_discard_co_entry, acb); 22220b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 222361007b31SStefan Hajnoczi 222461007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 222561007b31SStefan Hajnoczi return &acb->common; 222661007b31SStefan Hajnoczi } 222761007b31SStefan Hajnoczi 222861007b31SStefan Hajnoczi void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 222961007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 223061007b31SStefan Hajnoczi { 223161007b31SStefan Hajnoczi BlockAIOCB *acb; 223261007b31SStefan Hajnoczi 2233c84b3192SPaolo Bonzini acb = g_malloc(aiocb_info->aiocb_size); 223461007b31SStefan Hajnoczi acb->aiocb_info = aiocb_info; 223561007b31SStefan Hajnoczi acb->bs = bs; 223661007b31SStefan Hajnoczi acb->cb = cb; 223761007b31SStefan Hajnoczi acb->opaque = opaque; 223861007b31SStefan Hajnoczi acb->refcnt = 1; 223961007b31SStefan Hajnoczi return acb; 224061007b31SStefan Hajnoczi } 224161007b31SStefan Hajnoczi 224261007b31SStefan Hajnoczi void qemu_aio_ref(void *p) 224361007b31SStefan Hajnoczi { 224461007b31SStefan Hajnoczi BlockAIOCB *acb = p; 224561007b31SStefan Hajnoczi acb->refcnt++; 224661007b31SStefan Hajnoczi } 224761007b31SStefan Hajnoczi 224861007b31SStefan Hajnoczi void qemu_aio_unref(void *p) 224961007b31SStefan Hajnoczi { 225061007b31SStefan Hajnoczi BlockAIOCB *acb = p; 225161007b31SStefan Hajnoczi assert(acb->refcnt > 0); 225261007b31SStefan Hajnoczi if (--acb->refcnt == 0) { 2253c84b3192SPaolo Bonzini g_free(acb); 225461007b31SStefan Hajnoczi } 225561007b31SStefan Hajnoczi } 225661007b31SStefan Hajnoczi 225761007b31SStefan Hajnoczi /**************************************************************/ 225861007b31SStefan Hajnoczi /* Coroutine block device emulation */ 225961007b31SStefan Hajnoczi 2260e293b7a3SKevin Wolf typedef struct FlushCo { 2261e293b7a3SKevin Wolf BlockDriverState *bs; 2262e293b7a3SKevin Wolf int ret; 2263e293b7a3SKevin Wolf } FlushCo; 2264e293b7a3SKevin Wolf 2265e293b7a3SKevin Wolf 226661007b31SStefan Hajnoczi static void coroutine_fn bdrv_flush_co_entry(void *opaque) 226761007b31SStefan Hajnoczi { 
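/* Coroutine entry point for the synchronous bdrv_flush() wrapper below; the result is stored in the FlushCo so the polling caller can retrieve it. */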
2268e293b7a3SKevin Wolf FlushCo *rwco = opaque; 226961007b31SStefan Hajnoczi 227061007b31SStefan Hajnoczi rwco->ret = bdrv_co_flush(rwco->bs); 227161007b31SStefan Hajnoczi } 227261007b31SStefan Hajnoczi 227361007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 227461007b31SStefan Hajnoczi { 227561007b31SStefan Hajnoczi int ret; 2276cdb5e315SFam Zheng BdrvTrackedRequest req; 227761007b31SStefan Hajnoczi 22781b6bc94dSDimitris Aragiorgis if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 22791b6bc94dSDimitris Aragiorgis bdrv_is_sg(bs)) { 228061007b31SStefan Hajnoczi return 0; 228161007b31SStefan Hajnoczi } 228261007b31SStefan Hajnoczi 2283cdb5e315SFam Zheng tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH); 2284c32b82afSPavel Dovgalyuk 22853ff2f67aSEvgeny Yakovlev int current_gen = bs->write_gen; 22863ff2f67aSEvgeny Yakovlev 22873ff2f67aSEvgeny Yakovlev /* Wait until any previous flushes are completed */ 22883ff2f67aSEvgeny Yakovlev while (bs->flush_started_gen != bs->flushed_gen) { 22893ff2f67aSEvgeny Yakovlev qemu_co_queue_wait(&bs->flush_queue); 22903ff2f67aSEvgeny Yakovlev } 22913ff2f67aSEvgeny Yakovlev 22923ff2f67aSEvgeny Yakovlev bs->flush_started_gen = current_gen; 22933ff2f67aSEvgeny Yakovlev 2294c32b82afSPavel Dovgalyuk /* Write back all layers by calling one driver function */ 2295c32b82afSPavel Dovgalyuk if (bs->drv->bdrv_co_flush) { 2296c32b82afSPavel Dovgalyuk ret = bs->drv->bdrv_co_flush(bs); 2297c32b82afSPavel Dovgalyuk goto out; 2298c32b82afSPavel Dovgalyuk } 2299c32b82afSPavel Dovgalyuk 230061007b31SStefan Hajnoczi /* Write back cached data to the OS even with cache=unsafe */ 230161007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 230261007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_os) { 230361007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_os(bs); 230461007b31SStefan Hajnoczi if (ret < 0) { 2305cdb5e315SFam Zheng goto out; 230661007b31SStefan Hajnoczi } 230761007b31SStefan Hajnoczi } 230861007b31SStefan Hajnoczi 230961007b31SStefan Hajnoczi /* But don't actually force it to the disk with cache=unsafe */ 231061007b31SStefan Hajnoczi if (bs->open_flags & BDRV_O_NO_FLUSH) { 231161007b31SStefan Hajnoczi goto flush_parent; 231261007b31SStefan Hajnoczi } 231361007b31SStefan Hajnoczi 23143ff2f67aSEvgeny Yakovlev /* Check if we really need to flush anything */ 23153ff2f67aSEvgeny Yakovlev if (bs->flushed_gen == current_gen) { 23163ff2f67aSEvgeny Yakovlev goto flush_parent; 23173ff2f67aSEvgeny Yakovlev } 23183ff2f67aSEvgeny Yakovlev 231961007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 232061007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_disk) { 232161007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_disk(bs); 232261007b31SStefan Hajnoczi } else if (bs->drv->bdrv_aio_flush) { 232361007b31SStefan Hajnoczi BlockAIOCB *acb; 232461007b31SStefan Hajnoczi CoroutineIOCompletion co = { 232561007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 232661007b31SStefan Hajnoczi }; 232761007b31SStefan Hajnoczi 232861007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 232961007b31SStefan Hajnoczi if (acb == NULL) { 233061007b31SStefan Hajnoczi ret = -EIO; 233161007b31SStefan Hajnoczi } else { 233261007b31SStefan Hajnoczi qemu_coroutine_yield(); 233361007b31SStefan Hajnoczi ret = co.ret; 233461007b31SStefan Hajnoczi } 233561007b31SStefan Hajnoczi } else { 233661007b31SStefan Hajnoczi /* 233761007b31SStefan Hajnoczi * Some block drivers always operate in 
either writethrough or unsafe 233861007b31SStefan Hajnoczi * mode and don't support bdrv_flush therefore. Usually qemu doesn't 233961007b31SStefan Hajnoczi * know how the server works (because the behaviour is hardcoded or 234061007b31SStefan Hajnoczi * depends on server-side configuration), so we can't ensure that 234161007b31SStefan Hajnoczi * everything is safe on disk. Returning an error doesn't work because 234261007b31SStefan Hajnoczi * that would break guests even if the server operates in writethrough 234361007b31SStefan Hajnoczi * mode. 234461007b31SStefan Hajnoczi * 234561007b31SStefan Hajnoczi * Let's hope the user knows what he's doing. 234661007b31SStefan Hajnoczi */ 234761007b31SStefan Hajnoczi ret = 0; 234861007b31SStefan Hajnoczi } 23493ff2f67aSEvgeny Yakovlev 235061007b31SStefan Hajnoczi if (ret < 0) { 2351cdb5e315SFam Zheng goto out; 235261007b31SStefan Hajnoczi } 235361007b31SStefan Hajnoczi 235461007b31SStefan Hajnoczi /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 235561007b31SStefan Hajnoczi * in the case of cache=unsafe, so there are no useless flushes. 235661007b31SStefan Hajnoczi */ 235761007b31SStefan Hajnoczi flush_parent: 2358cdb5e315SFam Zheng ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0; 2359cdb5e315SFam Zheng out: 23603ff2f67aSEvgeny Yakovlev /* Notify any pending flushes that we have completed */ 23613ff2f67aSEvgeny Yakovlev bs->flushed_gen = current_gen; 23623ff2f67aSEvgeny Yakovlev qemu_co_queue_restart_all(&bs->flush_queue); 23633ff2f67aSEvgeny Yakovlev 2364cdb5e315SFam Zheng tracked_request_end(&req); 2365cdb5e315SFam Zheng return ret; 236661007b31SStefan Hajnoczi } 236761007b31SStefan Hajnoczi 236861007b31SStefan Hajnoczi int bdrv_flush(BlockDriverState *bs) 236961007b31SStefan Hajnoczi { 237061007b31SStefan Hajnoczi Coroutine *co; 2371e293b7a3SKevin Wolf FlushCo flush_co = { 237261007b31SStefan Hajnoczi .bs = bs, 237361007b31SStefan Hajnoczi .ret = NOT_DONE, 237461007b31SStefan Hajnoczi }; 237561007b31SStefan Hajnoczi 237661007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 237761007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 2378e293b7a3SKevin Wolf bdrv_flush_co_entry(&flush_co); 237961007b31SStefan Hajnoczi } else { 238061007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 238161007b31SStefan Hajnoczi 23820b8b8753SPaolo Bonzini co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co); 23830b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 2384e293b7a3SKevin Wolf while (flush_co.ret == NOT_DONE) { 238561007b31SStefan Hajnoczi aio_poll(aio_context, true); 238661007b31SStefan Hajnoczi } 238761007b31SStefan Hajnoczi } 238861007b31SStefan Hajnoczi 2389e293b7a3SKevin Wolf return flush_co.ret; 239061007b31SStefan Hajnoczi } 239161007b31SStefan Hajnoczi 239261007b31SStefan Hajnoczi typedef struct DiscardCo { 239361007b31SStefan Hajnoczi BlockDriverState *bs; 2394*0c51a893SEric Blake int64_t offset; 2395*0c51a893SEric Blake int count; 239661007b31SStefan Hajnoczi int ret; 239761007b31SStefan Hajnoczi } DiscardCo; 2398*0c51a893SEric Blake static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque) 239961007b31SStefan Hajnoczi { 240061007b31SStefan Hajnoczi DiscardCo *rwco = opaque; 240161007b31SStefan Hajnoczi 2402*0c51a893SEric Blake rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count); 240361007b31SStefan Hajnoczi } 240461007b31SStefan Hajnoczi 24059f1963b3SEric Blake int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, 24069f1963b3SEric Blake int 
count) 240761007b31SStefan Hajnoczi { 2408b1066c87SFam Zheng BdrvTrackedRequest req; 24099f1963b3SEric Blake int max_pdiscard, ret; 24109f1963b3SEric Blake int head, align; 241161007b31SStefan Hajnoczi 241261007b31SStefan Hajnoczi if (!bs->drv) { 241361007b31SStefan Hajnoczi return -ENOMEDIUM; 241461007b31SStefan Hajnoczi } 241561007b31SStefan Hajnoczi 24169f1963b3SEric Blake ret = bdrv_check_byte_request(bs, offset, count); 241761007b31SStefan Hajnoczi if (ret < 0) { 241861007b31SStefan Hajnoczi return ret; 241961007b31SStefan Hajnoczi } else if (bs->read_only) { 2420eaf5fe2dSPaolo Bonzini return -EPERM; 242161007b31SStefan Hajnoczi } 242204c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 242361007b31SStefan Hajnoczi 242461007b31SStefan Hajnoczi /* Do nothing if disabled. */ 242561007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 242661007b31SStefan Hajnoczi return 0; 242761007b31SStefan Hajnoczi } 242861007b31SStefan Hajnoczi 242961007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { 243061007b31SStefan Hajnoczi return 0; 243161007b31SStefan Hajnoczi } 243261007b31SStefan Hajnoczi 24339f1963b3SEric Blake /* Discard is advisory, so ignore any unaligned head or tail */ 24349f1963b3SEric Blake align = MAX(BDRV_SECTOR_SIZE, 24359f1963b3SEric Blake MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment)); 24369f1963b3SEric Blake assert(is_power_of_2(align)); 24379f1963b3SEric Blake head = MIN(count, -offset & (align - 1)); 24389f1963b3SEric Blake if (head) { 24399f1963b3SEric Blake count -= head; 24409f1963b3SEric Blake offset += head; 24419f1963b3SEric Blake } 24429f1963b3SEric Blake count = QEMU_ALIGN_DOWN(count, align); 24439f1963b3SEric Blake if (!count) { 24449f1963b3SEric Blake return 0; 24459f1963b3SEric Blake } 24469f1963b3SEric Blake 24479f1963b3SEric Blake tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD); 244850824995SFam Zheng 2449ec050f77SDenis V. Lunev ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req); 2450ec050f77SDenis V. Lunev if (ret < 0) { 2451ec050f77SDenis V. Lunev goto out; 2452ec050f77SDenis V. Lunev } 2453ec050f77SDenis V. 
Lunev 24549f1963b3SEric Blake max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), 24559f1963b3SEric Blake align); 24569f1963b3SEric Blake 24579f1963b3SEric Blake while (count > 0) { 245861007b31SStefan Hajnoczi int ret; 24599f1963b3SEric Blake int num = MIN(count, max_pdiscard); 246061007b31SStefan Hajnoczi 246161007b31SStefan Hajnoczi if (bs->drv->bdrv_co_discard) { 24629f1963b3SEric Blake ret = bs->drv->bdrv_co_discard(bs, offset >> BDRV_SECTOR_BITS, 24639f1963b3SEric Blake num >> BDRV_SECTOR_BITS); 246461007b31SStefan Hajnoczi } else { 246561007b31SStefan Hajnoczi BlockAIOCB *acb; 246661007b31SStefan Hajnoczi CoroutineIOCompletion co = { 246761007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 246861007b31SStefan Hajnoczi }; 246961007b31SStefan Hajnoczi 24709f1963b3SEric Blake acb = bs->drv->bdrv_aio_discard(bs, offset >> BDRV_SECTOR_BITS, 24719f1963b3SEric Blake num >> BDRV_SECTOR_BITS, 247261007b31SStefan Hajnoczi bdrv_co_io_em_complete, &co); 247361007b31SStefan Hajnoczi if (acb == NULL) { 2474b1066c87SFam Zheng ret = -EIO; 2475b1066c87SFam Zheng goto out; 247661007b31SStefan Hajnoczi } else { 247761007b31SStefan Hajnoczi qemu_coroutine_yield(); 247861007b31SStefan Hajnoczi ret = co.ret; 247961007b31SStefan Hajnoczi } 248061007b31SStefan Hajnoczi } 248161007b31SStefan Hajnoczi if (ret && ret != -ENOTSUP) { 2482b1066c87SFam Zheng goto out; 248361007b31SStefan Hajnoczi } 248461007b31SStefan Hajnoczi 24859f1963b3SEric Blake offset += num; 24869f1963b3SEric Blake count -= num; 248761007b31SStefan Hajnoczi } 2488b1066c87SFam Zheng ret = 0; 2489b1066c87SFam Zheng out: 24903ff2f67aSEvgeny Yakovlev ++bs->write_gen; 2491968d8b06SDenis V. Lunev bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS, 2492968d8b06SDenis V. Lunev req.bytes >> BDRV_SECTOR_BITS); 2493b1066c87SFam Zheng tracked_request_end(&req); 2494b1066c87SFam Zheng return ret; 249561007b31SStefan Hajnoczi } 249661007b31SStefan Hajnoczi 2497*0c51a893SEric Blake int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count) 249861007b31SStefan Hajnoczi { 249961007b31SStefan Hajnoczi Coroutine *co; 250061007b31SStefan Hajnoczi DiscardCo rwco = { 250161007b31SStefan Hajnoczi .bs = bs, 2502*0c51a893SEric Blake .offset = offset, 2503*0c51a893SEric Blake .count = count, 250461007b31SStefan Hajnoczi .ret = NOT_DONE, 250561007b31SStefan Hajnoczi }; 250661007b31SStefan Hajnoczi 250761007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 250861007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 2509*0c51a893SEric Blake bdrv_pdiscard_co_entry(&rwco); 251061007b31SStefan Hajnoczi } else { 251161007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 251261007b31SStefan Hajnoczi 2513*0c51a893SEric Blake co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco); 25140b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 251561007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 251661007b31SStefan Hajnoczi aio_poll(aio_context, true); 251761007b31SStefan Hajnoczi } 251861007b31SStefan Hajnoczi } 251961007b31SStefan Hajnoczi 252061007b31SStefan Hajnoczi return rwco.ret; 252161007b31SStefan Hajnoczi } 252261007b31SStefan Hajnoczi 25235c5ae76aSFam Zheng static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf) 252461007b31SStefan Hajnoczi { 252561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 25265c5ae76aSFam Zheng BdrvTrackedRequest tracked_req; 25275c5ae76aSFam Zheng CoroutineIOCompletion co = { 25285c5ae76aSFam Zheng .coroutine = qemu_coroutine_self(), 25295c5ae76aSFam 
Zheng }; 25305c5ae76aSFam Zheng BlockAIOCB *acb; 253161007b31SStefan Hajnoczi 25325c5ae76aSFam Zheng tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL); 25335c5ae76aSFam Zheng if (!drv || !drv->bdrv_aio_ioctl) { 25345c5ae76aSFam Zheng co.ret = -ENOTSUP; 25355c5ae76aSFam Zheng goto out; 25365c5ae76aSFam Zheng } 25375c5ae76aSFam Zheng 25385c5ae76aSFam Zheng acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 25395c5ae76aSFam Zheng if (!acb) { 2540c8a9fd80SFam Zheng co.ret = -ENOTSUP; 2541c8a9fd80SFam Zheng goto out; 25425c5ae76aSFam Zheng } 25435c5ae76aSFam Zheng qemu_coroutine_yield(); 25445c5ae76aSFam Zheng out: 25455c5ae76aSFam Zheng tracked_request_end(&tracked_req); 25465c5ae76aSFam Zheng return co.ret; 25475c5ae76aSFam Zheng } 25485c5ae76aSFam Zheng 25495c5ae76aSFam Zheng typedef struct { 25505c5ae76aSFam Zheng BlockDriverState *bs; 25515c5ae76aSFam Zheng int req; 25525c5ae76aSFam Zheng void *buf; 25535c5ae76aSFam Zheng int ret; 25545c5ae76aSFam Zheng } BdrvIoctlCoData; 25555c5ae76aSFam Zheng 25565c5ae76aSFam Zheng static void coroutine_fn bdrv_co_ioctl_entry(void *opaque) 25575c5ae76aSFam Zheng { 25585c5ae76aSFam Zheng BdrvIoctlCoData *data = opaque; 25595c5ae76aSFam Zheng data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf); 25605c5ae76aSFam Zheng } 25615c5ae76aSFam Zheng 25625c5ae76aSFam Zheng /* needed for generic scsi interface */ 25635c5ae76aSFam Zheng int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 25645c5ae76aSFam Zheng { 25655c5ae76aSFam Zheng BdrvIoctlCoData data = { 25665c5ae76aSFam Zheng .bs = bs, 25675c5ae76aSFam Zheng .req = req, 25685c5ae76aSFam Zheng .buf = buf, 25695c5ae76aSFam Zheng .ret = -EINPROGRESS, 25705c5ae76aSFam Zheng }; 25715c5ae76aSFam Zheng 25725c5ae76aSFam Zheng if (qemu_in_coroutine()) { 25735c5ae76aSFam Zheng /* Fast-path if already in coroutine context */ 25745c5ae76aSFam Zheng bdrv_co_ioctl_entry(&data); 25755c5ae76aSFam Zheng } else { 25760b8b8753SPaolo Bonzini Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry, &data); 2577ba889444SPaolo Bonzini 25780b8b8753SPaolo Bonzini qemu_coroutine_enter(co); 25795c5ae76aSFam Zheng while (data.ret == -EINPROGRESS) { 25805c5ae76aSFam Zheng aio_poll(bdrv_get_aio_context(bs), true); 25815c5ae76aSFam Zheng } 2582ba889444SPaolo Bonzini } 25835c5ae76aSFam Zheng return data.ret; 25845c5ae76aSFam Zheng } 25855c5ae76aSFam Zheng 25865c5ae76aSFam Zheng static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque) 25875c5ae76aSFam Zheng { 25885c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = opaque; 25895c5ae76aSFam Zheng acb->req.error = bdrv_co_do_ioctl(acb->common.bs, 25905c5ae76aSFam Zheng acb->req.req, acb->req.buf); 25915c5ae76aSFam Zheng bdrv_co_complete(acb); 259261007b31SStefan Hajnoczi } 259361007b31SStefan Hajnoczi 259461007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 259561007b31SStefan Hajnoczi unsigned long int req, void *buf, 259661007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 259761007b31SStefan Hajnoczi { 25985c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info, 25995c5ae76aSFam Zheng bs, cb, opaque); 26005c5ae76aSFam Zheng Coroutine *co; 260161007b31SStefan Hajnoczi 26025c5ae76aSFam Zheng acb->need_bh = true; 26035c5ae76aSFam Zheng acb->req.error = -EINPROGRESS; 26045c5ae76aSFam Zheng acb->req.req = req; 26055c5ae76aSFam Zheng acb->req.buf = buf; 26060b8b8753SPaolo Bonzini co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry, acb); 26070b8b8753SPaolo Bonzini 
qemu_coroutine_enter(co); 26085c5ae76aSFam Zheng 26095c5ae76aSFam Zheng bdrv_co_maybe_schedule_bh(acb); 26105c5ae76aSFam Zheng return &acb->common; 261161007b31SStefan Hajnoczi } 261261007b31SStefan Hajnoczi 261361007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size) 261461007b31SStefan Hajnoczi { 261561007b31SStefan Hajnoczi return qemu_memalign(bdrv_opt_mem_align(bs), size); 261661007b31SStefan Hajnoczi } 261761007b31SStefan Hajnoczi 261861007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size) 261961007b31SStefan Hajnoczi { 262061007b31SStefan Hajnoczi return memset(qemu_blockalign(bs, size), 0, size); 262161007b31SStefan Hajnoczi } 262261007b31SStefan Hajnoczi 262361007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 262461007b31SStefan Hajnoczi { 262561007b31SStefan Hajnoczi size_t align = bdrv_opt_mem_align(bs); 262661007b31SStefan Hajnoczi 262761007b31SStefan Hajnoczi /* Ensure that NULL is never returned on success */ 262861007b31SStefan Hajnoczi assert(align > 0); 262961007b31SStefan Hajnoczi if (size == 0) { 263061007b31SStefan Hajnoczi size = align; 263161007b31SStefan Hajnoczi } 263261007b31SStefan Hajnoczi 263361007b31SStefan Hajnoczi return qemu_try_memalign(align, size); 263461007b31SStefan Hajnoczi } 263561007b31SStefan Hajnoczi 263661007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 263761007b31SStefan Hajnoczi { 263861007b31SStefan Hajnoczi void *mem = qemu_try_blockalign(bs, size); 263961007b31SStefan Hajnoczi 264061007b31SStefan Hajnoczi if (mem) { 264161007b31SStefan Hajnoczi memset(mem, 0, size); 264261007b31SStefan Hajnoczi } 264361007b31SStefan Hajnoczi 264461007b31SStefan Hajnoczi return mem; 264561007b31SStefan Hajnoczi } 264661007b31SStefan Hajnoczi 264761007b31SStefan Hajnoczi /* 264861007b31SStefan Hajnoczi * Check if all memory in this vector is sector aligned. 264961007b31SStefan Hajnoczi */ 265061007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 265161007b31SStefan Hajnoczi { 265261007b31SStefan Hajnoczi int i; 26534196d2f0SDenis V. 
Lunev size_t alignment = bdrv_min_mem_align(bs); 265461007b31SStefan Hajnoczi 265561007b31SStefan Hajnoczi for (i = 0; i < qiov->niov; i++) { 265661007b31SStefan Hajnoczi if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 265761007b31SStefan Hajnoczi return false; 265861007b31SStefan Hajnoczi } 265961007b31SStefan Hajnoczi if (qiov->iov[i].iov_len % alignment) { 266061007b31SStefan Hajnoczi return false; 266161007b31SStefan Hajnoczi } 266261007b31SStefan Hajnoczi } 266361007b31SStefan Hajnoczi 266461007b31SStefan Hajnoczi return true; 266561007b31SStefan Hajnoczi } 266661007b31SStefan Hajnoczi 266761007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs, 266861007b31SStefan Hajnoczi NotifierWithReturn *notifier) 266961007b31SStefan Hajnoczi { 267061007b31SStefan Hajnoczi notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 267161007b31SStefan Hajnoczi } 267261007b31SStefan Hajnoczi 267361007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs) 267461007b31SStefan Hajnoczi { 26756b98bd64SPaolo Bonzini BdrvChild *child; 26766b98bd64SPaolo Bonzini 26776b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 26786b98bd64SPaolo Bonzini bdrv_io_plug(child->bs); 26796b98bd64SPaolo Bonzini } 26806b98bd64SPaolo Bonzini 26816b98bd64SPaolo Bonzini if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) { 268261007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 268361007b31SStefan Hajnoczi if (drv && drv->bdrv_io_plug) { 268461007b31SStefan Hajnoczi drv->bdrv_io_plug(bs); 26856b98bd64SPaolo Bonzini } 268661007b31SStefan Hajnoczi } 268761007b31SStefan Hajnoczi } 268861007b31SStefan Hajnoczi 268961007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs) 269061007b31SStefan Hajnoczi { 26916b98bd64SPaolo Bonzini BdrvChild *child; 26926b98bd64SPaolo Bonzini 26936b98bd64SPaolo Bonzini assert(bs->io_plugged); 26946b98bd64SPaolo Bonzini if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) { 269561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 269661007b31SStefan Hajnoczi if (drv && drv->bdrv_io_unplug) { 269761007b31SStefan Hajnoczi drv->bdrv_io_unplug(bs); 269861007b31SStefan Hajnoczi } 269961007b31SStefan Hajnoczi } 270061007b31SStefan Hajnoczi 27016b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 27026b98bd64SPaolo Bonzini bdrv_io_unplug(child->bs); 27036b98bd64SPaolo Bonzini } 27046b98bd64SPaolo Bonzini } 27056b98bd64SPaolo Bonzini 27066b98bd64SPaolo Bonzini void bdrv_io_unplugged_begin(BlockDriverState *bs) 270761007b31SStefan Hajnoczi { 27086b98bd64SPaolo Bonzini BdrvChild *child; 27096b98bd64SPaolo Bonzini 27106b98bd64SPaolo Bonzini if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) { 271161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 27126b98bd64SPaolo Bonzini if (drv && drv->bdrv_io_unplug) { 27136b98bd64SPaolo Bonzini drv->bdrv_io_unplug(bs); 27146b98bd64SPaolo Bonzini } 27156b98bd64SPaolo Bonzini } 27166b98bd64SPaolo Bonzini 27176b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 27186b98bd64SPaolo Bonzini bdrv_io_unplugged_begin(child->bs); 27196b98bd64SPaolo Bonzini } 27206b98bd64SPaolo Bonzini } 27216b98bd64SPaolo Bonzini 27226b98bd64SPaolo Bonzini void bdrv_io_unplugged_end(BlockDriverState *bs) 27236b98bd64SPaolo Bonzini { 27246b98bd64SPaolo Bonzini BdrvChild *child; 27256b98bd64SPaolo Bonzini 27266b98bd64SPaolo Bonzini assert(bs->io_plug_disabled); 27276b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 27286b98bd64SPaolo Bonzini bdrv_io_unplugged_end(child->bs); 
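/* Children are unwound first; this node re-invokes drv->bdrv_io_plug()
 * below only once its own io_plug_disabled counter drops back to zero
 * while requests are still plugged. */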
27296b98bd64SPaolo Bonzini } 27306b98bd64SPaolo Bonzini 27316b98bd64SPaolo Bonzini if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) { 27326b98bd64SPaolo Bonzini BlockDriver *drv = bs->drv; 27336b98bd64SPaolo Bonzini if (drv && drv->bdrv_io_plug) { 27346b98bd64SPaolo Bonzini drv->bdrv_io_plug(bs); 27356b98bd64SPaolo Bonzini } 273661007b31SStefan Hajnoczi } 273761007b31SStefan Hajnoczi } 2738
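/*
 * Illustrative sketch, not part of the original file: one way a caller might
 * pair bdrv_io_plug() and bdrv_io_unplug() around a batch of asynchronous
 * submissions so that drivers implementing bdrv_io_plug/bdrv_io_unplug can
 * coalesce the queued requests into a single kick. BatchedDiscard,
 * submit_discard_batch and batch_done are hypothetical names introduced here
 * for illustration only.
 */
typedef struct BatchedDiscard {
    int64_t sector_num;
    int nb_sectors;
} BatchedDiscard;

static void submit_discard_batch(BlockDriverState *bs, BatchedDiscard *reqs,
                                 int nreqs, BlockCompletionFunc *batch_done)
{
    int i;

    /* Plugs this node and, recursively, all of its children */
    bdrv_io_plug(bs);

    for (i = 0; i < nreqs; i++) {
        bdrv_aio_discard(bs, reqs[i].sector_num, reqs[i].nb_sectors,
                         batch_done, &reqs[i]);
    }

    /* The last unplug lets the driver submit everything queued above */
    bdrv_io_unplug(bs);
}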