/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);

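/*
 * The two helpers below notify every parent of @bs (each BdrvChild that
 * points at it) through the optional BdrvChildRole drained_begin/drained_end
 * callbacks.  A parent that implements them is expected to stop submitting
 * new requests on this child until the matching drained_end notification.
 */
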
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

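/*
 * Note the merge direction in bdrv_merge_limits() used above: transfer
 * limits shrink (MIN_NON_ZERO keeps the smallest non-zero bound) while
 * alignment requirements grow (MAX keeps the strictest alignment), so the
 * merged BlockLimits are acceptable to both the node and its children.
 */
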
/**
 * The copy-on-read flag is actually a reference count, so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it again.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
    bool done;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co, NULL);
}

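/*
 * bdrv_co_yield_to_drain() below moves the polling loop out of coroutine
 * context: it schedules a bottom half, yields, lets bdrv_co_drain_bh_cb()
 * poll until no requests are pending, and is then re-entered by the BH.
 * Yielding first gives coroutines queued from qemu_co_queue_run_restart()
 * a chance to run before the polling starts.
 */
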
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart().
     */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

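/*
 * Illustrative use of a drained section (a sketch, not a quote from a real
 * caller):
 *
 *     bdrv_drained_begin(bs);
 *     ... work that must not race with new I/O on bs ...
 *     bdrv_drained_end(bs);
 *
 * The pair nests by way of bs->quiesce_counter, so opening a drained
 * section on a node that is already drained is safe.
 */
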
/*
 * Wait for pending requests to complete across all BlockDriverStates.
 *
 * This function does not flush data to disk; use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);

    job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

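/*
 * Request tracking: every in-flight request is represented by a
 * BdrvTrackedRequest on the bs->tracked_requests list.  That list is what
 * bdrv_requests_pending() checks, and it is also the basis for the overlap
 * detection that serialises conflicting requests further down.
 */
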
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

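/*
 * mark_request_serialising() widens the request's overlap window out to
 * @align boundaries.  With illustrative numbers: a request covering bytes
 * [4096, 6144) marked with a 65536-byte cluster alignment ends up with an
 * overlap window of [0, 65536), so any other request touching that cluster
 * counts as conflicting.
 */
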
/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

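/*
 * tracked_request_overlaps() above is a plain intersection test on the
 * half-open byte ranges formed by the (possibly widened) overlap windows:
 * two requests are disjoint exactly when one window ends at or before the
 * point where the other begins.
 */
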
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

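/*
 * RwCo and bdrv_rw_co_entry() are the glue that lets synchronous callers
 * reuse the coroutine request path: the arguments travel in a RwCo, the
 * coroutine stores the result in rwco->ret, and NOT_DONE marks the request
 * as still in flight for the aio_poll() loop in bdrv_prwv_co() below.
 */
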
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child->bs, sector_num, buf, nb_sectors, false, 0);
}

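/*
 * bdrv_read() and bdrv_write() are the legacy sector-based synchronous
 * entry points.  An illustrative call (sketch only) reading the first
 * 512-byte sector into a stack buffer:
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *     int ret = bdrv_read(child, 0, buf, 1);
 *     if (ret < 0) {
 *         ... handle the error ...
 *     }
 */
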
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child->bs, sector_num, (uint8_t *)buf, nb_sectors,
                      true, 0);
}

int bdrv_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(bs, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child->bs, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

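/*
 * The byte-based synchronous wrappers above all funnel into bdrv_prwv_co():
 * they wrap the caller's buffer in a single-element QEMUIOVector and, on
 * success, return the full byte count (qiov->size), never a short count.
 */
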
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

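/*
 * bdrv_driver_preadv() above picks the richest interface the driver
 * implements: byte-based .bdrv_co_preadv if available, else sector-based
 * .bdrv_co_readv, else emulation on top of the callback-based
 * .bdrv_aio_readv using CoroutineIOCompletion.  The sector-based fallbacks
 * only work for sector-aligned requests, hence the asserts.
 */
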
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

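/*
 * Flag handling in bdrv_driver_pwritev() above: whatever the driver
 * advertises in bs->supported_write_flags is passed through to it, and the
 * remainder is emulated here.  In practice that means an unsupported
 * BDRV_REQ_FUA becomes an explicit bdrv_co_flush() after a successful
 * write.
 */
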
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

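/*
 * The copy-on-read flow above, in short: round the request out to cluster
 * boundaries, read the whole cluster into a private bounce buffer, write it
 * back to the top image (as an efficient zero write when the driver allows
 * it and the data is all zeroes), and only then copy the requested slice
 * into the caller's qiov.
 */
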
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them.
         */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
    } else if (max_bytes > 0) {
        QEMUIOVector local_qiov;

        qemu_iovec_init(&local_qiov, qiov->niov);
        qemu_iovec_concat(&local_qiov, qiov, 0, max_bytes);

        ret = bdrv_driver_preadv(bs, offset, max_bytes, &local_qiov, 0);

        qemu_iovec_destroy(&local_qiov);
    } else {
        ret = 0;
    }

    /* Reading beyond end of file is supposed to produce zeroes */
    if (ret == 0 && total_bytes < offset + bytes) {
        uint64_t zero_offset = MAX(0, total_bytes - offset);
        uint64_t zero_bytes = offset + bytes - zero_offset;
        qemu_iovec_memset(qiov, zero_offset, 0, zero_bytes);
    }

out:
    return ret;
}

/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child->bs, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}

/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);

    assert(is_power_of_2(alignment));
    head = offset & (alignment - 1);
    tail = (offset + count) & (alignment - 1);
    max_write_zeroes &= ~(alignment - 1);

    while (count > 0 && !ret) {
        int num = count;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. */
            num = MIN(count, alignment - head);
Lunev head = 0; 1187d05aa8bbSEric Blake } else if (tail && num > alignment) { 1188443668caSDenis V. Lunev /* Shorten the request to the last aligned sector. */ 1189443668caSDenis V. Lunev num -= tail; 119061007b31SStefan Hajnoczi } 119161007b31SStefan Hajnoczi 119261007b31SStefan Hajnoczi /* limit request size */ 119361007b31SStefan Hajnoczi if (num > max_write_zeroes) { 119461007b31SStefan Hajnoczi num = max_write_zeroes; 119561007b31SStefan Hajnoczi } 119661007b31SStefan Hajnoczi 119761007b31SStefan Hajnoczi ret = -ENOTSUP; 119861007b31SStefan Hajnoczi /* First try the efficient write zeroes operation */ 1199d05aa8bbSEric Blake if (drv->bdrv_co_pwrite_zeroes) { 1200d05aa8bbSEric Blake ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1201d05aa8bbSEric Blake flags & bs->supported_zero_flags); 1202d05aa8bbSEric Blake if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1203d05aa8bbSEric Blake !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1204d05aa8bbSEric Blake need_flush = true; 1205d05aa8bbSEric Blake } 1206465fe887SEric Blake } else { 1207465fe887SEric Blake assert(!bs->supported_zero_flags); 120861007b31SStefan Hajnoczi } 120961007b31SStefan Hajnoczi 121061007b31SStefan Hajnoczi if (ret == -ENOTSUP) { 121161007b31SStefan Hajnoczi /* Fall back to bounce buffer if write zeroes is unsupported */ 12125def6b80SEric Blake int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, 121361007b31SStefan Hajnoczi MAX_WRITE_ZEROES_BOUNCE_BUFFER); 1214465fe887SEric Blake BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1215465fe887SEric Blake 1216465fe887SEric Blake if ((flags & BDRV_REQ_FUA) && 1217465fe887SEric Blake !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1218465fe887SEric Blake /* No need for bdrv_driver_pwritev() to do a fallback 1219465fe887SEric Blake * flush on each chunk; use just one at the end */ 1220465fe887SEric Blake write_flags &= ~BDRV_REQ_FUA; 1221465fe887SEric Blake need_flush = true; 1222465fe887SEric Blake } 12235def6b80SEric Blake num = MIN(num, max_transfer); 1224d05aa8bbSEric Blake iov.iov_len = num; 122561007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 1226d05aa8bbSEric Blake iov.iov_base = qemu_try_blockalign(bs, num); 122761007b31SStefan Hajnoczi if (iov.iov_base == NULL) { 122861007b31SStefan Hajnoczi ret = -ENOMEM; 122961007b31SStefan Hajnoczi goto fail; 123061007b31SStefan Hajnoczi } 1231d05aa8bbSEric Blake memset(iov.iov_base, 0, num); 123261007b31SStefan Hajnoczi } 123361007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 123461007b31SStefan Hajnoczi 1235d05aa8bbSEric Blake ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); 123661007b31SStefan Hajnoczi 123761007b31SStefan Hajnoczi /* Keep bounce buffer around if it is big enough for all 123861007b31SStefan Hajnoczi * future requests.
123961007b31SStefan Hajnoczi */ 12405def6b80SEric Blake if (num < max_transfer) { 124161007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 124261007b31SStefan Hajnoczi iov.iov_base = NULL; 124361007b31SStefan Hajnoczi } 124461007b31SStefan Hajnoczi } 124561007b31SStefan Hajnoczi 1246d05aa8bbSEric Blake offset += num; 1247d05aa8bbSEric Blake count -= num; 124861007b31SStefan Hajnoczi } 124961007b31SStefan Hajnoczi 125061007b31SStefan Hajnoczi fail: 1251465fe887SEric Blake if (ret == 0 && need_flush) { 1252465fe887SEric Blake ret = bdrv_co_flush(bs); 1253465fe887SEric Blake } 125461007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 125561007b31SStefan Hajnoczi return ret; 125661007b31SStefan Hajnoczi } 125761007b31SStefan Hajnoczi 125861007b31SStefan Hajnoczi /* 125961007b31SStefan Hajnoczi * Forwards an already correctly aligned write request to the BlockDriver. 126061007b31SStefan Hajnoczi */ 126161007b31SStefan Hajnoczi static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 126261007b31SStefan Hajnoczi BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1263cff86b38SEric Blake int64_t align, QEMUIOVector *qiov, int flags) 126461007b31SStefan Hajnoczi { 126561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 126661007b31SStefan Hajnoczi bool waited; 126761007b31SStefan Hajnoczi int ret; 126861007b31SStefan Hajnoczi 12699896c876SKevin Wolf int64_t start_sector = offset >> BDRV_SECTOR_BITS; 12709896c876SKevin Wolf int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 127161007b31SStefan Hajnoczi 1272cff86b38SEric Blake assert(is_power_of_2(align)); 1273cff86b38SEric Blake assert((offset & (align - 1)) == 0); 1274cff86b38SEric Blake assert((bytes & (align - 1)) == 0); 127561007b31SStefan Hajnoczi assert(!qiov || bytes == qiov->size); 1276abb06c5aSDaniel P. 
Berrange assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1277fa166538SEric Blake assert(!(flags & ~BDRV_REQ_MASK)); 127861007b31SStefan Hajnoczi 127961007b31SStefan Hajnoczi waited = wait_serialising_requests(req); 128061007b31SStefan Hajnoczi assert(!waited || !req->serialising); 128161007b31SStefan Hajnoczi assert(req->overlap_offset <= offset); 128261007b31SStefan Hajnoczi assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 128361007b31SStefan Hajnoczi 128461007b31SStefan Hajnoczi ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 128561007b31SStefan Hajnoczi 128661007b31SStefan Hajnoczi if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1287c1499a5eSEric Blake !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 128861007b31SStefan Hajnoczi qemu_iovec_is_zero(qiov)) { 128961007b31SStefan Hajnoczi flags |= BDRV_REQ_ZERO_WRITE; 129061007b31SStefan Hajnoczi if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 129161007b31SStefan Hajnoczi flags |= BDRV_REQ_MAY_UNMAP; 129261007b31SStefan Hajnoczi } 129361007b31SStefan Hajnoczi } 129461007b31SStefan Hajnoczi 129561007b31SStefan Hajnoczi if (ret < 0) { 129661007b31SStefan Hajnoczi /* Do nothing, write notifier decided to fail this request */ 129761007b31SStefan Hajnoczi } else if (flags & BDRV_REQ_ZERO_WRITE) { 12989a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 12999896c876SKevin Wolf ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 130061007b31SStefan Hajnoczi } else { 13019a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV); 130278a07294SKevin Wolf ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags); 130361007b31SStefan Hajnoczi } 13049a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 130561007b31SStefan Hajnoczi 13069896c876SKevin Wolf bdrv_set_dirty(bs, start_sector, end_sector - start_sector); 130761007b31SStefan Hajnoczi 130853d8f9d8SMax Reitz if (bs->wr_highest_offset < offset + bytes) { 130953d8f9d8SMax Reitz bs->wr_highest_offset = offset + bytes; 131053d8f9d8SMax Reitz } 131161007b31SStefan Hajnoczi 131261007b31SStefan Hajnoczi if (ret >= 0) { 13139896c876SKevin Wolf bs->total_sectors = MAX(bs->total_sectors, end_sector); 131461007b31SStefan Hajnoczi } 131561007b31SStefan Hajnoczi 131661007b31SStefan Hajnoczi return ret; 131761007b31SStefan Hajnoczi } 131861007b31SStefan Hajnoczi 13199eeb6dd1SFam Zheng static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs, 13209eeb6dd1SFam Zheng int64_t offset, 13219eeb6dd1SFam Zheng unsigned int bytes, 13229eeb6dd1SFam Zheng BdrvRequestFlags flags, 13239eeb6dd1SFam Zheng BdrvTrackedRequest *req) 13249eeb6dd1SFam Zheng { 13259eeb6dd1SFam Zheng uint8_t *buf = NULL; 13269eeb6dd1SFam Zheng QEMUIOVector local_qiov; 13279eeb6dd1SFam Zheng struct iovec iov; 1328a5b8dd2cSEric Blake uint64_t align = bs->bl.request_alignment; 13299eeb6dd1SFam Zheng unsigned int head_padding_bytes, tail_padding_bytes; 13309eeb6dd1SFam Zheng int ret = 0; 13319eeb6dd1SFam Zheng 13329eeb6dd1SFam Zheng head_padding_bytes = offset & (align - 1); 13339eeb6dd1SFam Zheng tail_padding_bytes = align - ((offset + bytes) & (align - 1)); 13349eeb6dd1SFam Zheng 13359eeb6dd1SFam Zheng 13369eeb6dd1SFam Zheng assert(flags & BDRV_REQ_ZERO_WRITE); 13379eeb6dd1SFam Zheng if (head_padding_bytes || tail_padding_bytes) { 13389eeb6dd1SFam Zheng buf = qemu_blockalign(bs, align); 13399eeb6dd1SFam Zheng iov = (struct iovec) { 13409eeb6dd1SFam Zheng .iov_base = buf, 13419eeb6dd1SFam Zheng .iov_len = align, 
13429eeb6dd1SFam Zheng }; 13439eeb6dd1SFam Zheng qemu_iovec_init_external(&local_qiov, &iov, 1); 13449eeb6dd1SFam Zheng } 13459eeb6dd1SFam Zheng if (head_padding_bytes) { 13469eeb6dd1SFam Zheng uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 13479eeb6dd1SFam Zheng 13489eeb6dd1SFam Zheng /* RMW the unaligned part before head. */ 13499eeb6dd1SFam Zheng mark_request_serialising(req, align); 13509eeb6dd1SFam Zheng wait_serialising_requests(req); 13519a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 13529eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align, 13539eeb6dd1SFam Zheng align, &local_qiov, 0); 13549eeb6dd1SFam Zheng if (ret < 0) { 13559eeb6dd1SFam Zheng goto fail; 13569eeb6dd1SFam Zheng } 13579a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 13589eeb6dd1SFam Zheng 13599eeb6dd1SFam Zheng memset(buf + head_padding_bytes, 0, zero_bytes); 13609eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align, 1361cff86b38SEric Blake align, &local_qiov, 13629eeb6dd1SFam Zheng flags & ~BDRV_REQ_ZERO_WRITE); 13639eeb6dd1SFam Zheng if (ret < 0) { 13649eeb6dd1SFam Zheng goto fail; 13659eeb6dd1SFam Zheng } 13669eeb6dd1SFam Zheng offset += zero_bytes; 13679eeb6dd1SFam Zheng bytes -= zero_bytes; 13689eeb6dd1SFam Zheng } 13699eeb6dd1SFam Zheng 13709eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 13719eeb6dd1SFam Zheng if (bytes >= align) { 13729eeb6dd1SFam Zheng /* Write the aligned part in the middle. */ 13739eeb6dd1SFam Zheng uint64_t aligned_bytes = bytes & ~(align - 1); 1374cff86b38SEric Blake ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align, 13759eeb6dd1SFam Zheng NULL, flags); 13769eeb6dd1SFam Zheng if (ret < 0) { 13779eeb6dd1SFam Zheng goto fail; 13789eeb6dd1SFam Zheng } 13799eeb6dd1SFam Zheng bytes -= aligned_bytes; 13809eeb6dd1SFam Zheng offset += aligned_bytes; 13819eeb6dd1SFam Zheng } 13829eeb6dd1SFam Zheng 13839eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 13849eeb6dd1SFam Zheng if (bytes) { 13859eeb6dd1SFam Zheng assert(align == tail_padding_bytes + bytes); 13869eeb6dd1SFam Zheng /* RMW the unaligned part after tail. 
*/ 13879eeb6dd1SFam Zheng mark_request_serialising(req, align); 13889eeb6dd1SFam Zheng wait_serialising_requests(req); 13899a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 13909eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset, align, 13919eeb6dd1SFam Zheng align, &local_qiov, 0); 13929eeb6dd1SFam Zheng if (ret < 0) { 13939eeb6dd1SFam Zheng goto fail; 13949eeb6dd1SFam Zheng } 13959a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 13969eeb6dd1SFam Zheng 13979eeb6dd1SFam Zheng memset(buf, 0, bytes); 1398cff86b38SEric Blake ret = bdrv_aligned_pwritev(bs, req, offset, align, align, 13999eeb6dd1SFam Zheng &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 14009eeb6dd1SFam Zheng } 14019eeb6dd1SFam Zheng fail: 14029eeb6dd1SFam Zheng qemu_vfree(buf); 14039eeb6dd1SFam Zheng return ret; 14049eeb6dd1SFam Zheng 14059eeb6dd1SFam Zheng } 14069eeb6dd1SFam Zheng 140761007b31SStefan Hajnoczi /* 140861007b31SStefan Hajnoczi * Handle a write request in coroutine context 140961007b31SStefan Hajnoczi */ 1410cab3a356SKevin Wolf int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs, 141161007b31SStefan Hajnoczi int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 141261007b31SStefan Hajnoczi BdrvRequestFlags flags) 141361007b31SStefan Hajnoczi { 141461007b31SStefan Hajnoczi BdrvTrackedRequest req; 1415a5b8dd2cSEric Blake uint64_t align = bs->bl.request_alignment; 141661007b31SStefan Hajnoczi uint8_t *head_buf = NULL; 141761007b31SStefan Hajnoczi uint8_t *tail_buf = NULL; 141861007b31SStefan Hajnoczi QEMUIOVector local_qiov; 141961007b31SStefan Hajnoczi bool use_local_qiov = false; 142061007b31SStefan Hajnoczi int ret; 142161007b31SStefan Hajnoczi 142261007b31SStefan Hajnoczi if (!bs->drv) { 142361007b31SStefan Hajnoczi return -ENOMEDIUM; 142461007b31SStefan Hajnoczi } 142561007b31SStefan Hajnoczi if (bs->read_only) { 1426eaf5fe2dSPaolo Bonzini return -EPERM; 142761007b31SStefan Hajnoczi } 142804c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 142961007b31SStefan Hajnoczi 143061007b31SStefan Hajnoczi ret = bdrv_check_byte_request(bs, offset, bytes); 143161007b31SStefan Hajnoczi if (ret < 0) { 143261007b31SStefan Hajnoczi return ret; 143361007b31SStefan Hajnoczi } 143461007b31SStefan Hajnoczi 143561007b31SStefan Hajnoczi /* 143661007b31SStefan Hajnoczi * Align write if necessary by performing a read-modify-write cycle. 143761007b31SStefan Hajnoczi * Pad qiov with the read parts and be sure to have a tracked request not 143861007b31SStefan Hajnoczi * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
143961007b31SStefan Hajnoczi */ 1440ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 144161007b31SStefan Hajnoczi 14429eeb6dd1SFam Zheng if (!qiov) { 14439eeb6dd1SFam Zheng ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); 14449eeb6dd1SFam Zheng goto out; 14459eeb6dd1SFam Zheng } 14469eeb6dd1SFam Zheng 144761007b31SStefan Hajnoczi if (offset & (align - 1)) { 144861007b31SStefan Hajnoczi QEMUIOVector head_qiov; 144961007b31SStefan Hajnoczi struct iovec head_iov; 145061007b31SStefan Hajnoczi 145161007b31SStefan Hajnoczi mark_request_serialising(&req, align); 145261007b31SStefan Hajnoczi wait_serialising_requests(&req); 145361007b31SStefan Hajnoczi 145461007b31SStefan Hajnoczi head_buf = qemu_blockalign(bs, align); 145561007b31SStefan Hajnoczi head_iov = (struct iovec) { 145661007b31SStefan Hajnoczi .iov_base = head_buf, 145761007b31SStefan Hajnoczi .iov_len = align, 145861007b31SStefan Hajnoczi }; 145961007b31SStefan Hajnoczi qemu_iovec_init_external(&head_qiov, &head_iov, 1); 146061007b31SStefan Hajnoczi 14619a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 146261007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 146361007b31SStefan Hajnoczi align, &head_qiov, 0); 146461007b31SStefan Hajnoczi if (ret < 0) { 146561007b31SStefan Hajnoczi goto fail; 146661007b31SStefan Hajnoczi } 14679a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 146861007b31SStefan Hajnoczi 146961007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 2); 147061007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 147161007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 147261007b31SStefan Hajnoczi use_local_qiov = true; 147361007b31SStefan Hajnoczi 147461007b31SStefan Hajnoczi bytes += offset & (align - 1); 147561007b31SStefan Hajnoczi offset = offset & ~(align - 1); 1476117bc3faSPeter Lieven 1477117bc3faSPeter Lieven /* We have read the tail already if the request is smaller 1478117bc3faSPeter Lieven * than one aligned block. 
1479117bc3faSPeter Lieven */ 1480117bc3faSPeter Lieven if (bytes < align) { 1481117bc3faSPeter Lieven qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes); 1482117bc3faSPeter Lieven bytes = align; 1483117bc3faSPeter Lieven } 148461007b31SStefan Hajnoczi } 148561007b31SStefan Hajnoczi 148661007b31SStefan Hajnoczi if ((offset + bytes) & (align - 1)) { 148761007b31SStefan Hajnoczi QEMUIOVector tail_qiov; 148861007b31SStefan Hajnoczi struct iovec tail_iov; 148961007b31SStefan Hajnoczi size_t tail_bytes; 149061007b31SStefan Hajnoczi bool waited; 149161007b31SStefan Hajnoczi 149261007b31SStefan Hajnoczi mark_request_serialising(&req, align); 149361007b31SStefan Hajnoczi waited = wait_serialising_requests(&req); 149461007b31SStefan Hajnoczi assert(!waited || !use_local_qiov); 149561007b31SStefan Hajnoczi 149661007b31SStefan Hajnoczi tail_buf = qemu_blockalign(bs, align); 149761007b31SStefan Hajnoczi tail_iov = (struct iovec) { 149861007b31SStefan Hajnoczi .iov_base = tail_buf, 149961007b31SStefan Hajnoczi .iov_len = align, 150061007b31SStefan Hajnoczi }; 150161007b31SStefan Hajnoczi qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 150261007b31SStefan Hajnoczi 15039a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 150461007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 150561007b31SStefan Hajnoczi align, &tail_qiov, 0); 150661007b31SStefan Hajnoczi if (ret < 0) { 150761007b31SStefan Hajnoczi goto fail; 150861007b31SStefan Hajnoczi } 15099a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 151061007b31SStefan Hajnoczi 151161007b31SStefan Hajnoczi if (!use_local_qiov) { 151261007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 1); 151361007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 151461007b31SStefan Hajnoczi use_local_qiov = true; 151561007b31SStefan Hajnoczi } 151661007b31SStefan Hajnoczi 151761007b31SStefan Hajnoczi tail_bytes = (offset + bytes) & (align - 1); 151861007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 151961007b31SStefan Hajnoczi 152061007b31SStefan Hajnoczi bytes = ROUND_UP(bytes, align); 152161007b31SStefan Hajnoczi } 152261007b31SStefan Hajnoczi 1523cff86b38SEric Blake ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align, 152461007b31SStefan Hajnoczi use_local_qiov ? 
&local_qiov : qiov, 152561007b31SStefan Hajnoczi flags); 152661007b31SStefan Hajnoczi 152761007b31SStefan Hajnoczi fail: 152861007b31SStefan Hajnoczi 152961007b31SStefan Hajnoczi if (use_local_qiov) { 153061007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 153161007b31SStefan Hajnoczi } 153261007b31SStefan Hajnoczi qemu_vfree(head_buf); 153361007b31SStefan Hajnoczi qemu_vfree(tail_buf); 15349eeb6dd1SFam Zheng out: 15359eeb6dd1SFam Zheng tracked_request_end(&req); 153661007b31SStefan Hajnoczi return ret; 153761007b31SStefan Hajnoczi } 153861007b31SStefan Hajnoczi 1539adad6496SKevin Wolf static int coroutine_fn bdrv_co_do_writev(BdrvChild *child, 154061007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 154161007b31SStefan Hajnoczi BdrvRequestFlags flags) 154261007b31SStefan Hajnoczi { 154361007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 154461007b31SStefan Hajnoczi return -EINVAL; 154561007b31SStefan Hajnoczi } 154661007b31SStefan Hajnoczi 1547adad6496SKevin Wolf return bdrv_co_pwritev(child->bs, sector_num << BDRV_SECTOR_BITS, 154861007b31SStefan Hajnoczi nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 154961007b31SStefan Hajnoczi } 155061007b31SStefan Hajnoczi 155125ec177dSKevin Wolf int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num, 155261007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *qiov) 155361007b31SStefan Hajnoczi { 155425ec177dSKevin Wolf trace_bdrv_co_writev(child->bs, sector_num, nb_sectors); 155561007b31SStefan Hajnoczi 1556adad6496SKevin Wolf return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0); 155761007b31SStefan Hajnoczi } 155861007b31SStefan Hajnoczi 155974021bc4SEric Blake int coroutine_fn bdrv_co_pwrite_zeroes(BlockDriverState *bs, 156074021bc4SEric Blake int64_t offset, int count, 156161007b31SStefan Hajnoczi BdrvRequestFlags flags) 156261007b31SStefan Hajnoczi { 156374021bc4SEric Blake trace_bdrv_co_pwrite_zeroes(bs, offset, count, flags); 156461007b31SStefan Hajnoczi 156561007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 156661007b31SStefan Hajnoczi flags &= ~BDRV_REQ_MAY_UNMAP; 156761007b31SStefan Hajnoczi } 156861007b31SStefan Hajnoczi 156974021bc4SEric Blake return bdrv_co_pwritev(bs, offset, count, NULL, 157061007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags); 157161007b31SStefan Hajnoczi } 157261007b31SStefan Hajnoczi 157361007b31SStefan Hajnoczi typedef struct BdrvCoGetBlockStatusData { 157461007b31SStefan Hajnoczi BlockDriverState *bs; 157561007b31SStefan Hajnoczi BlockDriverState *base; 157667a0fd2aSFam Zheng BlockDriverState **file; 157761007b31SStefan Hajnoczi int64_t sector_num; 157861007b31SStefan Hajnoczi int nb_sectors; 157961007b31SStefan Hajnoczi int *pnum; 158061007b31SStefan Hajnoczi int64_t ret; 158161007b31SStefan Hajnoczi bool done; 158261007b31SStefan Hajnoczi } BdrvCoGetBlockStatusData; 158361007b31SStefan Hajnoczi 158461007b31SStefan Hajnoczi /* 158561007b31SStefan Hajnoczi * Returns the allocation status of the specified sectors. 158661007b31SStefan Hajnoczi * Drivers not implementing the functionality are assumed to not support 158761007b31SStefan Hajnoczi * backing files, hence all their sectors are reported as allocated. 158861007b31SStefan Hajnoczi * 158961007b31SStefan Hajnoczi * If 'sector_num' is beyond the end of the disk image the return value is 0 159061007b31SStefan Hajnoczi * and 'pnum' is set to 0. 
159161007b31SStefan Hajnoczi * 159261007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 159361007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 159461007b31SStefan Hajnoczi * allocated/unallocated state. 159561007b31SStefan Hajnoczi * 159661007b31SStefan Hajnoczi * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 159761007b31SStefan Hajnoczi * beyond the end of the disk image it will be clamped. 159867a0fd2aSFam Zheng * 159967a0fd2aSFam Zheng * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file' 160067a0fd2aSFam Zheng * points to the BDS which the sector range is allocated in. 160161007b31SStefan Hajnoczi */ 160261007b31SStefan Hajnoczi static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 160361007b31SStefan Hajnoczi int64_t sector_num, 160467a0fd2aSFam Zheng int nb_sectors, int *pnum, 160567a0fd2aSFam Zheng BlockDriverState **file) 160661007b31SStefan Hajnoczi { 160761007b31SStefan Hajnoczi int64_t total_sectors; 160861007b31SStefan Hajnoczi int64_t n; 160961007b31SStefan Hajnoczi int64_t ret, ret2; 161061007b31SStefan Hajnoczi 161161007b31SStefan Hajnoczi total_sectors = bdrv_nb_sectors(bs); 161261007b31SStefan Hajnoczi if (total_sectors < 0) { 161361007b31SStefan Hajnoczi return total_sectors; 161461007b31SStefan Hajnoczi } 161561007b31SStefan Hajnoczi 161661007b31SStefan Hajnoczi if (sector_num >= total_sectors) { 161761007b31SStefan Hajnoczi *pnum = 0; 161861007b31SStefan Hajnoczi return 0; 161961007b31SStefan Hajnoczi } 162061007b31SStefan Hajnoczi 162161007b31SStefan Hajnoczi n = total_sectors - sector_num; 162261007b31SStefan Hajnoczi if (n < nb_sectors) { 162361007b31SStefan Hajnoczi nb_sectors = n; 162461007b31SStefan Hajnoczi } 162561007b31SStefan Hajnoczi 162661007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_get_block_status) { 162761007b31SStefan Hajnoczi *pnum = nb_sectors; 162861007b31SStefan Hajnoczi ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 162961007b31SStefan Hajnoczi if (bs->drv->protocol_name) { 163061007b31SStefan Hajnoczi ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 163161007b31SStefan Hajnoczi } 163261007b31SStefan Hajnoczi return ret; 163361007b31SStefan Hajnoczi } 163461007b31SStefan Hajnoczi 163567a0fd2aSFam Zheng *file = NULL; 163667a0fd2aSFam Zheng ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum, 163767a0fd2aSFam Zheng file); 163861007b31SStefan Hajnoczi if (ret < 0) { 163961007b31SStefan Hajnoczi *pnum = 0; 164061007b31SStefan Hajnoczi return ret; 164161007b31SStefan Hajnoczi } 164261007b31SStefan Hajnoczi 164361007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_RAW) { 164461007b31SStefan Hajnoczi assert(ret & BDRV_BLOCK_OFFSET_VALID); 16459a4f4c31SKevin Wolf return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS, 164667a0fd2aSFam Zheng *pnum, pnum, file); 164761007b31SStefan Hajnoczi } 164861007b31SStefan Hajnoczi 164961007b31SStefan Hajnoczi if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 165061007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ALLOCATED; 1651a53f1a95SPaolo Bonzini } else { 165261007b31SStefan Hajnoczi if (bdrv_unallocated_blocks_are_zero(bs)) { 165361007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 1654760e0063SKevin Wolf } else if (bs->backing) { 1655760e0063SKevin Wolf BlockDriverState *bs2 = bs->backing->bs; 165661007b31SStefan Hajnoczi int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 165761007b31SStefan Hajnoczi if (nb_sectors2 >= 0 && sector_num >= 
nb_sectors2) { 165861007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 165961007b31SStefan Hajnoczi } 166061007b31SStefan Hajnoczi } 166161007b31SStefan Hajnoczi } 166261007b31SStefan Hajnoczi 1663ac987b30SFam Zheng if (*file && *file != bs && 166461007b31SStefan Hajnoczi (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 166561007b31SStefan Hajnoczi (ret & BDRV_BLOCK_OFFSET_VALID)) { 166667a0fd2aSFam Zheng BlockDriverState *file2; 166761007b31SStefan Hajnoczi int file_pnum; 166861007b31SStefan Hajnoczi 1669ac987b30SFam Zheng ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS, 167067a0fd2aSFam Zheng *pnum, &file_pnum, &file2); 167161007b31SStefan Hajnoczi if (ret2 >= 0) { 167261007b31SStefan Hajnoczi /* Ignore errors. This is just providing extra information, it 167361007b31SStefan Hajnoczi * is useful but not necessary. 167461007b31SStefan Hajnoczi */ 167561007b31SStefan Hajnoczi if (!file_pnum) { 167661007b31SStefan Hajnoczi /* !file_pnum indicates an offset at or beyond the EOF; it is 167761007b31SStefan Hajnoczi * perfectly valid for the format block driver to point to such 167861007b31SStefan Hajnoczi * offsets, so catch it and mark everything as zero */ 167961007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 168061007b31SStefan Hajnoczi } else { 168161007b31SStefan Hajnoczi /* Limit request to the range reported by the protocol driver */ 168261007b31SStefan Hajnoczi *pnum = file_pnum; 168361007b31SStefan Hajnoczi ret |= (ret2 & BDRV_BLOCK_ZERO); 168461007b31SStefan Hajnoczi } 168561007b31SStefan Hajnoczi } 168661007b31SStefan Hajnoczi } 168761007b31SStefan Hajnoczi 168861007b31SStefan Hajnoczi return ret; 168961007b31SStefan Hajnoczi } 169061007b31SStefan Hajnoczi 1691ba3f0e25SFam Zheng static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs, 1692ba3f0e25SFam Zheng BlockDriverState *base, 1693ba3f0e25SFam Zheng int64_t sector_num, 1694ba3f0e25SFam Zheng int nb_sectors, 169567a0fd2aSFam Zheng int *pnum, 169667a0fd2aSFam Zheng BlockDriverState **file) 1697ba3f0e25SFam Zheng { 1698ba3f0e25SFam Zheng BlockDriverState *p; 1699ba3f0e25SFam Zheng int64_t ret = 0; 1700ba3f0e25SFam Zheng 1701ba3f0e25SFam Zheng assert(bs != base); 1702760e0063SKevin Wolf for (p = bs; p != base; p = backing_bs(p)) { 170367a0fd2aSFam Zheng ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file); 1704ba3f0e25SFam Zheng if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) { 1705ba3f0e25SFam Zheng break; 1706ba3f0e25SFam Zheng } 1707ba3f0e25SFam Zheng /* [sector_num, pnum] unallocated on this layer, which could be only 1708ba3f0e25SFam Zheng * the first part of [sector_num, nb_sectors]. 
*/ 1709ba3f0e25SFam Zheng nb_sectors = MIN(nb_sectors, *pnum); 1710ba3f0e25SFam Zheng } 1711ba3f0e25SFam Zheng return ret; 1712ba3f0e25SFam Zheng } 1713ba3f0e25SFam Zheng 1714ba3f0e25SFam Zheng /* Coroutine wrapper for bdrv_get_block_status_above() */ 1715ba3f0e25SFam Zheng static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque) 171661007b31SStefan Hajnoczi { 171761007b31SStefan Hajnoczi BdrvCoGetBlockStatusData *data = opaque; 171861007b31SStefan Hajnoczi 1719ba3f0e25SFam Zheng data->ret = bdrv_co_get_block_status_above(data->bs, data->base, 1720ba3f0e25SFam Zheng data->sector_num, 1721ba3f0e25SFam Zheng data->nb_sectors, 172267a0fd2aSFam Zheng data->pnum, 172367a0fd2aSFam Zheng data->file); 172461007b31SStefan Hajnoczi data->done = true; 172561007b31SStefan Hajnoczi } 172661007b31SStefan Hajnoczi 172761007b31SStefan Hajnoczi /* 1728ba3f0e25SFam Zheng * Synchronous wrapper around bdrv_co_get_block_status_above(). 172961007b31SStefan Hajnoczi * 1730ba3f0e25SFam Zheng * See bdrv_co_get_block_status_above() for details. 173161007b31SStefan Hajnoczi */ 1732ba3f0e25SFam Zheng int64_t bdrv_get_block_status_above(BlockDriverState *bs, 1733ba3f0e25SFam Zheng BlockDriverState *base, 1734ba3f0e25SFam Zheng int64_t sector_num, 173567a0fd2aSFam Zheng int nb_sectors, int *pnum, 173667a0fd2aSFam Zheng BlockDriverState **file) 173761007b31SStefan Hajnoczi { 173861007b31SStefan Hajnoczi Coroutine *co; 173961007b31SStefan Hajnoczi BdrvCoGetBlockStatusData data = { 174061007b31SStefan Hajnoczi .bs = bs, 1741ba3f0e25SFam Zheng .base = base, 174267a0fd2aSFam Zheng .file = file, 174361007b31SStefan Hajnoczi .sector_num = sector_num, 174461007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 174561007b31SStefan Hajnoczi .pnum = pnum, 174661007b31SStefan Hajnoczi .done = false, 174761007b31SStefan Hajnoczi }; 174861007b31SStefan Hajnoczi 174961007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 175061007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 1751ba3f0e25SFam Zheng bdrv_get_block_status_above_co_entry(&data); 175261007b31SStefan Hajnoczi } else { 175361007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 175461007b31SStefan Hajnoczi 1755ba3f0e25SFam Zheng co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry); 175661007b31SStefan Hajnoczi qemu_coroutine_enter(co, &data); 175761007b31SStefan Hajnoczi while (!data.done) { 175861007b31SStefan Hajnoczi aio_poll(aio_context, true); 175961007b31SStefan Hajnoczi } 176061007b31SStefan Hajnoczi } 176161007b31SStefan Hajnoczi return data.ret; 176261007b31SStefan Hajnoczi } 176361007b31SStefan Hajnoczi 1764ba3f0e25SFam Zheng int64_t bdrv_get_block_status(BlockDriverState *bs, 1765ba3f0e25SFam Zheng int64_t sector_num, 176667a0fd2aSFam Zheng int nb_sectors, int *pnum, 176767a0fd2aSFam Zheng BlockDriverState **file) 1768ba3f0e25SFam Zheng { 1769760e0063SKevin Wolf return bdrv_get_block_status_above(bs, backing_bs(bs), 177067a0fd2aSFam Zheng sector_num, nb_sectors, pnum, file); 1771ba3f0e25SFam Zheng } 1772ba3f0e25SFam Zheng 177361007b31SStefan Hajnoczi int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 177461007b31SStefan Hajnoczi int nb_sectors, int *pnum) 177561007b31SStefan Hajnoczi { 177667a0fd2aSFam Zheng BlockDriverState *file; 177767a0fd2aSFam Zheng int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum, 177867a0fd2aSFam Zheng &file); 177961007b31SStefan Hajnoczi if (ret < 0) { 178061007b31SStefan Hajnoczi return ret; 178161007b31SStefan Hajnoczi } 
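    /* Note (editorial addition): bdrv_co_get_block_status() above sets
     * BDRV_BLOCK_ALLOCATED whenever the range either contains data or is
     * reported as reading zero from this layer, so collapsing the rich
     * status word to that single bit is the whole allocated/unallocated
     * answer that callers of bdrv_is_allocated() expect. */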
178261007b31SStefan Hajnoczi return !!(ret & BDRV_BLOCK_ALLOCATED); 178361007b31SStefan Hajnoczi } 178461007b31SStefan Hajnoczi 178561007b31SStefan Hajnoczi /* 178661007b31SStefan Hajnoczi * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 178761007b31SStefan Hajnoczi * 178861007b31SStefan Hajnoczi * Return true if the given sector is allocated in any image between 178961007b31SStefan Hajnoczi * BASE and TOP (inclusive). BASE can be NULL to check if the given 179061007b31SStefan Hajnoczi * sector is allocated in any image of the chain. Return false otherwise. 179161007b31SStefan Hajnoczi * 179261007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 179361007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 179461007b31SStefan Hajnoczi * allocated/unallocated state. 179561007b31SStefan Hajnoczi * 179661007b31SStefan Hajnoczi */ 179761007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top, 179861007b31SStefan Hajnoczi BlockDriverState *base, 179961007b31SStefan Hajnoczi int64_t sector_num, 180061007b31SStefan Hajnoczi int nb_sectors, int *pnum) 180161007b31SStefan Hajnoczi { 180261007b31SStefan Hajnoczi BlockDriverState *intermediate; 180361007b31SStefan Hajnoczi int ret, n = nb_sectors; 180461007b31SStefan Hajnoczi 180561007b31SStefan Hajnoczi intermediate = top; 180661007b31SStefan Hajnoczi while (intermediate && intermediate != base) { 180761007b31SStefan Hajnoczi int pnum_inter; 180861007b31SStefan Hajnoczi ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 180961007b31SStefan Hajnoczi &pnum_inter); 181061007b31SStefan Hajnoczi if (ret < 0) { 181161007b31SStefan Hajnoczi return ret; 181261007b31SStefan Hajnoczi } else if (ret) { 181361007b31SStefan Hajnoczi *pnum = pnum_inter; 181461007b31SStefan Hajnoczi return 1; 181561007b31SStefan Hajnoczi } 181661007b31SStefan Hajnoczi 181761007b31SStefan Hajnoczi /* 181861007b31SStefan Hajnoczi * [sector_num, nb_sectors] is unallocated on top but intermediate 181961007b31SStefan Hajnoczi * might have 182061007b31SStefan Hajnoczi * 182161007b31SStefan Hajnoczi * [sector_num+x, nb_sectors] allocated.
182261007b31SStefan Hajnoczi */ 182361007b31SStefan Hajnoczi if (n > pnum_inter && 182461007b31SStefan Hajnoczi (intermediate == top || 182561007b31SStefan Hajnoczi sector_num + pnum_inter < intermediate->total_sectors)) { 182661007b31SStefan Hajnoczi n = pnum_inter; 182761007b31SStefan Hajnoczi } 182861007b31SStefan Hajnoczi 1829760e0063SKevin Wolf intermediate = backing_bs(intermediate); 183061007b31SStefan Hajnoczi } 183161007b31SStefan Hajnoczi 183261007b31SStefan Hajnoczi *pnum = n; 183361007b31SStefan Hajnoczi return 0; 183461007b31SStefan Hajnoczi } 183561007b31SStefan Hajnoczi 183661007b31SStefan Hajnoczi int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 183761007b31SStefan Hajnoczi const uint8_t *buf, int nb_sectors) 183861007b31SStefan Hajnoczi { 183961007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 184061007b31SStefan Hajnoczi int ret; 184161007b31SStefan Hajnoczi 184261007b31SStefan Hajnoczi if (!drv) { 184361007b31SStefan Hajnoczi return -ENOMEDIUM; 184461007b31SStefan Hajnoczi } 184561007b31SStefan Hajnoczi if (!drv->bdrv_write_compressed) { 184661007b31SStefan Hajnoczi return -ENOTSUP; 184761007b31SStefan Hajnoczi } 184861007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 184961007b31SStefan Hajnoczi if (ret < 0) { 185061007b31SStefan Hajnoczi return ret; 185161007b31SStefan Hajnoczi } 185261007b31SStefan Hajnoczi 185361007b31SStefan Hajnoczi assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 185461007b31SStefan Hajnoczi 185561007b31SStefan Hajnoczi return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 185661007b31SStefan Hajnoczi } 185761007b31SStefan Hajnoczi 18581a8ae822SKevin Wolf typedef struct BdrvVmstateCo { 18591a8ae822SKevin Wolf BlockDriverState *bs; 18601a8ae822SKevin Wolf QEMUIOVector *qiov; 18611a8ae822SKevin Wolf int64_t pos; 18621a8ae822SKevin Wolf bool is_read; 18631a8ae822SKevin Wolf int ret; 18641a8ae822SKevin Wolf } BdrvVmstateCo; 18651a8ae822SKevin Wolf 18661a8ae822SKevin Wolf static int coroutine_fn 18671a8ae822SKevin Wolf bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 18681a8ae822SKevin Wolf bool is_read) 18691a8ae822SKevin Wolf { 18701a8ae822SKevin Wolf BlockDriver *drv = bs->drv; 18711a8ae822SKevin Wolf 18721a8ae822SKevin Wolf if (!drv) { 18731a8ae822SKevin Wolf return -ENOMEDIUM; 18741a8ae822SKevin Wolf } else if (drv->bdrv_load_vmstate) { 18751a8ae822SKevin Wolf return is_read ? 
drv->bdrv_load_vmstate(bs, qiov, pos) 18761a8ae822SKevin Wolf : drv->bdrv_save_vmstate(bs, qiov, pos); 18771a8ae822SKevin Wolf } else if (bs->file) { 18781a8ae822SKevin Wolf return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read); 18791a8ae822SKevin Wolf } 18801a8ae822SKevin Wolf 18811a8ae822SKevin Wolf return -ENOTSUP; 18821a8ae822SKevin Wolf } 18831a8ae822SKevin Wolf 18841a8ae822SKevin Wolf static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque) 18851a8ae822SKevin Wolf { 18861a8ae822SKevin Wolf BdrvVmstateCo *co = opaque; 18871a8ae822SKevin Wolf co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read); 18881a8ae822SKevin Wolf } 18891a8ae822SKevin Wolf 18901a8ae822SKevin Wolf static inline int 18911a8ae822SKevin Wolf bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 18921a8ae822SKevin Wolf bool is_read) 18931a8ae822SKevin Wolf { 18941a8ae822SKevin Wolf if (qemu_in_coroutine()) { 18951a8ae822SKevin Wolf return bdrv_co_rw_vmstate(bs, qiov, pos, is_read); 18961a8ae822SKevin Wolf } else { 18971a8ae822SKevin Wolf BdrvVmstateCo data = { 18981a8ae822SKevin Wolf .bs = bs, 18991a8ae822SKevin Wolf .qiov = qiov, 19001a8ae822SKevin Wolf .pos = pos, 19011a8ae822SKevin Wolf .is_read = is_read, 19021a8ae822SKevin Wolf .ret = -EINPROGRESS, 19031a8ae822SKevin Wolf }; 19041a8ae822SKevin Wolf Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry); 19051a8ae822SKevin Wolf 19061a8ae822SKevin Wolf qemu_coroutine_enter(co, &data); 19071a8ae822SKevin Wolf while (data.ret == -EINPROGRESS) { 19081a8ae822SKevin Wolf aio_poll(bdrv_get_aio_context(bs), true); 19091a8ae822SKevin Wolf } 19101a8ae822SKevin Wolf return data.ret; 19111a8ae822SKevin Wolf } 19121a8ae822SKevin Wolf } 19131a8ae822SKevin Wolf 191461007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 191561007b31SStefan Hajnoczi int64_t pos, int size) 191661007b31SStefan Hajnoczi { 191761007b31SStefan Hajnoczi QEMUIOVector qiov; 191861007b31SStefan Hajnoczi struct iovec iov = { 191961007b31SStefan Hajnoczi .iov_base = (void *) buf, 192061007b31SStefan Hajnoczi .iov_len = size, 192161007b31SStefan Hajnoczi }; 1922b433d942SKevin Wolf int ret; 192361007b31SStefan Hajnoczi 192461007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 1925b433d942SKevin Wolf 1926b433d942SKevin Wolf ret = bdrv_writev_vmstate(bs, &qiov, pos); 1927b433d942SKevin Wolf if (ret < 0) { 1928b433d942SKevin Wolf return ret; 1929b433d942SKevin Wolf } 1930b433d942SKevin Wolf 1931b433d942SKevin Wolf return size; 193261007b31SStefan Hajnoczi } 193361007b31SStefan Hajnoczi 193461007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 193561007b31SStefan Hajnoczi { 19361a8ae822SKevin Wolf return bdrv_rw_vmstate(bs, qiov, pos, false); 193761007b31SStefan Hajnoczi } 193861007b31SStefan Hajnoczi 193961007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 194061007b31SStefan Hajnoczi int64_t pos, int size) 194161007b31SStefan Hajnoczi { 19425ddda0b8SKevin Wolf QEMUIOVector qiov; 19435ddda0b8SKevin Wolf struct iovec iov = { 19445ddda0b8SKevin Wolf .iov_base = buf, 19455ddda0b8SKevin Wolf .iov_len = size, 19465ddda0b8SKevin Wolf }; 1947b433d942SKevin Wolf int ret; 19485ddda0b8SKevin Wolf 19495ddda0b8SKevin Wolf qemu_iovec_init_external(&qiov, &iov, 1); 1950b433d942SKevin Wolf ret = bdrv_readv_vmstate(bs, &qiov, pos); 1951b433d942SKevin Wolf if (ret < 0) { 1952b433d942SKevin Wolf return ret; 1953b433d942SKevin Wolf } 1954b433d942SKevin 
Wolf 1955b433d942SKevin Wolf return size; 19565ddda0b8SKevin Wolf } 19575ddda0b8SKevin Wolf 19585ddda0b8SKevin Wolf int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 19595ddda0b8SKevin Wolf { 19601a8ae822SKevin Wolf return bdrv_rw_vmstate(bs, qiov, pos, true); 196161007b31SStefan Hajnoczi } 196261007b31SStefan Hajnoczi 196361007b31SStefan Hajnoczi /**************************************************************/ 196461007b31SStefan Hajnoczi /* async I/Os */ 196561007b31SStefan Hajnoczi 1966ebb7af21SKevin Wolf BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num, 196761007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 196861007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 196961007b31SStefan Hajnoczi { 1970ebb7af21SKevin Wolf trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque); 197161007b31SStefan Hajnoczi 1972adad6496SKevin Wolf return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0, 197361007b31SStefan Hajnoczi cb, opaque, false); 197461007b31SStefan Hajnoczi } 197561007b31SStefan Hajnoczi 19760d1049c7SKevin Wolf BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num, 197761007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 197861007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 197961007b31SStefan Hajnoczi { 19800d1049c7SKevin Wolf trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque); 198161007b31SStefan Hajnoczi 1982adad6496SKevin Wolf return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0, 198361007b31SStefan Hajnoczi cb, opaque, true); 198461007b31SStefan Hajnoczi } 198561007b31SStefan Hajnoczi 198661007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb) 198761007b31SStefan Hajnoczi { 198861007b31SStefan Hajnoczi qemu_aio_ref(acb); 198961007b31SStefan Hajnoczi bdrv_aio_cancel_async(acb); 199061007b31SStefan Hajnoczi while (acb->refcnt > 1) { 199161007b31SStefan Hajnoczi if (acb->aiocb_info->get_aio_context) { 199261007b31SStefan Hajnoczi aio_poll(acb->aiocb_info->get_aio_context(acb), true); 199361007b31SStefan Hajnoczi } else if (acb->bs) { 199461007b31SStefan Hajnoczi aio_poll(bdrv_get_aio_context(acb->bs), true); 199561007b31SStefan Hajnoczi } else { 199661007b31SStefan Hajnoczi abort(); 199761007b31SStefan Hajnoczi } 199861007b31SStefan Hajnoczi } 199961007b31SStefan Hajnoczi qemu_aio_unref(acb); 200061007b31SStefan Hajnoczi } 200161007b31SStefan Hajnoczi 200261007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements 200361007b31SStefan Hajnoczi * cancel_async, otherwise we do nothing and let the request normally complete. 200461007b31SStefan Hajnoczi * In either case the completion callback must be called. 
*/ 200561007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb) 200661007b31SStefan Hajnoczi { 200761007b31SStefan Hajnoczi if (acb->aiocb_info->cancel_async) { 200861007b31SStefan Hajnoczi acb->aiocb_info->cancel_async(acb); 200961007b31SStefan Hajnoczi } 201061007b31SStefan Hajnoczi } 201161007b31SStefan Hajnoczi 201261007b31SStefan Hajnoczi /**************************************************************/ 201361007b31SStefan Hajnoczi /* async block device emulation */ 201461007b31SStefan Hajnoczi 201541574268SEric Blake typedef struct BlockRequest { 201641574268SEric Blake union { 201741574268SEric Blake /* Used during read, write, trim */ 201841574268SEric Blake struct { 201941574268SEric Blake int64_t sector; 202041574268SEric Blake int nb_sectors; 202141574268SEric Blake int flags; 202241574268SEric Blake QEMUIOVector *qiov; 202341574268SEric Blake }; 202441574268SEric Blake /* Used during ioctl */ 202541574268SEric Blake struct { 202641574268SEric Blake int req; 202741574268SEric Blake void *buf; 202841574268SEric Blake }; 202941574268SEric Blake }; 203041574268SEric Blake BlockCompletionFunc *cb; 203141574268SEric Blake void *opaque; 203241574268SEric Blake 203341574268SEric Blake int error; 203441574268SEric Blake } BlockRequest; 203541574268SEric Blake 203661007b31SStefan Hajnoczi typedef struct BlockAIOCBCoroutine { 203761007b31SStefan Hajnoczi BlockAIOCB common; 2038adad6496SKevin Wolf BdrvChild *child; 203961007b31SStefan Hajnoczi BlockRequest req; 204061007b31SStefan Hajnoczi bool is_write; 204161007b31SStefan Hajnoczi bool need_bh; 204261007b31SStefan Hajnoczi bool *done; 204361007b31SStefan Hajnoczi QEMUBH* bh; 204461007b31SStefan Hajnoczi } BlockAIOCBCoroutine; 204561007b31SStefan Hajnoczi 204661007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_co_aiocb_info = { 204761007b31SStefan Hajnoczi .aiocb_size = sizeof(BlockAIOCBCoroutine), 204861007b31SStefan Hajnoczi }; 204961007b31SStefan Hajnoczi 205061007b31SStefan Hajnoczi static void bdrv_co_complete(BlockAIOCBCoroutine *acb) 205161007b31SStefan Hajnoczi { 205261007b31SStefan Hajnoczi if (!acb->need_bh) { 205361007b31SStefan Hajnoczi acb->common.cb(acb->common.opaque, acb->req.error); 205461007b31SStefan Hajnoczi qemu_aio_unref(acb); 205561007b31SStefan Hajnoczi } 205661007b31SStefan Hajnoczi } 205761007b31SStefan Hajnoczi 205861007b31SStefan Hajnoczi static void bdrv_co_em_bh(void *opaque) 205961007b31SStefan Hajnoczi { 206061007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 206161007b31SStefan Hajnoczi 206261007b31SStefan Hajnoczi assert(!acb->need_bh); 206361007b31SStefan Hajnoczi qemu_bh_delete(acb->bh); 206461007b31SStefan Hajnoczi bdrv_co_complete(acb); 206561007b31SStefan Hajnoczi } 206661007b31SStefan Hajnoczi 206761007b31SStefan Hajnoczi static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) 206861007b31SStefan Hajnoczi { 206961007b31SStefan Hajnoczi acb->need_bh = false; 207061007b31SStefan Hajnoczi if (acb->req.error != -EINPROGRESS) { 207161007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 207261007b31SStefan Hajnoczi 207361007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 207461007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 207561007b31SStefan Hajnoczi } 207661007b31SStefan Hajnoczi } 207761007b31SStefan Hajnoczi 207861007b31SStefan Hajnoczi /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 207961007b31SStefan Hajnoczi static void coroutine_fn bdrv_co_do_rw(void *opaque) 208061007b31SStefan Hajnoczi { 208161007b31SStefan 
Hajnoczi BlockAIOCBCoroutine *acb = opaque; 208261007b31SStefan Hajnoczi 208361007b31SStefan Hajnoczi if (!acb->is_write) { 2084adad6496SKevin Wolf acb->req.error = bdrv_co_do_readv(acb->child, acb->req.sector, 208561007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 208661007b31SStefan Hajnoczi } else { 2087adad6496SKevin Wolf acb->req.error = bdrv_co_do_writev(acb->child, acb->req.sector, 208861007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 208961007b31SStefan Hajnoczi } 209061007b31SStefan Hajnoczi 209161007b31SStefan Hajnoczi bdrv_co_complete(acb); 209261007b31SStefan Hajnoczi } 209361007b31SStefan Hajnoczi 2094adad6496SKevin Wolf static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child, 209561007b31SStefan Hajnoczi int64_t sector_num, 209661007b31SStefan Hajnoczi QEMUIOVector *qiov, 209761007b31SStefan Hajnoczi int nb_sectors, 209861007b31SStefan Hajnoczi BdrvRequestFlags flags, 209961007b31SStefan Hajnoczi BlockCompletionFunc *cb, 210061007b31SStefan Hajnoczi void *opaque, 210161007b31SStefan Hajnoczi bool is_write) 210261007b31SStefan Hajnoczi { 210361007b31SStefan Hajnoczi Coroutine *co; 210461007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 210561007b31SStefan Hajnoczi 2106adad6496SKevin Wolf acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque); 2107adad6496SKevin Wolf acb->child = child; 210861007b31SStefan Hajnoczi acb->need_bh = true; 210961007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 211061007b31SStefan Hajnoczi acb->req.sector = sector_num; 211161007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 211261007b31SStefan Hajnoczi acb->req.qiov = qiov; 211361007b31SStefan Hajnoczi acb->req.flags = flags; 211461007b31SStefan Hajnoczi acb->is_write = is_write; 211561007b31SStefan Hajnoczi 211661007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_co_do_rw); 211761007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 211861007b31SStefan Hajnoczi 211961007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 212061007b31SStefan Hajnoczi return &acb->common; 212161007b31SStefan Hajnoczi } 212261007b31SStefan Hajnoczi 212361007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 212461007b31SStefan Hajnoczi { 212561007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 212661007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 212761007b31SStefan Hajnoczi 212861007b31SStefan Hajnoczi acb->req.error = bdrv_co_flush(bs); 212961007b31SStefan Hajnoczi bdrv_co_complete(acb); 213061007b31SStefan Hajnoczi } 213161007b31SStefan Hajnoczi 213261007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, 213361007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 213461007b31SStefan Hajnoczi { 213561007b31SStefan Hajnoczi trace_bdrv_aio_flush(bs, opaque); 213661007b31SStefan Hajnoczi 213761007b31SStefan Hajnoczi Coroutine *co; 213861007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 213961007b31SStefan Hajnoczi 214061007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 214161007b31SStefan Hajnoczi acb->need_bh = true; 214261007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 214361007b31SStefan Hajnoczi 214461007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 214561007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 214661007b31SStefan Hajnoczi 214761007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 214861007b31SStefan Hajnoczi return &acb->common; 214961007b31SStefan Hajnoczi } 
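/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * one way a non-coroutine caller could drive the callback-based
 * bdrv_aio_flush() above and wait for its completion. FlushWaiter,
 * flush_done() and flush_and_wait() are hypothetical names introduced here;
 * only bdrv_aio_flush(), bdrv_get_aio_context() and aio_poll() are taken
 * from this file.
 */
typedef struct FlushWaiter {
    int ret;
    bool done;
} FlushWaiter;

static void flush_done(void *opaque, int ret)
{
    FlushWaiter *w = opaque;

    w->ret = ret;      /* 0 on success, negative errno on failure */
    w->done = true;
}

static int flush_and_wait(BlockDriverState *bs)
{
    FlushWaiter w = { .ret = -EINPROGRESS, .done = false };

    if (!bdrv_aio_flush(bs, flush_done, &w)) {
        return -EIO;   /* defensive; bdrv_aio_flush() above never returns NULL */
    }
    /* Poll the BDS's AioContext until the completion callback has run,
     * mirroring the synchronous wait loops used elsewhere in this file. */
    while (!w.done) {
        aio_poll(bdrv_get_aio_context(bs), true);
    }
    return w.ret;
}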
215061007b31SStefan Hajnoczi 215161007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 215261007b31SStefan Hajnoczi { 215361007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 215461007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 215561007b31SStefan Hajnoczi 215661007b31SStefan Hajnoczi acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 215761007b31SStefan Hajnoczi bdrv_co_complete(acb); 215861007b31SStefan Hajnoczi } 215961007b31SStefan Hajnoczi 216061007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, 216161007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 216261007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 216361007b31SStefan Hajnoczi { 216461007b31SStefan Hajnoczi Coroutine *co; 216561007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 216661007b31SStefan Hajnoczi 216761007b31SStefan Hajnoczi trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 216861007b31SStefan Hajnoczi 216961007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 217061007b31SStefan Hajnoczi acb->need_bh = true; 217161007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 217261007b31SStefan Hajnoczi acb->req.sector = sector_num; 217361007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 217461007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 217561007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 217661007b31SStefan Hajnoczi 217761007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 217861007b31SStefan Hajnoczi return &acb->common; 217961007b31SStefan Hajnoczi } 218061007b31SStefan Hajnoczi 218161007b31SStefan Hajnoczi void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 218261007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 218361007b31SStefan Hajnoczi { 218461007b31SStefan Hajnoczi BlockAIOCB *acb; 218561007b31SStefan Hajnoczi 2186c84b3192SPaolo Bonzini acb = g_malloc(aiocb_info->aiocb_size); 218761007b31SStefan Hajnoczi acb->aiocb_info = aiocb_info; 218861007b31SStefan Hajnoczi acb->bs = bs; 218961007b31SStefan Hajnoczi acb->cb = cb; 219061007b31SStefan Hajnoczi acb->opaque = opaque; 219161007b31SStefan Hajnoczi acb->refcnt = 1; 219261007b31SStefan Hajnoczi return acb; 219361007b31SStefan Hajnoczi } 219461007b31SStefan Hajnoczi 219561007b31SStefan Hajnoczi void qemu_aio_ref(void *p) 219661007b31SStefan Hajnoczi { 219761007b31SStefan Hajnoczi BlockAIOCB *acb = p; 219861007b31SStefan Hajnoczi acb->refcnt++; 219961007b31SStefan Hajnoczi } 220061007b31SStefan Hajnoczi 220161007b31SStefan Hajnoczi void qemu_aio_unref(void *p) 220261007b31SStefan Hajnoczi { 220361007b31SStefan Hajnoczi BlockAIOCB *acb = p; 220461007b31SStefan Hajnoczi assert(acb->refcnt > 0); 220561007b31SStefan Hajnoczi if (--acb->refcnt == 0) { 2206c84b3192SPaolo Bonzini g_free(acb); 220761007b31SStefan Hajnoczi } 220861007b31SStefan Hajnoczi } 220961007b31SStefan Hajnoczi 221061007b31SStefan Hajnoczi /**************************************************************/ 221161007b31SStefan Hajnoczi /* Coroutine block device emulation */ 221261007b31SStefan Hajnoczi 221361007b31SStefan Hajnoczi static void coroutine_fn bdrv_flush_co_entry(void *opaque) 221461007b31SStefan Hajnoczi { 221561007b31SStefan Hajnoczi RwCo *rwco = opaque; 221661007b31SStefan Hajnoczi 221761007b31SStefan Hajnoczi rwco->ret = bdrv_co_flush(rwco->bs); 221861007b31SStefan Hajnoczi } 221961007b31SStefan Hajnoczi 222061007b31SStefan Hajnoczi int coroutine_fn 
bdrv_co_flush(BlockDriverState *bs) 222161007b31SStefan Hajnoczi { 222261007b31SStefan Hajnoczi int ret; 2223cdb5e315SFam Zheng BdrvTrackedRequest req; 222461007b31SStefan Hajnoczi 22251b6bc94dSDimitris Aragiorgis if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 22261b6bc94dSDimitris Aragiorgis bdrv_is_sg(bs)) { 222761007b31SStefan Hajnoczi return 0; 222861007b31SStefan Hajnoczi } 222961007b31SStefan Hajnoczi 2230cdb5e315SFam Zheng tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH); 2231c32b82afSPavel Dovgalyuk 2232c32b82afSPavel Dovgalyuk /* Write back all layers by calling one driver function */ 2233c32b82afSPavel Dovgalyuk if (bs->drv->bdrv_co_flush) { 2234c32b82afSPavel Dovgalyuk ret = bs->drv->bdrv_co_flush(bs); 2235c32b82afSPavel Dovgalyuk goto out; 2236c32b82afSPavel Dovgalyuk } 2237c32b82afSPavel Dovgalyuk 223861007b31SStefan Hajnoczi /* Write back cached data to the OS even with cache=unsafe */ 223961007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 224061007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_os) { 224161007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_os(bs); 224261007b31SStefan Hajnoczi if (ret < 0) { 2243cdb5e315SFam Zheng goto out; 224461007b31SStefan Hajnoczi } 224561007b31SStefan Hajnoczi } 224661007b31SStefan Hajnoczi 224761007b31SStefan Hajnoczi /* But don't actually force it to the disk with cache=unsafe */ 224861007b31SStefan Hajnoczi if (bs->open_flags & BDRV_O_NO_FLUSH) { 224961007b31SStefan Hajnoczi goto flush_parent; 225061007b31SStefan Hajnoczi } 225161007b31SStefan Hajnoczi 225261007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 225361007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_disk) { 225461007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_disk(bs); 225561007b31SStefan Hajnoczi } else if (bs->drv->bdrv_aio_flush) { 225661007b31SStefan Hajnoczi BlockAIOCB *acb; 225761007b31SStefan Hajnoczi CoroutineIOCompletion co = { 225861007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 225961007b31SStefan Hajnoczi }; 226061007b31SStefan Hajnoczi 226161007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 226261007b31SStefan Hajnoczi if (acb == NULL) { 226361007b31SStefan Hajnoczi ret = -EIO; 226461007b31SStefan Hajnoczi } else { 226561007b31SStefan Hajnoczi qemu_coroutine_yield(); 226661007b31SStefan Hajnoczi ret = co.ret; 226761007b31SStefan Hajnoczi } 226861007b31SStefan Hajnoczi } else { 226961007b31SStefan Hajnoczi /* 227061007b31SStefan Hajnoczi * Some block drivers always operate in either writethrough or unsafe 227161007b31SStefan Hajnoczi * mode and don't support bdrv_flush therefore. Usually qemu doesn't 227261007b31SStefan Hajnoczi * know how the server works (because the behaviour is hardcoded or 227361007b31SStefan Hajnoczi * depends on server-side configuration), so we can't ensure that 227461007b31SStefan Hajnoczi * everything is safe on disk. Returning an error doesn't work because 227561007b31SStefan Hajnoczi * that would break guests even if the server operates in writethrough 227661007b31SStefan Hajnoczi * mode. 227761007b31SStefan Hajnoczi * 227861007b31SStefan Hajnoczi * Let's hope the user knows what he's doing. 227961007b31SStefan Hajnoczi */ 228061007b31SStefan Hajnoczi ret = 0; 228161007b31SStefan Hajnoczi } 228261007b31SStefan Hajnoczi if (ret < 0) { 2283cdb5e315SFam Zheng goto out; 228461007b31SStefan Hajnoczi } 228561007b31SStefan Hajnoczi 228661007b31SStefan Hajnoczi /* Now flush the underlying protocol. 
It will also have BDRV_O_NO_FLUSH 228761007b31SStefan Hajnoczi * in the case of cache=unsafe, so there are no useless flushes. 228861007b31SStefan Hajnoczi */ 228961007b31SStefan Hajnoczi flush_parent: 2290cdb5e315SFam Zheng ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0; 2291cdb5e315SFam Zheng out: 2292cdb5e315SFam Zheng tracked_request_end(&req); 2293cdb5e315SFam Zheng return ret; 229461007b31SStefan Hajnoczi } 229561007b31SStefan Hajnoczi 229661007b31SStefan Hajnoczi int bdrv_flush(BlockDriverState *bs) 229761007b31SStefan Hajnoczi { 229861007b31SStefan Hajnoczi Coroutine *co; 229961007b31SStefan Hajnoczi RwCo rwco = { 230061007b31SStefan Hajnoczi .bs = bs, 230161007b31SStefan Hajnoczi .ret = NOT_DONE, 230261007b31SStefan Hajnoczi }; 230361007b31SStefan Hajnoczi 230461007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 230561007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 230661007b31SStefan Hajnoczi bdrv_flush_co_entry(&rwco); 230761007b31SStefan Hajnoczi } else { 230861007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 230961007b31SStefan Hajnoczi 231061007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_flush_co_entry); 231161007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 231261007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 231361007b31SStefan Hajnoczi aio_poll(aio_context, true); 231461007b31SStefan Hajnoczi } 231561007b31SStefan Hajnoczi } 231661007b31SStefan Hajnoczi 231761007b31SStefan Hajnoczi return rwco.ret; 231861007b31SStefan Hajnoczi } 231961007b31SStefan Hajnoczi 232061007b31SStefan Hajnoczi typedef struct DiscardCo { 232161007b31SStefan Hajnoczi BlockDriverState *bs; 232261007b31SStefan Hajnoczi int64_t sector_num; 232361007b31SStefan Hajnoczi int nb_sectors; 232461007b31SStefan Hajnoczi int ret; 232561007b31SStefan Hajnoczi } DiscardCo; 232661007b31SStefan Hajnoczi static void coroutine_fn bdrv_discard_co_entry(void *opaque) 232761007b31SStefan Hajnoczi { 232861007b31SStefan Hajnoczi DiscardCo *rwco = opaque; 232961007b31SStefan Hajnoczi 233061007b31SStefan Hajnoczi rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 233161007b31SStefan Hajnoczi } 233261007b31SStefan Hajnoczi 233361007b31SStefan Hajnoczi int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, 233461007b31SStefan Hajnoczi int nb_sectors) 233561007b31SStefan Hajnoczi { 2336b1066c87SFam Zheng BdrvTrackedRequest req; 233761007b31SStefan Hajnoczi int max_discard, ret; 233861007b31SStefan Hajnoczi 233961007b31SStefan Hajnoczi if (!bs->drv) { 234061007b31SStefan Hajnoczi return -ENOMEDIUM; 234161007b31SStefan Hajnoczi } 234261007b31SStefan Hajnoczi 234361007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 234461007b31SStefan Hajnoczi if (ret < 0) { 234561007b31SStefan Hajnoczi return ret; 234661007b31SStefan Hajnoczi } else if (bs->read_only) { 2347eaf5fe2dSPaolo Bonzini return -EPERM; 234861007b31SStefan Hajnoczi } 234904c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 235061007b31SStefan Hajnoczi 235161007b31SStefan Hajnoczi /* Do nothing if disabled. */ 235261007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 235361007b31SStefan Hajnoczi return 0; 235461007b31SStefan Hajnoczi } 235561007b31SStefan Hajnoczi 235661007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { 235761007b31SStefan Hajnoczi return 0; 235861007b31SStefan Hajnoczi } 235961007b31SStefan Hajnoczi 23603a36e474SDenis V. 
Lunev tracked_request_begin(&req, bs, sector_num << BDRV_SECTOR_BITS, 23613a36e474SDenis V. Lunev nb_sectors << BDRV_SECTOR_BITS, BDRV_TRACKED_DISCARD); 236250824995SFam Zheng 2363ec050f77SDenis V. Lunev ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req); 2364ec050f77SDenis V. Lunev if (ret < 0) { 2365ec050f77SDenis V. Lunev goto out; 2366ec050f77SDenis V. Lunev } 2367ec050f77SDenis V. Lunev 2368b9f7855aSEric Blake max_discard = MIN_NON_ZERO(bs->bl.max_pdiscard >> BDRV_SECTOR_BITS, 2369b9f7855aSEric Blake BDRV_REQUEST_MAX_SECTORS); 237061007b31SStefan Hajnoczi while (nb_sectors > 0) { 237161007b31SStefan Hajnoczi 237261007b31SStefan Hajnoczi int num = nb_sectors; 2373b9f7855aSEric Blake int discard_alignment = bs->bl.pdiscard_alignment >> BDRV_SECTOR_BITS; 237461007b31SStefan Hajnoczi 237561007b31SStefan Hajnoczi /* align request: trim the first fragment so the following ones start on a discard_alignment boundary */ 2376b9f7855aSEric Blake if (discard_alignment && 2377b9f7855aSEric Blake num >= discard_alignment && 2378b9f7855aSEric Blake sector_num % discard_alignment) { 2379b9f7855aSEric Blake if (num > discard_alignment) { 2380b9f7855aSEric Blake num = discard_alignment; 238161007b31SStefan Hajnoczi } 2382b9f7855aSEric Blake num -= sector_num % discard_alignment; 238361007b31SStefan Hajnoczi } 238461007b31SStefan Hajnoczi 238561007b31SStefan Hajnoczi /* limit request size */ 238661007b31SStefan Hajnoczi if (num > max_discard) { 238761007b31SStefan Hajnoczi num = max_discard; 238861007b31SStefan Hajnoczi } 238961007b31SStefan Hajnoczi 239061007b31SStefan Hajnoczi if (bs->drv->bdrv_co_discard) { 239161007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_discard(bs, sector_num, num); 239261007b31SStefan Hajnoczi } else { 239361007b31SStefan Hajnoczi BlockAIOCB *acb; 239461007b31SStefan Hajnoczi CoroutineIOCompletion co = { 239561007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 239661007b31SStefan Hajnoczi }; 239761007b31SStefan Hajnoczi 239861007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_discard(bs, sector_num, num, 239961007b31SStefan Hajnoczi bdrv_co_io_em_complete, &co); 240061007b31SStefan Hajnoczi if (acb == NULL) { 2401b1066c87SFam Zheng ret = -EIO; 2402b1066c87SFam Zheng goto out; 240361007b31SStefan Hajnoczi } else { 240461007b31SStefan Hajnoczi qemu_coroutine_yield(); 240561007b31SStefan Hajnoczi ret = co.ret; 240661007b31SStefan Hajnoczi } 240761007b31SStefan Hajnoczi } 240861007b31SStefan Hajnoczi if (ret && ret != -ENOTSUP) { 2409b1066c87SFam Zheng goto out; 241061007b31SStefan Hajnoczi } 241161007b31SStefan Hajnoczi 241261007b31SStefan Hajnoczi sector_num += num; 241361007b31SStefan Hajnoczi nb_sectors -= num; 241461007b31SStefan Hajnoczi } 2415b1066c87SFam Zheng ret = 0; 2416b1066c87SFam Zheng out: 2417968d8b06SDenis V. Lunev bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS, 2418968d8b06SDenis V. 
Lunev req.bytes >> BDRV_SECTOR_BITS); 2419b1066c87SFam Zheng tracked_request_end(&req); 2420b1066c87SFam Zheng return ret; 242161007b31SStefan Hajnoczi } 242261007b31SStefan Hajnoczi 242361007b31SStefan Hajnoczi int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) 242461007b31SStefan Hajnoczi { 242561007b31SStefan Hajnoczi Coroutine *co; 242661007b31SStefan Hajnoczi DiscardCo rwco = { 242761007b31SStefan Hajnoczi .bs = bs, 242861007b31SStefan Hajnoczi .sector_num = sector_num, 242961007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 243061007b31SStefan Hajnoczi .ret = NOT_DONE, 243161007b31SStefan Hajnoczi }; 243261007b31SStefan Hajnoczi 243361007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 243461007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 243561007b31SStefan Hajnoczi bdrv_discard_co_entry(&rwco); 243661007b31SStefan Hajnoczi } else { 243761007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 243861007b31SStefan Hajnoczi 243961007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_discard_co_entry); 244061007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 244161007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 244261007b31SStefan Hajnoczi aio_poll(aio_context, true); 244361007b31SStefan Hajnoczi } 244461007b31SStefan Hajnoczi } 244561007b31SStefan Hajnoczi 244661007b31SStefan Hajnoczi return rwco.ret; 244761007b31SStefan Hajnoczi } 244861007b31SStefan Hajnoczi 24495c5ae76aSFam Zheng static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf) 245061007b31SStefan Hajnoczi { 245161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 24525c5ae76aSFam Zheng BdrvTrackedRequest tracked_req; 24535c5ae76aSFam Zheng CoroutineIOCompletion co = { 24545c5ae76aSFam Zheng .coroutine = qemu_coroutine_self(), 24555c5ae76aSFam Zheng }; 24565c5ae76aSFam Zheng BlockAIOCB *acb; 245761007b31SStefan Hajnoczi 24585c5ae76aSFam Zheng tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL); 24595c5ae76aSFam Zheng if (!drv || !drv->bdrv_aio_ioctl) { 24605c5ae76aSFam Zheng co.ret = -ENOTSUP; 24615c5ae76aSFam Zheng goto out; 24625c5ae76aSFam Zheng } 24635c5ae76aSFam Zheng 24645c5ae76aSFam Zheng acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 24655c5ae76aSFam Zheng if (!acb) { 2466c8a9fd80SFam Zheng co.ret = -ENOTSUP; 2467c8a9fd80SFam Zheng goto out; 24685c5ae76aSFam Zheng } 24695c5ae76aSFam Zheng qemu_coroutine_yield(); 24705c5ae76aSFam Zheng out: 24715c5ae76aSFam Zheng tracked_request_end(&tracked_req); 24725c5ae76aSFam Zheng return co.ret; 24735c5ae76aSFam Zheng } 24745c5ae76aSFam Zheng 24755c5ae76aSFam Zheng typedef struct { 24765c5ae76aSFam Zheng BlockDriverState *bs; 24775c5ae76aSFam Zheng int req; 24785c5ae76aSFam Zheng void *buf; 24795c5ae76aSFam Zheng int ret; 24805c5ae76aSFam Zheng } BdrvIoctlCoData; 24815c5ae76aSFam Zheng 24825c5ae76aSFam Zheng static void coroutine_fn bdrv_co_ioctl_entry(void *opaque) 24835c5ae76aSFam Zheng { 24845c5ae76aSFam Zheng BdrvIoctlCoData *data = opaque; 24855c5ae76aSFam Zheng data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf); 24865c5ae76aSFam Zheng } 24875c5ae76aSFam Zheng 24885c5ae76aSFam Zheng /* needed for generic scsi interface */ 24895c5ae76aSFam Zheng int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 24905c5ae76aSFam Zheng { 24915c5ae76aSFam Zheng BdrvIoctlCoData data = { 24925c5ae76aSFam Zheng .bs = bs, 24935c5ae76aSFam Zheng .req = req, 24945c5ae76aSFam Zheng .buf = buf, 24955c5ae76aSFam Zheng .ret = -EINPROGRESS, 
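/* sentinel: bdrv_co_ioctl_entry() overwrites this with the real return code, and the aio_poll() loop below waits for exactly that */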
24965c5ae76aSFam Zheng }; 24975c5ae76aSFam Zheng 24985c5ae76aSFam Zheng if (qemu_in_coroutine()) { 24995c5ae76aSFam Zheng /* Fast-path if already in coroutine context */ 25005c5ae76aSFam Zheng bdrv_co_ioctl_entry(&data); 25015c5ae76aSFam Zheng } else { 25025c5ae76aSFam Zheng Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry); 2503ba889444SPaolo Bonzini 25045c5ae76aSFam Zheng qemu_coroutine_enter(co, &data); 25055c5ae76aSFam Zheng while (data.ret == -EINPROGRESS) { 25065c5ae76aSFam Zheng aio_poll(bdrv_get_aio_context(bs), true); 25075c5ae76aSFam Zheng } 2508ba889444SPaolo Bonzini } 25095c5ae76aSFam Zheng return data.ret; 25105c5ae76aSFam Zheng } 25115c5ae76aSFam Zheng 25125c5ae76aSFam Zheng static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque) 25135c5ae76aSFam Zheng { 25145c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = opaque; 25155c5ae76aSFam Zheng acb->req.error = bdrv_co_do_ioctl(acb->common.bs, 25165c5ae76aSFam Zheng acb->req.req, acb->req.buf); 25175c5ae76aSFam Zheng bdrv_co_complete(acb); 251861007b31SStefan Hajnoczi } 251961007b31SStefan Hajnoczi 252061007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 252161007b31SStefan Hajnoczi unsigned long int req, void *buf, 252261007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 252361007b31SStefan Hajnoczi { 25245c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info, 25255c5ae76aSFam Zheng bs, cb, opaque); 25265c5ae76aSFam Zheng Coroutine *co; 252761007b31SStefan Hajnoczi 25285c5ae76aSFam Zheng acb->need_bh = true; 25295c5ae76aSFam Zheng acb->req.error = -EINPROGRESS; 25305c5ae76aSFam Zheng acb->req.req = req; 25315c5ae76aSFam Zheng acb->req.buf = buf; 25325c5ae76aSFam Zheng co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry); 25335c5ae76aSFam Zheng qemu_coroutine_enter(co, acb); 25345c5ae76aSFam Zheng 25355c5ae76aSFam Zheng bdrv_co_maybe_schedule_bh(acb); 25365c5ae76aSFam Zheng return &acb->common; 253761007b31SStefan Hajnoczi } 253861007b31SStefan Hajnoczi 253961007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size) 254061007b31SStefan Hajnoczi { 254161007b31SStefan Hajnoczi return qemu_memalign(bdrv_opt_mem_align(bs), size); 254261007b31SStefan Hajnoczi } 254361007b31SStefan Hajnoczi 254461007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size) 254561007b31SStefan Hajnoczi { 254661007b31SStefan Hajnoczi return memset(qemu_blockalign(bs, size), 0, size); 254761007b31SStefan Hajnoczi } 254861007b31SStefan Hajnoczi 254961007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 255061007b31SStefan Hajnoczi { 255161007b31SStefan Hajnoczi size_t align = bdrv_opt_mem_align(bs); 255261007b31SStefan Hajnoczi 255361007b31SStefan Hajnoczi /* Ensure that NULL is never returned on success */ 255461007b31SStefan Hajnoczi assert(align > 0); 255561007b31SStefan Hajnoczi if (size == 0) { 255661007b31SStefan Hajnoczi size = align; 255761007b31SStefan Hajnoczi } 255861007b31SStefan Hajnoczi 255961007b31SStefan Hajnoczi return qemu_try_memalign(align, size); 256061007b31SStefan Hajnoczi } 256161007b31SStefan Hajnoczi 256261007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 256361007b31SStefan Hajnoczi { 256461007b31SStefan Hajnoczi void *mem = qemu_try_blockalign(bs, size); 256561007b31SStefan Hajnoczi 256661007b31SStefan Hajnoczi if (mem) { 256761007b31SStefan Hajnoczi memset(mem, 0, size); 256861007b31SStefan Hajnoczi } 256961007b31SStefan Hajnoczi 257061007b31SStefan 
Hajnoczi return mem; 257161007b31SStefan Hajnoczi } 257261007b31SStefan Hajnoczi 257361007b31SStefan Hajnoczi /* 257461007b31SStefan Hajnoczi * Check if all memory in this vector is sector aligned. 257561007b31SStefan Hajnoczi */ 257661007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 257761007b31SStefan Hajnoczi { 257861007b31SStefan Hajnoczi int i; 25794196d2f0SDenis V. Lunev size_t alignment = bdrv_min_mem_align(bs); 258061007b31SStefan Hajnoczi 258161007b31SStefan Hajnoczi for (i = 0; i < qiov->niov; i++) { 258261007b31SStefan Hajnoczi if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 258361007b31SStefan Hajnoczi return false; 258461007b31SStefan Hajnoczi } 258561007b31SStefan Hajnoczi if (qiov->iov[i].iov_len % alignment) { 258661007b31SStefan Hajnoczi return false; 258761007b31SStefan Hajnoczi } 258861007b31SStefan Hajnoczi } 258961007b31SStefan Hajnoczi 259061007b31SStefan Hajnoczi return true; 259161007b31SStefan Hajnoczi } 259261007b31SStefan Hajnoczi 259361007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs, 259461007b31SStefan Hajnoczi NotifierWithReturn *notifier) 259561007b31SStefan Hajnoczi { 259661007b31SStefan Hajnoczi notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 259761007b31SStefan Hajnoczi } 259861007b31SStefan Hajnoczi 259961007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs) 260061007b31SStefan Hajnoczi { 26016b98bd64SPaolo Bonzini BdrvChild *child; 26026b98bd64SPaolo Bonzini 26036b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 26046b98bd64SPaolo Bonzini bdrv_io_plug(child->bs); 26056b98bd64SPaolo Bonzini } 26066b98bd64SPaolo Bonzini 26076b98bd64SPaolo Bonzini if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) { 260861007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 260961007b31SStefan Hajnoczi if (drv && drv->bdrv_io_plug) { 261061007b31SStefan Hajnoczi drv->bdrv_io_plug(bs); 26116b98bd64SPaolo Bonzini } 261261007b31SStefan Hajnoczi } 261361007b31SStefan Hajnoczi } 261461007b31SStefan Hajnoczi 261561007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs) 261661007b31SStefan Hajnoczi { 26176b98bd64SPaolo Bonzini BdrvChild *child; 26186b98bd64SPaolo Bonzini 26196b98bd64SPaolo Bonzini assert(bs->io_plugged); 26206b98bd64SPaolo Bonzini if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) { 262161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 262261007b31SStefan Hajnoczi if (drv && drv->bdrv_io_unplug) { 262361007b31SStefan Hajnoczi drv->bdrv_io_unplug(bs); 262461007b31SStefan Hajnoczi } 262561007b31SStefan Hajnoczi } 262661007b31SStefan Hajnoczi 26276b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 26286b98bd64SPaolo Bonzini bdrv_io_unplug(child->bs); 26296b98bd64SPaolo Bonzini } 26306b98bd64SPaolo Bonzini } 26316b98bd64SPaolo Bonzini 26326b98bd64SPaolo Bonzini void bdrv_io_unplugged_begin(BlockDriverState *bs) 263361007b31SStefan Hajnoczi { 26346b98bd64SPaolo Bonzini BdrvChild *child; 26356b98bd64SPaolo Bonzini 26366b98bd64SPaolo Bonzini if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) { 263761007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 26386b98bd64SPaolo Bonzini if (drv && drv->bdrv_io_unplug) { 26396b98bd64SPaolo Bonzini drv->bdrv_io_unplug(bs); 26406b98bd64SPaolo Bonzini } 26416b98bd64SPaolo Bonzini } 26426b98bd64SPaolo Bonzini 26436b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 26446b98bd64SPaolo Bonzini bdrv_io_unplugged_begin(child->bs); 26456b98bd64SPaolo Bonzini } 
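/* At this point every node in the subtree has been unplugged at the driver level (if it was plugged) and has io_plug_disabled raised, so bdrv_io_plug() stays a driver-level no-op until the matching bdrv_io_unplugged_end(). */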
26466b98bd64SPaolo Bonzini } 26476b98bd64SPaolo Bonzini 26486b98bd64SPaolo Bonzini void bdrv_io_unplugged_end(BlockDriverState *bs) 26496b98bd64SPaolo Bonzini { 26506b98bd64SPaolo Bonzini BdrvChild *child; 26516b98bd64SPaolo Bonzini 26526b98bd64SPaolo Bonzini assert(bs->io_plug_disabled); 26536b98bd64SPaolo Bonzini QLIST_FOREACH(child, &bs->children, next) { 26546b98bd64SPaolo Bonzini bdrv_io_unplugged_end(child->bs); 26556b98bd64SPaolo Bonzini } 26566b98bd64SPaolo Bonzini 26576b98bd64SPaolo Bonzini if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) { 26586b98bd64SPaolo Bonzini BlockDriver *drv = bs->drv; 26596b98bd64SPaolo Bonzini if (drv && drv->bdrv_io_plug) { 26606b98bd64SPaolo Bonzini drv->bdrv_io_plug(bs); 26616b98bd64SPaolo Bonzini } 266261007b31SStefan Hajnoczi } 266361007b31SStefan Hajnoczi } 2664
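/*
 * The synchronous wrappers above (bdrv_flush, bdrv_discard, bdrv_ioctl) all
 * follow one pattern: run the coroutine entry directly when already in
 * coroutine context, otherwise spawn a coroutine and aio_poll() until the
 * sentinel in the shared state struct is overwritten with the real return
 * code.  A minimal sketch of that pattern follows; FooCo, foo_co_entry()
 * and foo_sync() are illustrative names, not functions in this file.
 */
typedef struct FooCo {
    BlockDriverState *bs;
    int ret;                    /* stays NOT_DONE until the coroutine finishes */
} FooCo;

static void coroutine_fn foo_co_entry(void *opaque)
{
    FooCo *fooco = opaque;

    fooco->ret = 0;             /* stands in for the real coroutine_fn work */
}

static int foo_sync(BlockDriverState *bs)
{
    FooCo fooco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        foo_co_entry(&fooco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        Coroutine *co = qemu_coroutine_create(foo_co_entry);

        qemu_coroutine_enter(co, &fooco);
        while (fooco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return fooco.ret;
}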
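/*
 * Sketch of how callers are expected to pair the plug operations when
 * submitting a batch of requests; submit_one_request() is a placeholder,
 * not a function in this file.  Both counters recurse over the children
 * and are reference counted, so nested pairs are fine as long as they
 * balance; bdrv_io_unplugged_begin()/end() additionally force queued
 * requests out and temporarily disable plugging for code that must not
 * leave requests sitting in a driver queue.
 */
static void submit_one_request(BlockDriverState *bs, int i);   /* placeholder */

static void submit_batch(BlockDriverState *bs, int nreq)
{
    int i;

    bdrv_io_plug(bs);               /* first plug reaches drv->bdrv_io_plug() */
    for (i = 0; i < nreq; i++) {
        submit_one_request(bs, i);  /* queued by the driver, not yet issued */
    }
    bdrv_io_unplug(bs);             /* matching unplug lets the driver submit */
}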