/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_group_config(bs, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled IOs */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;
    bdrv_start_throttled_reqs(bs);
    throttle_group_unregister_bs(bs);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
    assert(!bs->io_limits_enabled);
    throttle_group_register_bs(bs, group);
    bs->io_limits_enabled = true;
}
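/* Illustrative caller sequence (a sketch, not from this file): two drives
 * registered in the same throttle group compete for one shared I/O budget,
 * assuming the caller has filled in a ThrottleConfig:
 *
 *     bdrv_io_limits_enable(bs_a, "shared-group");
 *     bdrv_io_limits_enable(bs_b, "shared-group");
 *     bdrv_set_io_limits(bs_a, &cfg);    // config is shared group-wide
 */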
void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
    /* this bs is not part of any group */
    if (!bs->throttle_state) {
        return;
    }

    /* this bs is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(bs), group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    bdrv_io_limits_disable(bs);
    bdrv_io_limits_enable(bs, group);
}

void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
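/* Worked example of the merging above (hypothetical values): if bs->file
 * reports opt_transfer_length 8 and the backing file reports 16, the merged
 * opt_transfer_length is 16 (MAX); max_transfer_length instead takes the
 * smaller non-zero value, so a 0 ("unlimited") child never masks a real limit.
 */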
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void bdrv_drain(BlockDriverState *bs)
{
    bool busy = true;

    bdrv_drain_recurse(bs);
    while (busy) {
        /* Keep iterating */
        bdrv_flush_io_queue(bs);
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}
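/* Typical caller pattern (a sketch, not taken from this file): quiesce one
 * device before an operation that must not race with in-flight I/O, while
 * holding its AioContext as required above:
 *
 *     AioContext *ctx = bdrv_get_aio_context(bs);
 *     aio_context_acquire(ctx);
 *     bdrv_drain(bs);
 *     ... operate on the now-quiescent bs ...
 *     aio_context_release(ctx);
 */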
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;
            bs = NULL;

            aio_context_acquire(aio_context);
            while ((bs = bdrv_next(bs))) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    bdrv_flush_io_queue(bs);
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
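/* The tracked-request machinery below is what makes draining and request
 * serialisation possible.  Sketch of the lifecycle as used by the aligned
 * read/write paths later in this file:
 *
 *     BdrvTrackedRequest req;
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
 *     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, qiov, flags);
 *     tracked_request_end(&req);
 */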
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
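/* Example of the rounding above (hypothetical numbers): with align = 64k, a
 * 4k request at offset 70k is widened to overlap_offset = 64k and
 * overlap_bytes = 64k, i.e. the half-open range [64k, 128k), so any other
 * request touching that cluster is serialised against it.
 */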
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
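/* The comparisons above treat ranges as half-open: a request covering
 * [0, 64k) and one covering [64k, 128k) do not overlap, while one covering
 * [60k, 68k) overlaps both.
 */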
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case).
                 */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
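/* Example of the synchronous sector API built on the helpers above (a sketch,
 * not from this file): read the first sector into a local buffer and fail on
 * error:
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *     if (bdrv_read(bs, 0, buf, 1) < 0) {
 *         ... handle error ...
 *     }
 */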
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
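/* For instance (hypothetical call), zeroing the first 1 MiB (2048 sectors of
 * 512 bytes) and allowing the driver to unmap the range instead of writing
 * literal zeroes:
 *
 *     bdrv_write_zeroes(bs, 0, 2048, BDRV_REQ_MAY_UNMAP);
 */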
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}
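/* Unlike the sector-based calls, bdrv_pread()/bdrv_pwrite() take byte offsets
 * and lengths.  A sketch (hypothetical caller) reading a 4-byte header magic
 * at byte offset 0:
 *
 *     uint32_t magic;
 *     if (bdrv_pread(bs, 0, &magic, sizeof(magic)) < 0) {
 *         ... handle error ...
 *     }
 */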
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}

/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        throttle_group_co_io_limits_intercept(bs, bytes, false);
    }
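    /* Worked example for the padding below (hypothetical numbers): with
     * align = 4096, a request at offset 5120 for 2048 bytes becomes a request
     * at offset 4096 for 4096 bytes: a 1024-byte head pad, the caller's 2048
     * bytes, and a 1024-byte tail pad, with the pads backed by head_buf and
     * tail_buf.
     */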
    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_NO_SERIALISING);
}
int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}

#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
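/* A note on the alignment logic below (worked example with hypothetical
 * numbers): with write_zeroes_alignment = 128, a request starting at sector
 * 100 for 1000 sectors is split so that the first iteration issues only 28
 * sectors (up to the first aligned sector, 128) and later iterations work on
 * aligned chunks.
 */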
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    qemu_vfree(iov.iov_base);
    return ret;
}
112661007b31SStefan Hajnoczi */ 112761007b31SStefan Hajnoczi static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 112861007b31SStefan Hajnoczi BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 112961007b31SStefan Hajnoczi QEMUIOVector *qiov, int flags) 113061007b31SStefan Hajnoczi { 113161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 113261007b31SStefan Hajnoczi bool waited; 113361007b31SStefan Hajnoczi int ret; 113461007b31SStefan Hajnoczi 113561007b31SStefan Hajnoczi int64_t sector_num = offset >> BDRV_SECTOR_BITS; 113661007b31SStefan Hajnoczi unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 113761007b31SStefan Hajnoczi 113861007b31SStefan Hajnoczi assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 113961007b31SStefan Hajnoczi assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 114061007b31SStefan Hajnoczi assert(!qiov || bytes == qiov->size); 114161007b31SStefan Hajnoczi 114261007b31SStefan Hajnoczi waited = wait_serialising_requests(req); 114361007b31SStefan Hajnoczi assert(!waited || !req->serialising); 114461007b31SStefan Hajnoczi assert(req->overlap_offset <= offset); 114561007b31SStefan Hajnoczi assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 114661007b31SStefan Hajnoczi 114761007b31SStefan Hajnoczi ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 114861007b31SStefan Hajnoczi 114961007b31SStefan Hajnoczi if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 115061007b31SStefan Hajnoczi !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && 115161007b31SStefan Hajnoczi qemu_iovec_is_zero(qiov)) { 115261007b31SStefan Hajnoczi flags |= BDRV_REQ_ZERO_WRITE; 115361007b31SStefan Hajnoczi if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 115461007b31SStefan Hajnoczi flags |= BDRV_REQ_MAY_UNMAP; 115561007b31SStefan Hajnoczi } 115661007b31SStefan Hajnoczi } 115761007b31SStefan Hajnoczi 115861007b31SStefan Hajnoczi if (ret < 0) { 115961007b31SStefan Hajnoczi /* Do nothing, write notifier decided to fail this request */ 116061007b31SStefan Hajnoczi } else if (flags & BDRV_REQ_ZERO_WRITE) { 11619a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 116261007b31SStefan Hajnoczi ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); 116361007b31SStefan Hajnoczi } else { 11649a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV); 116561007b31SStefan Hajnoczi ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); 116661007b31SStefan Hajnoczi } 11679a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 116861007b31SStefan Hajnoczi 116961007b31SStefan Hajnoczi if (ret == 0 && !bs->enable_write_cache) { 117061007b31SStefan Hajnoczi ret = bdrv_co_flush(bs); 117161007b31SStefan Hajnoczi } 117261007b31SStefan Hajnoczi 117361007b31SStefan Hajnoczi bdrv_set_dirty(bs, sector_num, nb_sectors); 117461007b31SStefan Hajnoczi 117553d8f9d8SMax Reitz if (bs->wr_highest_offset < offset + bytes) { 117653d8f9d8SMax Reitz bs->wr_highest_offset = offset + bytes; 117753d8f9d8SMax Reitz } 117861007b31SStefan Hajnoczi 117961007b31SStefan Hajnoczi if (ret >= 0) { 118061007b31SStefan Hajnoczi bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); 118161007b31SStefan Hajnoczi } 118261007b31SStefan Hajnoczi 118361007b31SStefan Hajnoczi return ret; 118461007b31SStefan Hajnoczi } 118561007b31SStefan Hajnoczi 11869eeb6dd1SFam Zheng static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs, 11879eeb6dd1SFam Zheng int64_t offset, 11889eeb6dd1SFam Zheng unsigned 
int bytes, 11899eeb6dd1SFam Zheng BdrvRequestFlags flags, 11909eeb6dd1SFam Zheng BdrvTrackedRequest *req) 11919eeb6dd1SFam Zheng { 11929eeb6dd1SFam Zheng uint8_t *buf = NULL; 11939eeb6dd1SFam Zheng QEMUIOVector local_qiov; 11949eeb6dd1SFam Zheng struct iovec iov; 11959eeb6dd1SFam Zheng uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 11969eeb6dd1SFam Zheng unsigned int head_padding_bytes, tail_padding_bytes; 11979eeb6dd1SFam Zheng int ret = 0; 11989eeb6dd1SFam Zheng 11999eeb6dd1SFam Zheng head_padding_bytes = offset & (align - 1); 12009eeb6dd1SFam Zheng tail_padding_bytes = align - ((offset + bytes) & (align - 1)); 12019eeb6dd1SFam Zheng 12029eeb6dd1SFam Zheng 12039eeb6dd1SFam Zheng assert(flags & BDRV_REQ_ZERO_WRITE); 12049eeb6dd1SFam Zheng if (head_padding_bytes || tail_padding_bytes) { 12059eeb6dd1SFam Zheng buf = qemu_blockalign(bs, align); 12069eeb6dd1SFam Zheng iov = (struct iovec) { 12079eeb6dd1SFam Zheng .iov_base = buf, 12089eeb6dd1SFam Zheng .iov_len = align, 12099eeb6dd1SFam Zheng }; 12109eeb6dd1SFam Zheng qemu_iovec_init_external(&local_qiov, &iov, 1); 12119eeb6dd1SFam Zheng } 12129eeb6dd1SFam Zheng if (head_padding_bytes) { 12139eeb6dd1SFam Zheng uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 12149eeb6dd1SFam Zheng 12159eeb6dd1SFam Zheng /* RMW the unaligned part before head. */ 12169eeb6dd1SFam Zheng mark_request_serialising(req, align); 12179eeb6dd1SFam Zheng wait_serialising_requests(req); 12189a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 12199eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align, 12209eeb6dd1SFam Zheng align, &local_qiov, 0); 12219eeb6dd1SFam Zheng if (ret < 0) { 12229eeb6dd1SFam Zheng goto fail; 12239eeb6dd1SFam Zheng } 12249a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 12259eeb6dd1SFam Zheng 12269eeb6dd1SFam Zheng memset(buf + head_padding_bytes, 0, zero_bytes); 12279eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align, 12289eeb6dd1SFam Zheng &local_qiov, 12299eeb6dd1SFam Zheng flags & ~BDRV_REQ_ZERO_WRITE); 12309eeb6dd1SFam Zheng if (ret < 0) { 12319eeb6dd1SFam Zheng goto fail; 12329eeb6dd1SFam Zheng } 12339eeb6dd1SFam Zheng offset += zero_bytes; 12349eeb6dd1SFam Zheng bytes -= zero_bytes; 12359eeb6dd1SFam Zheng } 12369eeb6dd1SFam Zheng 12379eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 12389eeb6dd1SFam Zheng if (bytes >= align) { 12399eeb6dd1SFam Zheng /* Write the aligned part in the middle. */ 12409eeb6dd1SFam Zheng uint64_t aligned_bytes = bytes & ~(align - 1); 12419eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, 12429eeb6dd1SFam Zheng NULL, flags); 12439eeb6dd1SFam Zheng if (ret < 0) { 12449eeb6dd1SFam Zheng goto fail; 12459eeb6dd1SFam Zheng } 12469eeb6dd1SFam Zheng bytes -= aligned_bytes; 12479eeb6dd1SFam Zheng offset += aligned_bytes; 12489eeb6dd1SFam Zheng } 12499eeb6dd1SFam Zheng 12509eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 12519eeb6dd1SFam Zheng if (bytes) { 12529eeb6dd1SFam Zheng assert(align == tail_padding_bytes + bytes); 12539eeb6dd1SFam Zheng /* RMW the unaligned part after tail. 
*/ 12549eeb6dd1SFam Zheng mark_request_serialising(req, align); 12559eeb6dd1SFam Zheng wait_serialising_requests(req); 12569a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 12579eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset, align, 12589eeb6dd1SFam Zheng align, &local_qiov, 0); 12599eeb6dd1SFam Zheng if (ret < 0) { 12609eeb6dd1SFam Zheng goto fail; 12619eeb6dd1SFam Zheng } 12629a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 12639eeb6dd1SFam Zheng 12649eeb6dd1SFam Zheng memset(buf, 0, bytes); 12659eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, align, 12669eeb6dd1SFam Zheng &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 12679eeb6dd1SFam Zheng } 12689eeb6dd1SFam Zheng fail: 12699eeb6dd1SFam Zheng qemu_vfree(buf); 12709eeb6dd1SFam Zheng return ret; 12719eeb6dd1SFam Zheng 12729eeb6dd1SFam Zheng } 12739eeb6dd1SFam Zheng 127461007b31SStefan Hajnoczi /* 127561007b31SStefan Hajnoczi * Handle a write request in coroutine context 127661007b31SStefan Hajnoczi */ 127761007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, 127861007b31SStefan Hajnoczi int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 127961007b31SStefan Hajnoczi BdrvRequestFlags flags) 128061007b31SStefan Hajnoczi { 128161007b31SStefan Hajnoczi BdrvTrackedRequest req; 1282d01c07f2SFam Zheng /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 1283d01c07f2SFam Zheng uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 128461007b31SStefan Hajnoczi uint8_t *head_buf = NULL; 128561007b31SStefan Hajnoczi uint8_t *tail_buf = NULL; 128661007b31SStefan Hajnoczi QEMUIOVector local_qiov; 128761007b31SStefan Hajnoczi bool use_local_qiov = false; 128861007b31SStefan Hajnoczi int ret; 128961007b31SStefan Hajnoczi 129061007b31SStefan Hajnoczi if (!bs->drv) { 129161007b31SStefan Hajnoczi return -ENOMEDIUM; 129261007b31SStefan Hajnoczi } 129361007b31SStefan Hajnoczi if (bs->read_only) { 1294eaf5fe2dSPaolo Bonzini return -EPERM; 129561007b31SStefan Hajnoczi } 129661007b31SStefan Hajnoczi 129761007b31SStefan Hajnoczi ret = bdrv_check_byte_request(bs, offset, bytes); 129861007b31SStefan Hajnoczi if (ret < 0) { 129961007b31SStefan Hajnoczi return ret; 130061007b31SStefan Hajnoczi } 130161007b31SStefan Hajnoczi 130261007b31SStefan Hajnoczi /* throttling disk I/O */ 130361007b31SStefan Hajnoczi if (bs->io_limits_enabled) { 130476f4afb4SAlberto Garcia throttle_group_co_io_limits_intercept(bs, bytes, true); 130561007b31SStefan Hajnoczi } 130661007b31SStefan Hajnoczi 130761007b31SStefan Hajnoczi /* 130861007b31SStefan Hajnoczi * Align write if necessary by performing a read-modify-write cycle. 130961007b31SStefan Hajnoczi * Pad qiov with the read parts and be sure to have a tracked request not 131061007b31SStefan Hajnoczi * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
131161007b31SStefan Hajnoczi */ 1312ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 131361007b31SStefan Hajnoczi 13149eeb6dd1SFam Zheng if (!qiov) { 13159eeb6dd1SFam Zheng ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); 13169eeb6dd1SFam Zheng goto out; 13179eeb6dd1SFam Zheng } 13189eeb6dd1SFam Zheng 131961007b31SStefan Hajnoczi if (offset & (align - 1)) { 132061007b31SStefan Hajnoczi QEMUIOVector head_qiov; 132161007b31SStefan Hajnoczi struct iovec head_iov; 132261007b31SStefan Hajnoczi 132361007b31SStefan Hajnoczi mark_request_serialising(&req, align); 132461007b31SStefan Hajnoczi wait_serialising_requests(&req); 132561007b31SStefan Hajnoczi 132661007b31SStefan Hajnoczi head_buf = qemu_blockalign(bs, align); 132761007b31SStefan Hajnoczi head_iov = (struct iovec) { 132861007b31SStefan Hajnoczi .iov_base = head_buf, 132961007b31SStefan Hajnoczi .iov_len = align, 133061007b31SStefan Hajnoczi }; 133161007b31SStefan Hajnoczi qemu_iovec_init_external(&head_qiov, &head_iov, 1); 133261007b31SStefan Hajnoczi 13339a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 133461007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 133561007b31SStefan Hajnoczi align, &head_qiov, 0); 133661007b31SStefan Hajnoczi if (ret < 0) { 133761007b31SStefan Hajnoczi goto fail; 133861007b31SStefan Hajnoczi } 13399a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 134061007b31SStefan Hajnoczi 134161007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 2); 134261007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 134361007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 134461007b31SStefan Hajnoczi use_local_qiov = true; 134561007b31SStefan Hajnoczi 134661007b31SStefan Hajnoczi bytes += offset & (align - 1); 134761007b31SStefan Hajnoczi offset = offset & ~(align - 1); 134861007b31SStefan Hajnoczi } 134961007b31SStefan Hajnoczi 135061007b31SStefan Hajnoczi if ((offset + bytes) & (align - 1)) { 135161007b31SStefan Hajnoczi QEMUIOVector tail_qiov; 135261007b31SStefan Hajnoczi struct iovec tail_iov; 135361007b31SStefan Hajnoczi size_t tail_bytes; 135461007b31SStefan Hajnoczi bool waited; 135561007b31SStefan Hajnoczi 135661007b31SStefan Hajnoczi mark_request_serialising(&req, align); 135761007b31SStefan Hajnoczi waited = wait_serialising_requests(&req); 135861007b31SStefan Hajnoczi assert(!waited || !use_local_qiov); 135961007b31SStefan Hajnoczi 136061007b31SStefan Hajnoczi tail_buf = qemu_blockalign(bs, align); 136161007b31SStefan Hajnoczi tail_iov = (struct iovec) { 136261007b31SStefan Hajnoczi .iov_base = tail_buf, 136361007b31SStefan Hajnoczi .iov_len = align, 136461007b31SStefan Hajnoczi }; 136561007b31SStefan Hajnoczi qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 136661007b31SStefan Hajnoczi 13679a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 136861007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 136961007b31SStefan Hajnoczi align, &tail_qiov, 0); 137061007b31SStefan Hajnoczi if (ret < 0) { 137161007b31SStefan Hajnoczi goto fail; 137261007b31SStefan Hajnoczi } 13739a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 137461007b31SStefan Hajnoczi 137561007b31SStefan Hajnoczi if (!use_local_qiov) { 137661007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 1); 137761007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, 
qiov->size);
137861007b31SStefan Hajnoczi         use_local_qiov = true;
137961007b31SStefan Hajnoczi     }
138061007b31SStefan Hajnoczi 
138161007b31SStefan Hajnoczi         tail_bytes = (offset + bytes) & (align - 1);
138261007b31SStefan Hajnoczi         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
138361007b31SStefan Hajnoczi 
138461007b31SStefan Hajnoczi         bytes = ROUND_UP(bytes, align);
138561007b31SStefan Hajnoczi     }
138661007b31SStefan Hajnoczi 
138761007b31SStefan Hajnoczi     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
138861007b31SStefan Hajnoczi                                use_local_qiov ? &local_qiov : qiov,
138961007b31SStefan Hajnoczi                                flags);
139061007b31SStefan Hajnoczi 
139161007b31SStefan Hajnoczi fail:
139261007b31SStefan Hajnoczi 
139361007b31SStefan Hajnoczi     if (use_local_qiov) {
139461007b31SStefan Hajnoczi         qemu_iovec_destroy(&local_qiov);
139561007b31SStefan Hajnoczi     }
139661007b31SStefan Hajnoczi     qemu_vfree(head_buf);
139761007b31SStefan Hajnoczi     qemu_vfree(tail_buf);
13989eeb6dd1SFam Zheng out:
13999eeb6dd1SFam Zheng     tracked_request_end(&req);
140061007b31SStefan Hajnoczi     return ret;
140161007b31SStefan Hajnoczi }
140261007b31SStefan Hajnoczi 
140361007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
140461007b31SStefan Hajnoczi     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
140561007b31SStefan Hajnoczi     BdrvRequestFlags flags)
140661007b31SStefan Hajnoczi {
140761007b31SStefan Hajnoczi     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
140861007b31SStefan Hajnoczi         return -EINVAL;
140961007b31SStefan Hajnoczi     }
141061007b31SStefan Hajnoczi 
141161007b31SStefan Hajnoczi     return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
141261007b31SStefan Hajnoczi                               nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
141361007b31SStefan Hajnoczi }
141461007b31SStefan Hajnoczi 
141561007b31SStefan Hajnoczi int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
141661007b31SStefan Hajnoczi     int nb_sectors, QEMUIOVector *qiov)
141761007b31SStefan Hajnoczi {
141861007b31SStefan Hajnoczi     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
141961007b31SStefan Hajnoczi 
142061007b31SStefan Hajnoczi     return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
142161007b31SStefan Hajnoczi }
142261007b31SStefan Hajnoczi 
142361007b31SStefan Hajnoczi int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
142461007b31SStefan Hajnoczi     int64_t sector_num, int nb_sectors,
142561007b31SStefan Hajnoczi     BdrvRequestFlags flags)
142661007b31SStefan Hajnoczi {
142761007b31SStefan Hajnoczi     trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
142861007b31SStefan Hajnoczi 
142961007b31SStefan Hajnoczi     if (!(bs->open_flags & BDRV_O_UNMAP)) {
143061007b31SStefan Hajnoczi         flags &= ~BDRV_REQ_MAY_UNMAP;
143161007b31SStefan Hajnoczi     }
143261007b31SStefan Hajnoczi 
1433d01c07f2SFam Zheng     return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
143461007b31SStefan Hajnoczi                              BDRV_REQ_ZERO_WRITE | flags);
143561007b31SStefan Hajnoczi }
143661007b31SStefan Hajnoczi 
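/*
 * Note the NULL qiov above: bdrv_co_do_pwritev() treats a NULL qiov plus
 * BDRV_REQ_ZERO_WRITE as a zero write and routes it through
 * bdrv_co_do_zero_pwritev().  A coroutine caller sketch (illustrative,
 * assuming an opened BlockDriverState *bs):
 *
 *     // Zero the first 1 MiB (2048 sectors), unmapping if allowed.
 *     ret = bdrv_co_write_zeroes(bs, 0, 2048, BDRV_REQ_MAY_UNMAP);
 *     // Without BDRV_O_UNMAP in bs->open_flags the MAY_UNMAP hint is dropped.
 */
143761007b31SStefan Hajnoczi int bdrv_flush_all(void)
143861007b31SStefan Hajnoczi {
143961007b31SStefan Hajnoczi     BlockDriverState *bs = NULL;
144061007b31SStefan Hajnoczi     int result = 0;
144161007b31SStefan Hajnoczi 
144261007b31SStefan Hajnoczi     while ((bs = bdrv_next(bs))) {
144361007b31SStefan Hajnoczi         AioContext *aio_context = bdrv_get_aio_context(bs);
144461007b31SStefan Hajnoczi         int ret;
144561007b31SStefan Hajnoczi 
144661007b31SStefan Hajnoczi         aio_context_acquire(aio_context);
144761007b31SStefan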
Hajnoczi ret = bdrv_flush(bs); 144861007b31SStefan Hajnoczi if (ret < 0 && !result) { 144961007b31SStefan Hajnoczi result = ret; 145061007b31SStefan Hajnoczi } 145161007b31SStefan Hajnoczi aio_context_release(aio_context); 145261007b31SStefan Hajnoczi } 145361007b31SStefan Hajnoczi 145461007b31SStefan Hajnoczi return result; 145561007b31SStefan Hajnoczi } 145661007b31SStefan Hajnoczi 145761007b31SStefan Hajnoczi typedef struct BdrvCoGetBlockStatusData { 145861007b31SStefan Hajnoczi BlockDriverState *bs; 145961007b31SStefan Hajnoczi BlockDriverState *base; 146061007b31SStefan Hajnoczi int64_t sector_num; 146161007b31SStefan Hajnoczi int nb_sectors; 146261007b31SStefan Hajnoczi int *pnum; 146361007b31SStefan Hajnoczi int64_t ret; 146461007b31SStefan Hajnoczi bool done; 146561007b31SStefan Hajnoczi } BdrvCoGetBlockStatusData; 146661007b31SStefan Hajnoczi 146761007b31SStefan Hajnoczi /* 146861007b31SStefan Hajnoczi * Returns the allocation status of the specified sectors. 146961007b31SStefan Hajnoczi * Drivers not implementing the functionality are assumed to not support 147061007b31SStefan Hajnoczi * backing files, hence all their sectors are reported as allocated. 147161007b31SStefan Hajnoczi * 147261007b31SStefan Hajnoczi * If 'sector_num' is beyond the end of the disk image the return value is 0 147361007b31SStefan Hajnoczi * and 'pnum' is set to 0. 147461007b31SStefan Hajnoczi * 147561007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 147661007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 147761007b31SStefan Hajnoczi * allocated/unallocated state. 147861007b31SStefan Hajnoczi * 147961007b31SStefan Hajnoczi * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 148061007b31SStefan Hajnoczi * beyond the end of the disk image it will be clamped. 
148161007b31SStefan Hajnoczi */ 148261007b31SStefan Hajnoczi static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 148361007b31SStefan Hajnoczi int64_t sector_num, 148461007b31SStefan Hajnoczi int nb_sectors, int *pnum) 148561007b31SStefan Hajnoczi { 148661007b31SStefan Hajnoczi int64_t total_sectors; 148761007b31SStefan Hajnoczi int64_t n; 148861007b31SStefan Hajnoczi int64_t ret, ret2; 148961007b31SStefan Hajnoczi 149061007b31SStefan Hajnoczi total_sectors = bdrv_nb_sectors(bs); 149161007b31SStefan Hajnoczi if (total_sectors < 0) { 149261007b31SStefan Hajnoczi return total_sectors; 149361007b31SStefan Hajnoczi } 149461007b31SStefan Hajnoczi 149561007b31SStefan Hajnoczi if (sector_num >= total_sectors) { 149661007b31SStefan Hajnoczi *pnum = 0; 149761007b31SStefan Hajnoczi return 0; 149861007b31SStefan Hajnoczi } 149961007b31SStefan Hajnoczi 150061007b31SStefan Hajnoczi n = total_sectors - sector_num; 150161007b31SStefan Hajnoczi if (n < nb_sectors) { 150261007b31SStefan Hajnoczi nb_sectors = n; 150361007b31SStefan Hajnoczi } 150461007b31SStefan Hajnoczi 150561007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_get_block_status) { 150661007b31SStefan Hajnoczi *pnum = nb_sectors; 150761007b31SStefan Hajnoczi ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 150861007b31SStefan Hajnoczi if (bs->drv->protocol_name) { 150961007b31SStefan Hajnoczi ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 151061007b31SStefan Hajnoczi } 151161007b31SStefan Hajnoczi return ret; 151261007b31SStefan Hajnoczi } 151361007b31SStefan Hajnoczi 151461007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); 151561007b31SStefan Hajnoczi if (ret < 0) { 151661007b31SStefan Hajnoczi *pnum = 0; 151761007b31SStefan Hajnoczi return ret; 151861007b31SStefan Hajnoczi } 151961007b31SStefan Hajnoczi 152061007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_RAW) { 152161007b31SStefan Hajnoczi assert(ret & BDRV_BLOCK_OFFSET_VALID); 15229a4f4c31SKevin Wolf return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS, 152361007b31SStefan Hajnoczi *pnum, pnum); 152461007b31SStefan Hajnoczi } 152561007b31SStefan Hajnoczi 152661007b31SStefan Hajnoczi if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 152761007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ALLOCATED; 1528a53f1a95SPaolo Bonzini } else { 152961007b31SStefan Hajnoczi if (bdrv_unallocated_blocks_are_zero(bs)) { 153061007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 1531760e0063SKevin Wolf } else if (bs->backing) { 1532760e0063SKevin Wolf BlockDriverState *bs2 = bs->backing->bs; 153361007b31SStefan Hajnoczi int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 153461007b31SStefan Hajnoczi if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { 153561007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 153661007b31SStefan Hajnoczi } 153761007b31SStefan Hajnoczi } 153861007b31SStefan Hajnoczi } 153961007b31SStefan Hajnoczi 154061007b31SStefan Hajnoczi if (bs->file && 154161007b31SStefan Hajnoczi (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 154261007b31SStefan Hajnoczi (ret & BDRV_BLOCK_OFFSET_VALID)) { 154361007b31SStefan Hajnoczi int file_pnum; 154461007b31SStefan Hajnoczi 15459a4f4c31SKevin Wolf ret2 = bdrv_co_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS, 154661007b31SStefan Hajnoczi *pnum, &file_pnum); 154761007b31SStefan Hajnoczi if (ret2 >= 0) { 154861007b31SStefan Hajnoczi /* Ignore errors. This is just providing extra information, it 154961007b31SStefan Hajnoczi * is useful but not necessary. 
155061007b31SStefan Hajnoczi              */
155161007b31SStefan Hajnoczi             if (!file_pnum) {
155261007b31SStefan Hajnoczi                 /* !file_pnum indicates an offset at or beyond the EOF; it is
155361007b31SStefan Hajnoczi                  * perfectly valid for the format block driver to point to such
155461007b31SStefan Hajnoczi                  * offsets, so catch it and mark everything as zero */
155561007b31SStefan Hajnoczi                 ret |= BDRV_BLOCK_ZERO;
155661007b31SStefan Hajnoczi             } else {
155761007b31SStefan Hajnoczi                 /* Limit request to the range reported by the protocol driver */
155861007b31SStefan Hajnoczi                 *pnum = file_pnum;
155961007b31SStefan Hajnoczi                 ret |= (ret2 & BDRV_BLOCK_ZERO);
156061007b31SStefan Hajnoczi             }
156161007b31SStefan Hajnoczi         }
156261007b31SStefan Hajnoczi     }
156361007b31SStefan Hajnoczi 
156461007b31SStefan Hajnoczi     return ret;
156561007b31SStefan Hajnoczi }
156661007b31SStefan Hajnoczi 
1567ba3f0e25SFam Zheng static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1568ba3f0e25SFam Zheng         BlockDriverState *base,
1569ba3f0e25SFam Zheng         int64_t sector_num,
1570ba3f0e25SFam Zheng         int nb_sectors,
1571ba3f0e25SFam Zheng         int *pnum)
1572ba3f0e25SFam Zheng {
1573ba3f0e25SFam Zheng     BlockDriverState *p;
1574ba3f0e25SFam Zheng     int64_t ret = 0;
1575ba3f0e25SFam Zheng 
1576ba3f0e25SFam Zheng     assert(bs != base);
1577760e0063SKevin Wolf     for (p = bs; p != base; p = backing_bs(p)) {
1578ba3f0e25SFam Zheng         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum);
1579ba3f0e25SFam Zheng         if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1580ba3f0e25SFam Zheng             break;
1581ba3f0e25SFam Zheng         }
1582ba3f0e25SFam Zheng         /* [sector_num, pnum] unallocated on this layer, which could be only
1583ba3f0e25SFam Zheng          * the first part of [sector_num, nb_sectors]. */
1584ba3f0e25SFam Zheng         nb_sectors = MIN(nb_sectors, *pnum);
1585ba3f0e25SFam Zheng     }
1586ba3f0e25SFam Zheng     return ret;
1587ba3f0e25SFam Zheng }
1588ba3f0e25SFam Zheng 
1589ba3f0e25SFam Zheng /* Coroutine wrapper for bdrv_get_block_status_above() */
1590ba3f0e25SFam Zheng static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
159161007b31SStefan Hajnoczi {
159261007b31SStefan Hajnoczi     BdrvCoGetBlockStatusData *data = opaque;
159361007b31SStefan Hajnoczi 
1594ba3f0e25SFam Zheng     data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1595ba3f0e25SFam Zheng                                                data->sector_num,
1596ba3f0e25SFam Zheng                                                data->nb_sectors,
159761007b31SStefan Hajnoczi                                                data->pnum);
159861007b31SStefan Hajnoczi     data->done = true;
159961007b31SStefan Hajnoczi }
160061007b31SStefan Hajnoczi 
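/*
 * Layer-walk sketch (hypothetical three-image chain base <- mid <- top):
 * for a range written only in "mid", bdrv_co_get_block_status_above(top,
 * NULL, ...) first queries "top", which reports the range unallocated;
 * nb_sectors is clamped to the returned *pnum and the loop descends to
 * "mid", which returns a status with BDRV_BLOCK_ALLOCATED set and ends
 * the walk.
 */
160161007b31SStefan Hajnoczi /*
1602ba3f0e25SFam Zheng  * Synchronous wrapper around bdrv_co_get_block_status_above().
160361007b31SStefan Hajnoczi  *
1604ba3f0e25SFam Zheng  * See bdrv_co_get_block_status_above() for details.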
160561007b31SStefan Hajnoczi */ 1606ba3f0e25SFam Zheng int64_t bdrv_get_block_status_above(BlockDriverState *bs, 1607ba3f0e25SFam Zheng BlockDriverState *base, 1608ba3f0e25SFam Zheng int64_t sector_num, 160961007b31SStefan Hajnoczi int nb_sectors, int *pnum) 161061007b31SStefan Hajnoczi { 161161007b31SStefan Hajnoczi Coroutine *co; 161261007b31SStefan Hajnoczi BdrvCoGetBlockStatusData data = { 161361007b31SStefan Hajnoczi .bs = bs, 1614ba3f0e25SFam Zheng .base = base, 161561007b31SStefan Hajnoczi .sector_num = sector_num, 161661007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 161761007b31SStefan Hajnoczi .pnum = pnum, 161861007b31SStefan Hajnoczi .done = false, 161961007b31SStefan Hajnoczi }; 162061007b31SStefan Hajnoczi 162161007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 162261007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 1623ba3f0e25SFam Zheng bdrv_get_block_status_above_co_entry(&data); 162461007b31SStefan Hajnoczi } else { 162561007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 162661007b31SStefan Hajnoczi 1627ba3f0e25SFam Zheng co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry); 162861007b31SStefan Hajnoczi qemu_coroutine_enter(co, &data); 162961007b31SStefan Hajnoczi while (!data.done) { 163061007b31SStefan Hajnoczi aio_poll(aio_context, true); 163161007b31SStefan Hajnoczi } 163261007b31SStefan Hajnoczi } 163361007b31SStefan Hajnoczi return data.ret; 163461007b31SStefan Hajnoczi } 163561007b31SStefan Hajnoczi 1636ba3f0e25SFam Zheng int64_t bdrv_get_block_status(BlockDriverState *bs, 1637ba3f0e25SFam Zheng int64_t sector_num, 1638ba3f0e25SFam Zheng int nb_sectors, int *pnum) 1639ba3f0e25SFam Zheng { 1640760e0063SKevin Wolf return bdrv_get_block_status_above(bs, backing_bs(bs), 1641ba3f0e25SFam Zheng sector_num, nb_sectors, pnum); 1642ba3f0e25SFam Zheng } 1643ba3f0e25SFam Zheng 164461007b31SStefan Hajnoczi int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 164561007b31SStefan Hajnoczi int nb_sectors, int *pnum) 164661007b31SStefan Hajnoczi { 164761007b31SStefan Hajnoczi int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); 164861007b31SStefan Hajnoczi if (ret < 0) { 164961007b31SStefan Hajnoczi return ret; 165061007b31SStefan Hajnoczi } 165161007b31SStefan Hajnoczi return !!(ret & BDRV_BLOCK_ALLOCATED); 165261007b31SStefan Hajnoczi } 165361007b31SStefan Hajnoczi 165461007b31SStefan Hajnoczi /* 165561007b31SStefan Hajnoczi * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 165661007b31SStefan Hajnoczi * 165761007b31SStefan Hajnoczi * Return true if the given sector is allocated in any image between 165861007b31SStefan Hajnoczi * BASE and TOP (inclusive). BASE can be NULL to check if the given 165961007b31SStefan Hajnoczi * sector is allocated in any image of the chain. Return false otherwise. 166061007b31SStefan Hajnoczi * 166161007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 166261007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 166361007b31SStefan Hajnoczi * allocated/unallocated state. 
166461007b31SStefan Hajnoczi  *
166561007b31SStefan Hajnoczi  */
166661007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top,
166761007b31SStefan Hajnoczi                             BlockDriverState *base,
166861007b31SStefan Hajnoczi                             int64_t sector_num,
166961007b31SStefan Hajnoczi                             int nb_sectors, int *pnum)
167061007b31SStefan Hajnoczi {
167161007b31SStefan Hajnoczi     BlockDriverState *intermediate;
167261007b31SStefan Hajnoczi     int ret, n = nb_sectors;
167361007b31SStefan Hajnoczi 
167461007b31SStefan Hajnoczi     intermediate = top;
167561007b31SStefan Hajnoczi     while (intermediate && intermediate != base) {
167661007b31SStefan Hajnoczi         int pnum_inter;
167761007b31SStefan Hajnoczi         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
167861007b31SStefan Hajnoczi                                 &pnum_inter);
167961007b31SStefan Hajnoczi         if (ret < 0) {
168061007b31SStefan Hajnoczi             return ret;
168161007b31SStefan Hajnoczi         } else if (ret) {
168261007b31SStefan Hajnoczi             *pnum = pnum_inter;
168361007b31SStefan Hajnoczi             return 1;
168461007b31SStefan Hajnoczi         }
168561007b31SStefan Hajnoczi 
168661007b31SStefan Hajnoczi         /*
168761007b31SStefan Hajnoczi          * [sector_num, nb_sectors] is unallocated on top but intermediate
168861007b31SStefan Hajnoczi          * might have
168961007b31SStefan Hajnoczi          *
169061007b31SStefan Hajnoczi          * [sector_num+x, nb_sectors] allocated.
169161007b31SStefan Hajnoczi          */
169261007b31SStefan Hajnoczi         if (n > pnum_inter &&
169361007b31SStefan Hajnoczi             (intermediate == top ||
169461007b31SStefan Hajnoczi              sector_num + pnum_inter < intermediate->total_sectors)) {
169561007b31SStefan Hajnoczi             n = pnum_inter;
169661007b31SStefan Hajnoczi         }
169761007b31SStefan Hajnoczi 
1698760e0063SKevin Wolf         intermediate = backing_bs(intermediate);
169961007b31SStefan Hajnoczi     }
170061007b31SStefan Hajnoczi 
170161007b31SStefan Hajnoczi     *pnum = n;
170261007b31SStefan Hajnoczi     return 0;
170361007b31SStefan Hajnoczi }
170461007b31SStefan Hajnoczi 
170561007b31SStefan Hajnoczi int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
170661007b31SStefan Hajnoczi                           const uint8_t *buf, int nb_sectors)
170761007b31SStefan Hajnoczi {
170861007b31SStefan Hajnoczi     BlockDriver *drv = bs->drv;
170961007b31SStefan Hajnoczi     int ret;
171061007b31SStefan Hajnoczi 
171161007b31SStefan Hajnoczi     if (!drv) {
171261007b31SStefan Hajnoczi         return -ENOMEDIUM;
171361007b31SStefan Hajnoczi     }
171461007b31SStefan Hajnoczi     if (!drv->bdrv_write_compressed) {
171561007b31SStefan Hajnoczi         return -ENOTSUP;
171661007b31SStefan Hajnoczi     }
171761007b31SStefan Hajnoczi     ret = bdrv_check_request(bs, sector_num, nb_sectors);
171861007b31SStefan Hajnoczi     if (ret < 0) {
171961007b31SStefan Hajnoczi         return ret;
172061007b31SStefan Hajnoczi     }
172161007b31SStefan Hajnoczi 
172261007b31SStefan Hajnoczi     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
172361007b31SStefan Hajnoczi 
172461007b31SStefan Hajnoczi     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
172561007b31SStefan Hajnoczi }
172661007b31SStefan Hajnoczi 
172761007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
172861007b31SStefan Hajnoczi                       int64_t pos, int size)
172961007b31SStefan Hajnoczi {
173061007b31SStefan Hajnoczi     QEMUIOVector qiov;
173161007b31SStefan Hajnoczi     struct iovec iov = {
173261007b31SStefan Hajnoczi         .iov_base = (void *) buf,
173361007b31SStefan Hajnoczi         .iov_len = size,
173461007b31SStefan Hajnoczi     };
173561007b31SStefan Hajnoczi 
173661007b31SStefan Hajnoczi     qemu_iovec_init_external(&qiov, &iov, 1);
173761007b31SStefan Hajnoczi     return bdrv_writev_vmstate(bs,
&qiov, pos); 173861007b31SStefan Hajnoczi } 173961007b31SStefan Hajnoczi 174061007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 174161007b31SStefan Hajnoczi { 174261007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 174361007b31SStefan Hajnoczi 174461007b31SStefan Hajnoczi if (!drv) { 174561007b31SStefan Hajnoczi return -ENOMEDIUM; 174661007b31SStefan Hajnoczi } else if (drv->bdrv_save_vmstate) { 174761007b31SStefan Hajnoczi return drv->bdrv_save_vmstate(bs, qiov, pos); 174861007b31SStefan Hajnoczi } else if (bs->file) { 17499a4f4c31SKevin Wolf return bdrv_writev_vmstate(bs->file->bs, qiov, pos); 175061007b31SStefan Hajnoczi } 175161007b31SStefan Hajnoczi 175261007b31SStefan Hajnoczi return -ENOTSUP; 175361007b31SStefan Hajnoczi } 175461007b31SStefan Hajnoczi 175561007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 175661007b31SStefan Hajnoczi int64_t pos, int size) 175761007b31SStefan Hajnoczi { 175861007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 175961007b31SStefan Hajnoczi if (!drv) 176061007b31SStefan Hajnoczi return -ENOMEDIUM; 176161007b31SStefan Hajnoczi if (drv->bdrv_load_vmstate) 176261007b31SStefan Hajnoczi return drv->bdrv_load_vmstate(bs, buf, pos, size); 176361007b31SStefan Hajnoczi if (bs->file) 17649a4f4c31SKevin Wolf return bdrv_load_vmstate(bs->file->bs, buf, pos, size); 176561007b31SStefan Hajnoczi return -ENOTSUP; 176661007b31SStefan Hajnoczi } 176761007b31SStefan Hajnoczi 176861007b31SStefan Hajnoczi /**************************************************************/ 176961007b31SStefan Hajnoczi /* async I/Os */ 177061007b31SStefan Hajnoczi 177161007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 177261007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 177361007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 177461007b31SStefan Hajnoczi { 177561007b31SStefan Hajnoczi trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 177661007b31SStefan Hajnoczi 177761007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 177861007b31SStefan Hajnoczi cb, opaque, false); 177961007b31SStefan Hajnoczi } 178061007b31SStefan Hajnoczi 178161007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 178261007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 178361007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 178461007b31SStefan Hajnoczi { 178561007b31SStefan Hajnoczi trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 178661007b31SStefan Hajnoczi 178761007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 178861007b31SStefan Hajnoczi cb, opaque, true); 178961007b31SStefan Hajnoczi } 179061007b31SStefan Hajnoczi 179161007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, 179261007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, 179361007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 179461007b31SStefan Hajnoczi { 179561007b31SStefan Hajnoczi trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); 179661007b31SStefan Hajnoczi 179761007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, 179861007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags, 179961007b31SStefan Hajnoczi cb, opaque, true); 180061007b31SStefan Hajnoczi } 180161007b31SStefan Hajnoczi 180261007b31SStefan Hajnoczi 180361007b31SStefan 
Hajnoczi typedef struct MultiwriteCB { 180461007b31SStefan Hajnoczi int error; 180561007b31SStefan Hajnoczi int num_requests; 180661007b31SStefan Hajnoczi int num_callbacks; 180761007b31SStefan Hajnoczi struct { 180861007b31SStefan Hajnoczi BlockCompletionFunc *cb; 180961007b31SStefan Hajnoczi void *opaque; 181061007b31SStefan Hajnoczi QEMUIOVector *free_qiov; 181161007b31SStefan Hajnoczi } callbacks[]; 181261007b31SStefan Hajnoczi } MultiwriteCB; 181361007b31SStefan Hajnoczi 181461007b31SStefan Hajnoczi static void multiwrite_user_cb(MultiwriteCB *mcb) 181561007b31SStefan Hajnoczi { 181661007b31SStefan Hajnoczi int i; 181761007b31SStefan Hajnoczi 181861007b31SStefan Hajnoczi for (i = 0; i < mcb->num_callbacks; i++) { 181961007b31SStefan Hajnoczi mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); 182061007b31SStefan Hajnoczi if (mcb->callbacks[i].free_qiov) { 182161007b31SStefan Hajnoczi qemu_iovec_destroy(mcb->callbacks[i].free_qiov); 182261007b31SStefan Hajnoczi } 182361007b31SStefan Hajnoczi g_free(mcb->callbacks[i].free_qiov); 182461007b31SStefan Hajnoczi } 182561007b31SStefan Hajnoczi } 182661007b31SStefan Hajnoczi 182761007b31SStefan Hajnoczi static void multiwrite_cb(void *opaque, int ret) 182861007b31SStefan Hajnoczi { 182961007b31SStefan Hajnoczi MultiwriteCB *mcb = opaque; 183061007b31SStefan Hajnoczi 183161007b31SStefan Hajnoczi trace_multiwrite_cb(mcb, ret); 183261007b31SStefan Hajnoczi 183361007b31SStefan Hajnoczi if (ret < 0 && !mcb->error) { 183461007b31SStefan Hajnoczi mcb->error = ret; 183561007b31SStefan Hajnoczi } 183661007b31SStefan Hajnoczi 183761007b31SStefan Hajnoczi mcb->num_requests--; 183861007b31SStefan Hajnoczi if (mcb->num_requests == 0) { 183961007b31SStefan Hajnoczi multiwrite_user_cb(mcb); 184061007b31SStefan Hajnoczi g_free(mcb); 184161007b31SStefan Hajnoczi } 184261007b31SStefan Hajnoczi } 184361007b31SStefan Hajnoczi 184461007b31SStefan Hajnoczi static int multiwrite_req_compare(const void *a, const void *b) 184561007b31SStefan Hajnoczi { 184661007b31SStefan Hajnoczi const BlockRequest *req1 = a, *req2 = b; 184761007b31SStefan Hajnoczi 184861007b31SStefan Hajnoczi /* 184961007b31SStefan Hajnoczi * Note that we can't simply subtract req2->sector from req1->sector 185061007b31SStefan Hajnoczi * here as that could overflow the return value. 185161007b31SStefan Hajnoczi */ 185261007b31SStefan Hajnoczi if (req1->sector > req2->sector) { 185361007b31SStefan Hajnoczi return 1; 185461007b31SStefan Hajnoczi } else if (req1->sector < req2->sector) { 185561007b31SStefan Hajnoczi return -1; 185661007b31SStefan Hajnoczi } else { 185761007b31SStefan Hajnoczi return 0; 185861007b31SStefan Hajnoczi } 185961007b31SStefan Hajnoczi } 186061007b31SStefan Hajnoczi 186161007b31SStefan Hajnoczi /* 186261007b31SStefan Hajnoczi * Takes a bunch of requests and tries to merge them. Returns the number of 186361007b31SStefan Hajnoczi * requests that remain after merging. 186461007b31SStefan Hajnoczi */ 186561007b31SStefan Hajnoczi static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, 186661007b31SStefan Hajnoczi int num_reqs, MultiwriteCB *mcb) 186761007b31SStefan Hajnoczi { 186861007b31SStefan Hajnoczi int i, outidx; 186961007b31SStefan Hajnoczi 187061007b31SStefan Hajnoczi // Sort requests by start sector 187161007b31SStefan Hajnoczi qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); 187261007b31SStefan Hajnoczi 187361007b31SStefan Hajnoczi // Check if adjacent requests touch the same clusters. 
    // If so, combine them,
187461007b31SStefan Hajnoczi     // filling up gaps with zero sectors.
187561007b31SStefan Hajnoczi     outidx = 0;
187661007b31SStefan Hajnoczi     for (i = 1; i < num_reqs; i++) {
187761007b31SStefan Hajnoczi         int merge = 0;
187861007b31SStefan Hajnoczi         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
187961007b31SStefan Hajnoczi 
188061007b31SStefan Hajnoczi         // Handle exactly sequential writes and overlapping writes.
188161007b31SStefan Hajnoczi         if (reqs[i].sector <= oldreq_last) {
188261007b31SStefan Hajnoczi             merge = 1;
188361007b31SStefan Hajnoczi         }
188461007b31SStefan Hajnoczi 
188561007b31SStefan Hajnoczi         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
188661007b31SStefan Hajnoczi             merge = 0;
188761007b31SStefan Hajnoczi         }
188861007b31SStefan Hajnoczi 
188961007b31SStefan Hajnoczi         if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
189061007b31SStefan Hajnoczi             reqs[i].nb_sectors > bs->bl.max_transfer_length) {
189161007b31SStefan Hajnoczi             merge = 0;
189261007b31SStefan Hajnoczi         }
189361007b31SStefan Hajnoczi 
189461007b31SStefan Hajnoczi         if (merge) {
189561007b31SStefan Hajnoczi             size_t size;
189661007b31SStefan Hajnoczi             QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
189761007b31SStefan Hajnoczi             qemu_iovec_init(qiov,
189861007b31SStefan Hajnoczi                             reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
189961007b31SStefan Hajnoczi 
190061007b31SStefan Hajnoczi             // Add the first request to the merged one. If the requests are
190161007b31SStefan Hajnoczi             // overlapping, drop the last sectors of the first request.
190261007b31SStefan Hajnoczi             size = (reqs[i].sector - reqs[outidx].sector) << 9;
190361007b31SStefan Hajnoczi             qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
190461007b31SStefan Hajnoczi 
190561007b31SStefan Hajnoczi             // We shouldn't need to add any zeros between the two requests
190661007b31SStefan Hajnoczi             assert (reqs[i].sector <= oldreq_last);
190761007b31SStefan Hajnoczi 
190861007b31SStefan Hajnoczi             // Add the second request
190961007b31SStefan Hajnoczi             qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
191061007b31SStefan Hajnoczi 
191161007b31SStefan Hajnoczi             // Add tail of first request, if necessary
191261007b31SStefan Hajnoczi             if (qiov->size < reqs[outidx].qiov->size) {
191361007b31SStefan Hajnoczi                 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
191461007b31SStefan Hajnoczi                                   reqs[outidx].qiov->size - qiov->size);
191561007b31SStefan Hajnoczi             }
191661007b31SStefan Hajnoczi 
191761007b31SStefan Hajnoczi             reqs[outidx].nb_sectors = qiov->size >> 9;
191861007b31SStefan Hajnoczi             reqs[outidx].qiov = qiov;
191961007b31SStefan Hajnoczi 
192061007b31SStefan Hajnoczi             mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
192161007b31SStefan Hajnoczi         } else {
192261007b31SStefan Hajnoczi             outidx++;
192361007b31SStefan Hajnoczi             reqs[outidx].sector = reqs[i].sector;
192461007b31SStefan Hajnoczi             reqs[outidx].nb_sectors = reqs[i].nb_sectors;
192561007b31SStefan Hajnoczi             reqs[outidx].qiov = reqs[i].qiov;
192661007b31SStefan Hajnoczi         }
192761007b31SStefan Hajnoczi     }
192861007b31SStefan Hajnoczi 
19297f0e9da6SMax Reitz     if (bs->blk) {
19307f0e9da6SMax Reitz         block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
19317f0e9da6SMax Reitz                               num_reqs - outidx - 1);
19327f0e9da6SMax Reitz     }
193361007b31SStefan Hajnoczi 
193461007b31SStefan Hajnoczi     return outidx + 1;
193561007b31SStefan Hajnoczi }
193661007b31SStefan Hajnoczi 
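/*
 * Worked merge example (illustrative only): two requests A = {sector 0,
 * nb_sectors 8} and B = {sector 6, nb_sectors 4} overlap, so B is folded
 * into A: the first 6 sectors come from A's qiov, all 4 of B's sectors
 * follow, and A's overlapped tail is dropped, leaving a single request
 * of 10 sectors starting at sector 0.
 */
193761007b31SStefan Hajnoczi /*
193861007b31SStefan Hajnoczi  * Submit multiple AIO write requests at once.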
193961007b31SStefan Hajnoczi  *
194061007b31SStefan Hajnoczi  * On success, the function returns 0 and all requests in the reqs array have
194161007b31SStefan Hajnoczi  * been submitted. In the error case, this function returns -1, and any of the
194261007b31SStefan Hajnoczi  * requests may or may not be submitted yet. In particular, this means that the
194361007b31SStefan Hajnoczi  * callback will be called for some of the requests but not for others. The
194461007b31SStefan Hajnoczi  * caller must check the error field of the BlockRequest to wait for the right
194561007b31SStefan Hajnoczi  * callbacks (if error != 0, no callback will be called).
194661007b31SStefan Hajnoczi  *
194761007b31SStefan Hajnoczi  * The implementation may modify the contents of the reqs array, e.g. to merge
194861007b31SStefan Hajnoczi  * requests. However, the fields opaque and error are left unmodified as they
194961007b31SStefan Hajnoczi  * are used to signal failure for a single request to the caller.
195061007b31SStefan Hajnoczi  */
195161007b31SStefan Hajnoczi int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
195261007b31SStefan Hajnoczi {
195361007b31SStefan Hajnoczi     MultiwriteCB *mcb;
195461007b31SStefan Hajnoczi     int i;
195561007b31SStefan Hajnoczi 
195661007b31SStefan Hajnoczi     /* don't submit writes if we don't have a medium */
195761007b31SStefan Hajnoczi     if (bs->drv == NULL) {
195861007b31SStefan Hajnoczi         for (i = 0; i < num_reqs; i++) {
195961007b31SStefan Hajnoczi             reqs[i].error = -ENOMEDIUM;
196061007b31SStefan Hajnoczi         }
196161007b31SStefan Hajnoczi         return -1;
196261007b31SStefan Hajnoczi     }
196361007b31SStefan Hajnoczi 
196461007b31SStefan Hajnoczi     if (num_reqs == 0) {
196561007b31SStefan Hajnoczi         return 0;
196661007b31SStefan Hajnoczi     }
196761007b31SStefan Hajnoczi 
196861007b31SStefan Hajnoczi     // Create MultiwriteCB structure
196961007b31SStefan Hajnoczi     mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
197061007b31SStefan Hajnoczi     mcb->num_requests = 0;
197161007b31SStefan Hajnoczi     mcb->num_callbacks = num_reqs;
197261007b31SStefan Hajnoczi 
197361007b31SStefan Hajnoczi     for (i = 0; i < num_reqs; i++) {
197461007b31SStefan Hajnoczi         mcb->callbacks[i].cb = reqs[i].cb;
197561007b31SStefan Hajnoczi         mcb->callbacks[i].opaque = reqs[i].opaque;
197661007b31SStefan Hajnoczi     }
197761007b31SStefan Hajnoczi 
197861007b31SStefan Hajnoczi     // Check for mergeable requests
197961007b31SStefan Hajnoczi     num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
198061007b31SStefan Hajnoczi 
198161007b31SStefan Hajnoczi     trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
198261007b31SStefan Hajnoczi 
198361007b31SStefan Hajnoczi     /* Run the aio requests.
     */
198461007b31SStefan Hajnoczi     mcb->num_requests = num_reqs;
198561007b31SStefan Hajnoczi     for (i = 0; i < num_reqs; i++) {
198661007b31SStefan Hajnoczi         bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
198761007b31SStefan Hajnoczi                               reqs[i].nb_sectors, reqs[i].flags,
198861007b31SStefan Hajnoczi                               multiwrite_cb, mcb,
198961007b31SStefan Hajnoczi                               true);
199061007b31SStefan Hajnoczi     }
199161007b31SStefan Hajnoczi 
199261007b31SStefan Hajnoczi     return 0;
199361007b31SStefan Hajnoczi }
199461007b31SStefan Hajnoczi 
199561007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb)
199661007b31SStefan Hajnoczi {
199761007b31SStefan Hajnoczi     qemu_aio_ref(acb);
199861007b31SStefan Hajnoczi     bdrv_aio_cancel_async(acb);
199961007b31SStefan Hajnoczi     while (acb->refcnt > 1) {
200061007b31SStefan Hajnoczi         if (acb->aiocb_info->get_aio_context) {
200161007b31SStefan Hajnoczi             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
200261007b31SStefan Hajnoczi         } else if (acb->bs) {
200361007b31SStefan Hajnoczi             aio_poll(bdrv_get_aio_context(acb->bs), true);
200461007b31SStefan Hajnoczi         } else {
200561007b31SStefan Hajnoczi             abort();
200661007b31SStefan Hajnoczi         }
200761007b31SStefan Hajnoczi     }
200861007b31SStefan Hajnoczi     qemu_aio_unref(acb);
200961007b31SStefan Hajnoczi }
201061007b31SStefan Hajnoczi 
201161007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
201261007b31SStefan Hajnoczi  * cancel_async; otherwise we do nothing and let the request complete normally.
201361007b31SStefan Hajnoczi  * In either case the completion callback must be called. */
201461007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
201561007b31SStefan Hajnoczi {
201661007b31SStefan Hajnoczi     if (acb->aiocb_info->cancel_async) {
201761007b31SStefan Hajnoczi         acb->aiocb_info->cancel_async(acb);
201861007b31SStefan Hajnoczi     }
201961007b31SStefan Hajnoczi }
202061007b31SStefan Hajnoczi 
202161007b31SStefan Hajnoczi /**************************************************************/
202261007b31SStefan Hajnoczi /* async block device emulation */
202361007b31SStefan Hajnoczi 
202461007b31SStefan Hajnoczi typedef struct BlockAIOCBSync {
202561007b31SStefan Hajnoczi     BlockAIOCB common;
202661007b31SStefan Hajnoczi     QEMUBH *bh;
202761007b31SStefan Hajnoczi     int ret;
202861007b31SStefan Hajnoczi     /* vector translation state */
202961007b31SStefan Hajnoczi     QEMUIOVector *qiov;
203061007b31SStefan Hajnoczi     uint8_t *bounce;
203161007b31SStefan Hajnoczi     int is_write;
203261007b31SStefan Hajnoczi } BlockAIOCBSync;
203361007b31SStefan Hajnoczi 
203461007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_aiocb_info = {
203561007b31SStefan Hajnoczi     .aiocb_size = sizeof(BlockAIOCBSync),
203661007b31SStefan Hajnoczi };
203761007b31SStefan Hajnoczi 
203861007b31SStefan Hajnoczi static void bdrv_aio_bh_cb(void *opaque)
203961007b31SStefan Hajnoczi {
204061007b31SStefan Hajnoczi     BlockAIOCBSync *acb = opaque;
204161007b31SStefan Hajnoczi 
204261007b31SStefan Hajnoczi     if (!acb->is_write && acb->ret >= 0) {
204361007b31SStefan Hajnoczi         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
204461007b31SStefan Hajnoczi     }
204561007b31SStefan Hajnoczi     qemu_vfree(acb->bounce);
204661007b31SStefan Hajnoczi     acb->common.cb(acb->common.opaque, acb->ret);
204761007b31SStefan Hajnoczi     qemu_bh_delete(acb->bh);
204861007b31SStefan Hajnoczi     acb->bh = NULL;
204961007b31SStefan Hajnoczi     qemu_aio_unref(acb);
205061007b31SStefan Hajnoczi }
205161007b31SStefan Hajnoczi 
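/*
 * Sketch of the emulated flow below (illustrative only): the sync
 * emulation performs the driver's bdrv_read()/bdrv_write() into a bounce
 * buffer immediately and defers only the completion callback to a bottom
 * half, so the callback never runs before submission returns.
 * Hypothetical timeline for a read, with my_cb a caller-supplied callback:
 *
 *     acb = bdrv_aio_readv_em(bs, 0, &qiov, 1, my_cb, NULL);
 *         // bs->drv->bdrv_read() has already filled acb->bounce here
 *     aio_poll(bdrv_get_aio_context(bs), true);
 *         // BH fires: bounce copied into qiov, my_cb(NULL, ret) runs
 */
205261007b31SStefan Hajnoczi static BlockAIOCB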
*bdrv_aio_rw_vector(BlockDriverState *bs, 205361007b31SStefan Hajnoczi int64_t sector_num, 205461007b31SStefan Hajnoczi QEMUIOVector *qiov, 205561007b31SStefan Hajnoczi int nb_sectors, 205661007b31SStefan Hajnoczi BlockCompletionFunc *cb, 205761007b31SStefan Hajnoczi void *opaque, 205861007b31SStefan Hajnoczi int is_write) 205961007b31SStefan Hajnoczi 206061007b31SStefan Hajnoczi { 206161007b31SStefan Hajnoczi BlockAIOCBSync *acb; 206261007b31SStefan Hajnoczi 206361007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); 206461007b31SStefan Hajnoczi acb->is_write = is_write; 206561007b31SStefan Hajnoczi acb->qiov = qiov; 206661007b31SStefan Hajnoczi acb->bounce = qemu_try_blockalign(bs, qiov->size); 206761007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); 206861007b31SStefan Hajnoczi 206961007b31SStefan Hajnoczi if (acb->bounce == NULL) { 207061007b31SStefan Hajnoczi acb->ret = -ENOMEM; 207161007b31SStefan Hajnoczi } else if (is_write) { 207261007b31SStefan Hajnoczi qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); 207361007b31SStefan Hajnoczi acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); 207461007b31SStefan Hajnoczi } else { 207561007b31SStefan Hajnoczi acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); 207661007b31SStefan Hajnoczi } 207761007b31SStefan Hajnoczi 207861007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 207961007b31SStefan Hajnoczi 208061007b31SStefan Hajnoczi return &acb->common; 208161007b31SStefan Hajnoczi } 208261007b31SStefan Hajnoczi 208361007b31SStefan Hajnoczi static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 208461007b31SStefan Hajnoczi int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 208561007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 208661007b31SStefan Hajnoczi { 208761007b31SStefan Hajnoczi return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 208861007b31SStefan Hajnoczi } 208961007b31SStefan Hajnoczi 209061007b31SStefan Hajnoczi static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 209161007b31SStefan Hajnoczi int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 209261007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 209361007b31SStefan Hajnoczi { 209461007b31SStefan Hajnoczi return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); 209561007b31SStefan Hajnoczi } 209661007b31SStefan Hajnoczi 209761007b31SStefan Hajnoczi 209861007b31SStefan Hajnoczi typedef struct BlockAIOCBCoroutine { 209961007b31SStefan Hajnoczi BlockAIOCB common; 210061007b31SStefan Hajnoczi BlockRequest req; 210161007b31SStefan Hajnoczi bool is_write; 210261007b31SStefan Hajnoczi bool need_bh; 210361007b31SStefan Hajnoczi bool *done; 210461007b31SStefan Hajnoczi QEMUBH* bh; 210561007b31SStefan Hajnoczi } BlockAIOCBCoroutine; 210661007b31SStefan Hajnoczi 210761007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_co_aiocb_info = { 210861007b31SStefan Hajnoczi .aiocb_size = sizeof(BlockAIOCBCoroutine), 210961007b31SStefan Hajnoczi }; 211061007b31SStefan Hajnoczi 211161007b31SStefan Hajnoczi static void bdrv_co_complete(BlockAIOCBCoroutine *acb) 211261007b31SStefan Hajnoczi { 211361007b31SStefan Hajnoczi if (!acb->need_bh) { 211461007b31SStefan Hajnoczi acb->common.cb(acb->common.opaque, acb->req.error); 211561007b31SStefan Hajnoczi qemu_aio_unref(acb); 211661007b31SStefan Hajnoczi } 211761007b31SStefan Hajnoczi } 211861007b31SStefan Hajnoczi 211961007b31SStefan Hajnoczi static void 
bdrv_co_em_bh(void *opaque) 212061007b31SStefan Hajnoczi { 212161007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 212261007b31SStefan Hajnoczi 212361007b31SStefan Hajnoczi assert(!acb->need_bh); 212461007b31SStefan Hajnoczi qemu_bh_delete(acb->bh); 212561007b31SStefan Hajnoczi bdrv_co_complete(acb); 212661007b31SStefan Hajnoczi } 212761007b31SStefan Hajnoczi 212861007b31SStefan Hajnoczi static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) 212961007b31SStefan Hajnoczi { 213061007b31SStefan Hajnoczi acb->need_bh = false; 213161007b31SStefan Hajnoczi if (acb->req.error != -EINPROGRESS) { 213261007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 213361007b31SStefan Hajnoczi 213461007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 213561007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 213661007b31SStefan Hajnoczi } 213761007b31SStefan Hajnoczi } 213861007b31SStefan Hajnoczi 213961007b31SStefan Hajnoczi /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 214061007b31SStefan Hajnoczi static void coroutine_fn bdrv_co_do_rw(void *opaque) 214161007b31SStefan Hajnoczi { 214261007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 214361007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 214461007b31SStefan Hajnoczi 214561007b31SStefan Hajnoczi if (!acb->is_write) { 214661007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 214761007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 214861007b31SStefan Hajnoczi } else { 214961007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 215061007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 215161007b31SStefan Hajnoczi } 215261007b31SStefan Hajnoczi 215361007b31SStefan Hajnoczi bdrv_co_complete(acb); 215461007b31SStefan Hajnoczi } 215561007b31SStefan Hajnoczi 215661007b31SStefan Hajnoczi static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 215761007b31SStefan Hajnoczi int64_t sector_num, 215861007b31SStefan Hajnoczi QEMUIOVector *qiov, 215961007b31SStefan Hajnoczi int nb_sectors, 216061007b31SStefan Hajnoczi BdrvRequestFlags flags, 216161007b31SStefan Hajnoczi BlockCompletionFunc *cb, 216261007b31SStefan Hajnoczi void *opaque, 216361007b31SStefan Hajnoczi bool is_write) 216461007b31SStefan Hajnoczi { 216561007b31SStefan Hajnoczi Coroutine *co; 216661007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 216761007b31SStefan Hajnoczi 216861007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 216961007b31SStefan Hajnoczi acb->need_bh = true; 217061007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 217161007b31SStefan Hajnoczi acb->req.sector = sector_num; 217261007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 217361007b31SStefan Hajnoczi acb->req.qiov = qiov; 217461007b31SStefan Hajnoczi acb->req.flags = flags; 217561007b31SStefan Hajnoczi acb->is_write = is_write; 217661007b31SStefan Hajnoczi 217761007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_co_do_rw); 217861007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 217961007b31SStefan Hajnoczi 218061007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 218161007b31SStefan Hajnoczi return &acb->common; 218261007b31SStefan Hajnoczi } 218361007b31SStefan Hajnoczi 218461007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 218561007b31SStefan Hajnoczi { 218661007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 218761007b31SStefan Hajnoczi 
BlockDriverState *bs = acb->common.bs; 218861007b31SStefan Hajnoczi 218961007b31SStefan Hajnoczi acb->req.error = bdrv_co_flush(bs); 219061007b31SStefan Hajnoczi bdrv_co_complete(acb); 219161007b31SStefan Hajnoczi } 219261007b31SStefan Hajnoczi 219361007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, 219461007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 219561007b31SStefan Hajnoczi { 219661007b31SStefan Hajnoczi trace_bdrv_aio_flush(bs, opaque); 219761007b31SStefan Hajnoczi 219861007b31SStefan Hajnoczi Coroutine *co; 219961007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 220061007b31SStefan Hajnoczi 220161007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 220261007b31SStefan Hajnoczi acb->need_bh = true; 220361007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 220461007b31SStefan Hajnoczi 220561007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 220661007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 220761007b31SStefan Hajnoczi 220861007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 220961007b31SStefan Hajnoczi return &acb->common; 221061007b31SStefan Hajnoczi } 221161007b31SStefan Hajnoczi 221261007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 221361007b31SStefan Hajnoczi { 221461007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 221561007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 221661007b31SStefan Hajnoczi 221761007b31SStefan Hajnoczi acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 221861007b31SStefan Hajnoczi bdrv_co_complete(acb); 221961007b31SStefan Hajnoczi } 222061007b31SStefan Hajnoczi 222161007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, 222261007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 222361007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 222461007b31SStefan Hajnoczi { 222561007b31SStefan Hajnoczi Coroutine *co; 222661007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 222761007b31SStefan Hajnoczi 222861007b31SStefan Hajnoczi trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 222961007b31SStefan Hajnoczi 223061007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 223161007b31SStefan Hajnoczi acb->need_bh = true; 223261007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 223361007b31SStefan Hajnoczi acb->req.sector = sector_num; 223461007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 223561007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 223661007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 223761007b31SStefan Hajnoczi 223861007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 223961007b31SStefan Hajnoczi return &acb->common; 224061007b31SStefan Hajnoczi } 224161007b31SStefan Hajnoczi 224261007b31SStefan Hajnoczi void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 224361007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 224461007b31SStefan Hajnoczi { 224561007b31SStefan Hajnoczi BlockAIOCB *acb; 224661007b31SStefan Hajnoczi 2247c84b3192SPaolo Bonzini acb = g_malloc(aiocb_info->aiocb_size); 224861007b31SStefan Hajnoczi acb->aiocb_info = aiocb_info; 224961007b31SStefan Hajnoczi acb->bs = bs; 225061007b31SStefan Hajnoczi acb->cb = cb; 225161007b31SStefan Hajnoczi acb->opaque = opaque; 225261007b31SStefan Hajnoczi acb->refcnt = 1; 225361007b31SStefan Hajnoczi return acb; 225461007b31SStefan Hajnoczi } 225561007b31SStefan Hajnoczi 
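/*
 * Reference-counting sketch (illustrative only): every ACB starts with
 * refcnt == 1 from qemu_aio_get() above, and the emulation paths drop
 * that reference from the completion path.  bdrv_aio_cancel() pins the
 * ACB while it polls:
 *
 *     qemu_aio_ref(acb);            // refcnt 1 -> 2
 *     bdrv_aio_cancel_async(acb);   // completion drops one reference
 *     // ...aio_poll() until refcnt == 1...
 *     qemu_aio_unref(acb);          // refcnt 1 -> 0, ACB freed
 */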
void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
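/*
 * The functions above convert a callback-based AIO interface into a
 * coroutine-blocking call; bdrv_co_flush() and bdrv_co_discard() below use
 * the same idiom.  Sketch for a hypothetical driver hook bdrv_aio_foo
 * (placeholder name, shown only to illustrate the pattern):
 *
 *     CoroutineIOCompletion co = {
 *         .coroutine = qemu_coroutine_self(),
 *     };
 *     BlockAIOCB *acb = drv->bdrv_aio_foo(bs, bdrv_co_io_em_complete, &co);
 *     if (!acb) {
 *         return -EIO;
 *     }
 *     qemu_coroutine_yield();   // re-entered by bdrv_co_io_em_complete()
 *     return co.ret;
 */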
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * set in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    tracked_request_end(&req);
    return ret;
}
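/*
 * Summary of the flush sequence implemented above (informational only):
 *
 *     bdrv_co_flush(bs)
 *         -> drv->bdrv_co_flush_to_os(bs)     // always, even for cache=unsafe
 *         -> drv->bdrv_co_flush_to_disk(bs)   // skipped with BDRV_O_NO_FLUSH
 *            (or drv->bdrv_aio_flush() via the bdrv_co_io_em_complete idiom)
 *         -> bdrv_co_flush(bs->file->bs)      // recurse into the protocol
 */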
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
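/*
 * The same wrapper shape is used for all synchronous entry points in this
 * file.  Sketch for a hypothetical new operation "bdrv_foo" (FooCo and
 * bdrv_foo_co_entry are placeholders, not existing code):
 *
 *     FooCo foco = { .bs = bs, .ret = NOT_DONE };
 *
 *     if (qemu_in_coroutine()) {
 *         bdrv_foo_co_entry(&foco);               // run directly
 *     } else {
 *         Coroutine *co = qemu_coroutine_create(bdrv_foo_co_entry);
 *         qemu_coroutine_enter(co, &foco);
 *         while (foco.ret == NOT_DONE) {
 *             aio_poll(bdrv_get_aio_context(bs), true);   // blocking poll
 *         }
 *     }
 *     return foco.ret;
 */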
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    BdrvTrackedRequest req;
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors,
                          BDRV_TRACKED_DISCARD);
    bdrv_set_dirty(bs, sector_num, nb_sectors);

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* Pass the clamped chunk size, not the full remaining request */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    ret = 0;
out:
    tracked_request_end(&req);
    return ret;
}
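/*
 * Worked example of the alignment logic above (illustrative numbers):
 * with bs->bl.discard_alignment == 8 and a request for sector_num == 10,
 * nb_sectors == 20, the first iteration clamps num to 8 - (10 % 8) == 6
 * sectors, so the next iteration starts at the aligned sector 16; the
 * remaining 14 sectors are then issued in aligned chunks of at most
 * max_discard sectors each.
 */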
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

typedef struct {
    CoroutineIOCompletion *co;
    QEMUBH *bh;
} BdrvIoctlCompletionData;

static void bdrv_ioctl_bh_cb(void *opaque)
{
    BdrvIoctlCompletionData *data = opaque;

    bdrv_co_io_em_complete(data->co, -ENOTSUP);
    qemu_bh_delete(data->bh);
    g_free(data);   /* allocated in bdrv_co_do_ioctl(); free it here */
}

static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        /* The coroutine yields unconditionally below, so a failed
         * submission must still re-enter it; complete with -ENOTSUP
         * from a bottom half. */
        BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
        data->bh = aio_bh_new(bdrv_get_aio_context(bs),
                              bdrv_ioctl_bh_cb, data);
        data->co = &co;
        qemu_bh_schedule(data->bh);
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}

typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;

    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}
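/*
 * Example caller (illustrative only; SG_IO and struct sg_io_hdr come from
 * Linux <scsi/sg.h> and are shown as a typical request for the SCSI
 * pass-through case this interface exists for):
 *
 *     struct sg_io_hdr hdr;
 *     ...fill in hdr...
 *     int ret = bdrv_ioctl(bs, SG_IO, &hdr);
 *     if (ret < 0) {
 *         // -ENOTSUP if the driver provides no bdrv_aio_ioctl hook
 *     }
 */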
/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);

        qemu_coroutine_enter(co, &data);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}

static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
                           unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
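/*
 * Example (illustrative): allocating a buffer that satisfies the device's
 * memory-alignment requirements, e.g. for O_DIRECT raw files:
 *
 *     void *buf = qemu_try_blockalign(bs, 4096);
 *     if (!buf) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);   // aligned allocations are freed with qemu_vfree()
 */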
/*
 * Check if all memory in this vector satisfies the minimum memory
 * alignment required by the BlockDriverState.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_plug) {
        drv->bdrv_io_plug(bs);
    } else if (bs->file) {
        bdrv_io_plug(bs->file->bs);
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_unplug) {
        drv->bdrv_io_unplug(bs);
    } else if (bs->file) {
        bdrv_io_unplug(bs->file->bs);
    }
}

void bdrv_flush_io_queue(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_flush_io_queue) {
        drv->bdrv_flush_io_queue(bs);
    } else if (bs->file) {
        bdrv_flush_io_queue(bs->file->bs);
    }
    bdrv_start_throttled_reqs(bs);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }
    bdrv_drain(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }
    aio_enable_external(bdrv_get_aio_context(bs));
}
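/*
 * Example (illustrative): bdrv_drained_begin()/bdrv_drained_end() delimit a
 * drained section.  Inside it, all in-flight requests have completed and no
 * new external requests can be submitted, so it is safe to manipulate the
 * device or graph state:
 *
 *     bdrv_drained_begin(bs);
 *     ...modify bs without racing against guest I/O...
 *     bdrv_drained_end(bs);
 *
 * The sections nest, tracked by bs->quiesce_counter.
 */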