/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_group_config(bs, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/O requests */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;
    bdrv_start_throttled_reqs(bs);
    throttle_group_unregister_bs(bs);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
    assert(!bs->io_limits_enabled);
    throttle_group_register_bs(bs, group);
    bs->io_limits_enabled = true;
}

void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
    /* this bs is not part of any group */
    if (!bs->throttle_state) {
        return;
    }

    /* this bs is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(bs), group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    bdrv_io_limits_disable(bs);
    bdrv_io_limits_enable(bs, group);
}
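/* Illustrative usage sketch (the group name "limits0" is hypothetical):
 * moving a BlockDriverState between throttle groups is just the
 * disable/enable pair above behind two early-out checks.
 *
 *     bdrv_io_limits_update_group(bs, "limits0");
 *
 * This is a no-op if bs is unthrottled or already in "limits0"; otherwise bs
 * is drained out of its old group and registered with the new one.
 */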
void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
        bs->bl.max_iov = bs->file->bs->bl.max_iov;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
        bs->bl.max_iov =
            MIN(bs->bl.max_iov,
                bs->backing->bs->bl.max_iov);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
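/* Worked example of the merge rules above (the numbers are hypothetical):
 * if bs->file reports opt_mem_alignment 512 and the backing file reports
 * 4096, the merged value is MAX(512, 4096) == 4096, i.e. the strictest
 * alignment wins.  max_transfer_length instead merges with MIN_NON_ZERO so
 * that an unset (zero) limit on one child does not mask a real limit on the
 * other.
 */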
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
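/* Sketch of the reference-count semantics for two independent users:
 *
 *     bdrv_enable_copy_on_read(bs);     // user A: copy_on_read == 1
 *     bdrv_enable_copy_on_read(bs);     // user B: copy_on_read == 2
 *     bdrv_disable_copy_on_read(bs);    // user A done: still enabled
 *     bdrv_disable_copy_on_read(bs);    // user B done: disabled again
 */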
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void bdrv_drain(BlockDriverState *bs)
{
    bool busy = true;

    bdrv_drain_recurse(bs);
    while (busy) {
        /* Keep iterating */
        bdrv_flush_io_queue(bs);
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;
            bs = NULL;

            aio_context_acquire(aio_context);
            while ((bs = bdrv_next(bs))) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    bdrv_flush_io_queue(bs);
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
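/* Typical caller pattern (sketch): quiesce everything, then make it durable.
 * As the comment above bdrv_drain_all() notes, draining does not imply
 * flushing:
 *
 *     bdrv_drain_all();    // no requests in flight anywhere
 *     bdrv_flush_all();    // now force data out to stable storage
 */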
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
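/* Worked example for mark_request_serialising() (hypothetical numbers): with
 * align == 4096, a request with offset == 5000 and bytes == 2000 gets
 * overlap_offset == (5000 & ~4095) == 4096 and
 * overlap_bytes == ROUND_UP(7000, 4096) - 4096 == 4096, i.e. the serialised
 * window is widened to the containing aligned block [4096, 8192).
 */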
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
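/* Example: for a request whose serialised window is [4096, 8192), a candidate
 * at offset == 8192 with bytes == 512 does not overlap (first check), one at
 * offset == 0 with bytes == 4096 does not overlap either (second check), but
 * offset == 8191 with bytes == 1 does.  Both ranges are treated as half-open
 * intervals, so merely touching end-to-start never counts as an overlap.
 */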
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case).
                 */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In a synchronous call context the vcpu is blocked, so the throttling
     * timer will never fire; therefore I/O throttling has to be disabled
     * here if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}
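/* Illustrative synchronous read (sketch): pull the first sector into a local
 * buffer.  Only appropriate outside coroutine context on a code path that may
 * block, per the bdrv_prwv_co() notes above.
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *     if (bdrv_read(bs, 0, buf, 1) < 0) {
 *         // handle I/O error
 *     }
 */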
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
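/* Usage sketch: zero a whole device while letting the driver unmap whatever
 * ranges it can instead of writing explicit zeroes:
 *
 *     ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *
 * Ranges that bdrv_get_block_status() already reports as BDRV_BLOCK_ZERO are
 * skipped, so on a freshly created sparse image this is mostly a no-op.
 */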
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
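/* Sketch: a metadata update that must hit stable storage before anything that
 * follows it, e.g. an image header (the header struct is hypothetical).  The
 * flush inside bdrv_pwrite_sync() is what provides the barrier described
 * above:
 *
 *     ret = bdrv_pwrite_sync(bs, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;   // nothing issued after the header yet, so no later
 *     }                 // write can have been reordered ahead of it
 */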
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
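/* Worked example (hypothetical 64 KiB clusters, so c == 128 sectors): a guest
 * read of sectors [130, 132) is rounded out to cluster sectors [128, 256).
 * The whole cluster is read into the bounce buffer and written back, then
 * skip_bytes == (130 - 128) * BDRV_SECTOR_SIZE == 1024 selects just the two
 * requested sectors to copy into the guest's qiov.
 */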
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them.
         */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
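/* Example of the zero-beyond-EOF path: with a 100-sector image and an aligned
 * read of sectors [96, 104), the driver is only asked for the in-range part
 * of the request, and everything in the qiov past sector 100 is filled with
 * zeroes by the qemu_iovec_memset() above.
 */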
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        throttle_group_co_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_NO_SERIALISING);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}

#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    qemu_vfree(iov.iov_base);
    return ret;
}
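/* Worked example of the alignment logic above (hypothetical limits): with
 * write_zeroes_alignment == 8 and a request for sectors [5, 29), the loop
 * issues three driver calls: sectors [5, 8) (head up to the first aligned
 * sector), [8, 24) (the aligned bulk), and [24, 29) (the unaligned tail).
 */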
110161007b31SStefan Hajnoczi */ 110261007b31SStefan Hajnoczi if (num < max_xfer_len) { 110361007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 110461007b31SStefan Hajnoczi iov.iov_base = NULL; 110561007b31SStefan Hajnoczi } 110661007b31SStefan Hajnoczi } 110761007b31SStefan Hajnoczi 110861007b31SStefan Hajnoczi sector_num += num; 110961007b31SStefan Hajnoczi nb_sectors -= num; 111061007b31SStefan Hajnoczi } 111161007b31SStefan Hajnoczi 111261007b31SStefan Hajnoczi fail: 111361007b31SStefan Hajnoczi qemu_vfree(iov.iov_base); 111461007b31SStefan Hajnoczi return ret; 111561007b31SStefan Hajnoczi } 111661007b31SStefan Hajnoczi 111761007b31SStefan Hajnoczi /* 111861007b31SStefan Hajnoczi * Forwards an already correctly aligned write request to the BlockDriver. 111961007b31SStefan Hajnoczi */ 112061007b31SStefan Hajnoczi static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 112161007b31SStefan Hajnoczi BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 112261007b31SStefan Hajnoczi QEMUIOVector *qiov, int flags) 112361007b31SStefan Hajnoczi { 112461007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 112561007b31SStefan Hajnoczi bool waited; 112661007b31SStefan Hajnoczi int ret; 112761007b31SStefan Hajnoczi 112861007b31SStefan Hajnoczi int64_t sector_num = offset >> BDRV_SECTOR_BITS; 112961007b31SStefan Hajnoczi unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 113061007b31SStefan Hajnoczi 113161007b31SStefan Hajnoczi assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 113261007b31SStefan Hajnoczi assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 113361007b31SStefan Hajnoczi assert(!qiov || bytes == qiov->size); 1134abb06c5aSDaniel P. Berrange assert((bs->open_flags & BDRV_O_NO_IO) == 0); 113561007b31SStefan Hajnoczi 113661007b31SStefan Hajnoczi waited = wait_serialising_requests(req); 113761007b31SStefan Hajnoczi assert(!waited || !req->serialising); 113861007b31SStefan Hajnoczi assert(req->overlap_offset <= offset); 113961007b31SStefan Hajnoczi assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 114061007b31SStefan Hajnoczi 114161007b31SStefan Hajnoczi ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 114261007b31SStefan Hajnoczi 114361007b31SStefan Hajnoczi if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 114461007b31SStefan Hajnoczi !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && 114561007b31SStefan Hajnoczi qemu_iovec_is_zero(qiov)) { 114661007b31SStefan Hajnoczi flags |= BDRV_REQ_ZERO_WRITE; 114761007b31SStefan Hajnoczi if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 114861007b31SStefan Hajnoczi flags |= BDRV_REQ_MAY_UNMAP; 114961007b31SStefan Hajnoczi } 115061007b31SStefan Hajnoczi } 115161007b31SStefan Hajnoczi 115261007b31SStefan Hajnoczi if (ret < 0) { 115361007b31SStefan Hajnoczi /* Do nothing, write notifier decided to fail this request */ 115461007b31SStefan Hajnoczi } else if (flags & BDRV_REQ_ZERO_WRITE) { 11559a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 115661007b31SStefan Hajnoczi ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); 1157*93f5e6d8SKevin Wolf } else if (drv->bdrv_co_writev_flags) { 1158*93f5e6d8SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV); 1159*93f5e6d8SKevin Wolf ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov, 1160*93f5e6d8SKevin Wolf flags); 116161007b31SStefan Hajnoczi } else { 1162*93f5e6d8SKevin Wolf assert(drv->supported_write_flags == 0); 11639a4f4c31SKevin Wolf bdrv_debug_event(bs, 
BLKDBG_PWRITEV); 116461007b31SStefan Hajnoczi ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); 116561007b31SStefan Hajnoczi } 11669a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 116761007b31SStefan Hajnoczi 1168*93f5e6d8SKevin Wolf if (ret == 0 && (flags & BDRV_REQ_FUA) && 1169*93f5e6d8SKevin Wolf !(drv->supported_write_flags & BDRV_REQ_FUA)) 1170*93f5e6d8SKevin Wolf { 117161007b31SStefan Hajnoczi ret = bdrv_co_flush(bs); 117261007b31SStefan Hajnoczi } 117361007b31SStefan Hajnoczi 117461007b31SStefan Hajnoczi bdrv_set_dirty(bs, sector_num, nb_sectors); 117561007b31SStefan Hajnoczi 117653d8f9d8SMax Reitz if (bs->wr_highest_offset < offset + bytes) { 117753d8f9d8SMax Reitz bs->wr_highest_offset = offset + bytes; 117853d8f9d8SMax Reitz } 117961007b31SStefan Hajnoczi 118061007b31SStefan Hajnoczi if (ret >= 0) { 118161007b31SStefan Hajnoczi bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); 118261007b31SStefan Hajnoczi } 118361007b31SStefan Hajnoczi 118461007b31SStefan Hajnoczi return ret; 118561007b31SStefan Hajnoczi } 118661007b31SStefan Hajnoczi 11879eeb6dd1SFam Zheng static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs, 11889eeb6dd1SFam Zheng int64_t offset, 11899eeb6dd1SFam Zheng unsigned int bytes, 11909eeb6dd1SFam Zheng BdrvRequestFlags flags, 11919eeb6dd1SFam Zheng BdrvTrackedRequest *req) 11929eeb6dd1SFam Zheng { 11939eeb6dd1SFam Zheng uint8_t *buf = NULL; 11949eeb6dd1SFam Zheng QEMUIOVector local_qiov; 11959eeb6dd1SFam Zheng struct iovec iov; 11969eeb6dd1SFam Zheng uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 11979eeb6dd1SFam Zheng unsigned int head_padding_bytes, tail_padding_bytes; 11989eeb6dd1SFam Zheng int ret = 0; 11999eeb6dd1SFam Zheng 12009eeb6dd1SFam Zheng head_padding_bytes = offset & (align - 1); 12019eeb6dd1SFam Zheng tail_padding_bytes = align - ((offset + bytes) & (align - 1)); 12029eeb6dd1SFam Zheng 12039eeb6dd1SFam Zheng 12049eeb6dd1SFam Zheng assert(flags & BDRV_REQ_ZERO_WRITE); 12059eeb6dd1SFam Zheng if (head_padding_bytes || tail_padding_bytes) { 12069eeb6dd1SFam Zheng buf = qemu_blockalign(bs, align); 12079eeb6dd1SFam Zheng iov = (struct iovec) { 12089eeb6dd1SFam Zheng .iov_base = buf, 12099eeb6dd1SFam Zheng .iov_len = align, 12109eeb6dd1SFam Zheng }; 12119eeb6dd1SFam Zheng qemu_iovec_init_external(&local_qiov, &iov, 1); 12129eeb6dd1SFam Zheng } 12139eeb6dd1SFam Zheng if (head_padding_bytes) { 12149eeb6dd1SFam Zheng uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 12159eeb6dd1SFam Zheng 12169eeb6dd1SFam Zheng /* RMW the unaligned part before head. 
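* Read the aligned block containing 'offset', zero only the requested
* bytes inside the bounce buffer, and write the whole block back.  For
* example, with align = 512 and offset = 300, bytes 0..511 are read,
* bytes 300..511 (fewer, if the request ends earlier) are zeroed in the
* buffer, and all 512 bytes are written back.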
*/ 12179eeb6dd1SFam Zheng mark_request_serialising(req, align); 12189eeb6dd1SFam Zheng wait_serialising_requests(req); 12199a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 12209eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align, 12219eeb6dd1SFam Zheng align, &local_qiov, 0); 12229eeb6dd1SFam Zheng if (ret < 0) { 12239eeb6dd1SFam Zheng goto fail; 12249eeb6dd1SFam Zheng } 12259a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 12269eeb6dd1SFam Zheng 12279eeb6dd1SFam Zheng memset(buf + head_padding_bytes, 0, zero_bytes); 12289eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align, 12299eeb6dd1SFam Zheng &local_qiov, 12309eeb6dd1SFam Zheng flags & ~BDRV_REQ_ZERO_WRITE); 12319eeb6dd1SFam Zheng if (ret < 0) { 12329eeb6dd1SFam Zheng goto fail; 12339eeb6dd1SFam Zheng } 12349eeb6dd1SFam Zheng offset += zero_bytes; 12359eeb6dd1SFam Zheng bytes -= zero_bytes; 12369eeb6dd1SFam Zheng } 12379eeb6dd1SFam Zheng 12389eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 12399eeb6dd1SFam Zheng if (bytes >= align) { 12409eeb6dd1SFam Zheng /* Write the aligned part in the middle. */ 12419eeb6dd1SFam Zheng uint64_t aligned_bytes = bytes & ~(align - 1); 12429eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, 12439eeb6dd1SFam Zheng NULL, flags); 12449eeb6dd1SFam Zheng if (ret < 0) { 12459eeb6dd1SFam Zheng goto fail; 12469eeb6dd1SFam Zheng } 12479eeb6dd1SFam Zheng bytes -= aligned_bytes; 12489eeb6dd1SFam Zheng offset += aligned_bytes; 12499eeb6dd1SFam Zheng } 12509eeb6dd1SFam Zheng 12519eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0); 12529eeb6dd1SFam Zheng if (bytes) { 12539eeb6dd1SFam Zheng assert(align == tail_padding_bytes + bytes); 12549eeb6dd1SFam Zheng /* RMW the unaligned part after tail. 
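* At this point 'offset' is aligned and less than one full block remains
* (see the assertion above), so read the aligned block at 'offset', zero
* its first 'bytes' bytes in the bounce buffer, and write the whole block
* back, preserving whatever data follows the end of the request.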
*/ 12559eeb6dd1SFam Zheng mark_request_serialising(req, align); 12569eeb6dd1SFam Zheng wait_serialising_requests(req); 12579a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 12589eeb6dd1SFam Zheng ret = bdrv_aligned_preadv(bs, req, offset, align, 12599eeb6dd1SFam Zheng align, &local_qiov, 0); 12609eeb6dd1SFam Zheng if (ret < 0) { 12619eeb6dd1SFam Zheng goto fail; 12629eeb6dd1SFam Zheng } 12639a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 12649eeb6dd1SFam Zheng 12659eeb6dd1SFam Zheng memset(buf, 0, bytes); 12669eeb6dd1SFam Zheng ret = bdrv_aligned_pwritev(bs, req, offset, align, 12679eeb6dd1SFam Zheng &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 12689eeb6dd1SFam Zheng } 12699eeb6dd1SFam Zheng fail: 12709eeb6dd1SFam Zheng qemu_vfree(buf); 12719eeb6dd1SFam Zheng return ret; 12729eeb6dd1SFam Zheng 12739eeb6dd1SFam Zheng } 12749eeb6dd1SFam Zheng 127561007b31SStefan Hajnoczi /* 127661007b31SStefan Hajnoczi * Handle a write request in coroutine context 127761007b31SStefan Hajnoczi */ 1278a8823a3bSKevin Wolf int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, 127961007b31SStefan Hajnoczi int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 128061007b31SStefan Hajnoczi BdrvRequestFlags flags) 128161007b31SStefan Hajnoczi { 128261007b31SStefan Hajnoczi BdrvTrackedRequest req; 1283d01c07f2SFam Zheng /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 1284d01c07f2SFam Zheng uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 128561007b31SStefan Hajnoczi uint8_t *head_buf = NULL; 128661007b31SStefan Hajnoczi uint8_t *tail_buf = NULL; 128761007b31SStefan Hajnoczi QEMUIOVector local_qiov; 128861007b31SStefan Hajnoczi bool use_local_qiov = false; 128961007b31SStefan Hajnoczi int ret; 129061007b31SStefan Hajnoczi 129161007b31SStefan Hajnoczi if (!bs->drv) { 129261007b31SStefan Hajnoczi return -ENOMEDIUM; 129361007b31SStefan Hajnoczi } 129461007b31SStefan Hajnoczi if (bs->read_only) { 1295eaf5fe2dSPaolo Bonzini return -EPERM; 129661007b31SStefan Hajnoczi } 129704c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 129861007b31SStefan Hajnoczi 129961007b31SStefan Hajnoczi ret = bdrv_check_byte_request(bs, offset, bytes); 130061007b31SStefan Hajnoczi if (ret < 0) { 130161007b31SStefan Hajnoczi return ret; 130261007b31SStefan Hajnoczi } 130361007b31SStefan Hajnoczi 130461007b31SStefan Hajnoczi /* throttling disk I/O */ 130561007b31SStefan Hajnoczi if (bs->io_limits_enabled) { 130676f4afb4SAlberto Garcia throttle_group_co_io_limits_intercept(bs, bytes, true); 130761007b31SStefan Hajnoczi } 130861007b31SStefan Hajnoczi 130961007b31SStefan Hajnoczi /* 131061007b31SStefan Hajnoczi * Align write if necessary by performing a read-modify-write cycle. 131161007b31SStefan Hajnoczi * Pad qiov with the read parts and be sure to have a tracked request not 131261007b31SStefan Hajnoczi * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
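* Tracking the reads as well keeps concurrent writers out of the padding
* area between our read and our write-back; otherwise the RMW cycle could
* silently overwrite their data with the stale bytes read earlier.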
131361007b31SStefan Hajnoczi */ 1314ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 131561007b31SStefan Hajnoczi 13169eeb6dd1SFam Zheng if (!qiov) { 13179eeb6dd1SFam Zheng ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); 13189eeb6dd1SFam Zheng goto out; 13199eeb6dd1SFam Zheng } 13209eeb6dd1SFam Zheng 132161007b31SStefan Hajnoczi if (offset & (align - 1)) { 132261007b31SStefan Hajnoczi QEMUIOVector head_qiov; 132361007b31SStefan Hajnoczi struct iovec head_iov; 132461007b31SStefan Hajnoczi 132561007b31SStefan Hajnoczi mark_request_serialising(&req, align); 132661007b31SStefan Hajnoczi wait_serialising_requests(&req); 132761007b31SStefan Hajnoczi 132861007b31SStefan Hajnoczi head_buf = qemu_blockalign(bs, align); 132961007b31SStefan Hajnoczi head_iov = (struct iovec) { 133061007b31SStefan Hajnoczi .iov_base = head_buf, 133161007b31SStefan Hajnoczi .iov_len = align, 133261007b31SStefan Hajnoczi }; 133361007b31SStefan Hajnoczi qemu_iovec_init_external(&head_qiov, &head_iov, 1); 133461007b31SStefan Hajnoczi 13359a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 133661007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 133761007b31SStefan Hajnoczi align, &head_qiov, 0); 133861007b31SStefan Hajnoczi if (ret < 0) { 133961007b31SStefan Hajnoczi goto fail; 134061007b31SStefan Hajnoczi } 13419a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 134261007b31SStefan Hajnoczi 134361007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 2); 134461007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 134561007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 134661007b31SStefan Hajnoczi use_local_qiov = true; 134761007b31SStefan Hajnoczi 134861007b31SStefan Hajnoczi bytes += offset & (align - 1); 134961007b31SStefan Hajnoczi offset = offset & ~(align - 1); 135061007b31SStefan Hajnoczi } 135161007b31SStefan Hajnoczi 135261007b31SStefan Hajnoczi if ((offset + bytes) & (align - 1)) { 135361007b31SStefan Hajnoczi QEMUIOVector tail_qiov; 135461007b31SStefan Hajnoczi struct iovec tail_iov; 135561007b31SStefan Hajnoczi size_t tail_bytes; 135661007b31SStefan Hajnoczi bool waited; 135761007b31SStefan Hajnoczi 135861007b31SStefan Hajnoczi mark_request_serialising(&req, align); 135961007b31SStefan Hajnoczi waited = wait_serialising_requests(&req); 136061007b31SStefan Hajnoczi assert(!waited || !use_local_qiov); 136161007b31SStefan Hajnoczi 136261007b31SStefan Hajnoczi tail_buf = qemu_blockalign(bs, align); 136361007b31SStefan Hajnoczi tail_iov = (struct iovec) { 136461007b31SStefan Hajnoczi .iov_base = tail_buf, 136561007b31SStefan Hajnoczi .iov_len = align, 136661007b31SStefan Hajnoczi }; 136761007b31SStefan Hajnoczi qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 136861007b31SStefan Hajnoczi 13699a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 137061007b31SStefan Hajnoczi ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 137161007b31SStefan Hajnoczi align, &tail_qiov, 0); 137261007b31SStefan Hajnoczi if (ret < 0) { 137361007b31SStefan Hajnoczi goto fail; 137461007b31SStefan Hajnoczi } 13759a4f4c31SKevin Wolf bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 137661007b31SStefan Hajnoczi 137761007b31SStefan Hajnoczi if (!use_local_qiov) { 137861007b31SStefan Hajnoczi qemu_iovec_init(&local_qiov, qiov->niov + 1); 137961007b31SStefan Hajnoczi qemu_iovec_concat(&local_qiov, qiov, 0, 
qiov->size); 138061007b31SStefan Hajnoczi use_local_qiov = true; 138161007b31SStefan Hajnoczi } 138261007b31SStefan Hajnoczi 138361007b31SStefan Hajnoczi tail_bytes = (offset + bytes) & (align - 1); 138461007b31SStefan Hajnoczi qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 138561007b31SStefan Hajnoczi 138661007b31SStefan Hajnoczi bytes = ROUND_UP(bytes, align); 138761007b31SStefan Hajnoczi } 138861007b31SStefan Hajnoczi 138961007b31SStefan Hajnoczi ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, 139061007b31SStefan Hajnoczi use_local_qiov ? &local_qiov : qiov, 139161007b31SStefan Hajnoczi flags); 139261007b31SStefan Hajnoczi 139361007b31SStefan Hajnoczi fail: 139461007b31SStefan Hajnoczi 139561007b31SStefan Hajnoczi if (use_local_qiov) { 139661007b31SStefan Hajnoczi qemu_iovec_destroy(&local_qiov); 139761007b31SStefan Hajnoczi } 139861007b31SStefan Hajnoczi qemu_vfree(head_buf); 139961007b31SStefan Hajnoczi qemu_vfree(tail_buf); 14009eeb6dd1SFam Zheng out: 14019eeb6dd1SFam Zheng tracked_request_end(&req); 140261007b31SStefan Hajnoczi return ret; 140361007b31SStefan Hajnoczi } 140461007b31SStefan Hajnoczi 140561007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, 140661007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 140761007b31SStefan Hajnoczi BdrvRequestFlags flags) 140861007b31SStefan Hajnoczi { 140961007b31SStefan Hajnoczi if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 141061007b31SStefan Hajnoczi return -EINVAL; 141161007b31SStefan Hajnoczi } 141261007b31SStefan Hajnoczi 141361007b31SStefan Hajnoczi return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS, 141461007b31SStefan Hajnoczi nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 141561007b31SStefan Hajnoczi } 141661007b31SStefan Hajnoczi 141761007b31SStefan Hajnoczi int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, 141861007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *qiov) 141961007b31SStefan Hajnoczi { 142061007b31SStefan Hajnoczi trace_bdrv_co_writev(bs, sector_num, nb_sectors); 142161007b31SStefan Hajnoczi 142261007b31SStefan Hajnoczi return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); 142361007b31SStefan Hajnoczi } 142461007b31SStefan Hajnoczi 142561007b31SStefan Hajnoczi int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, 142661007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 142761007b31SStefan Hajnoczi BdrvRequestFlags flags) 142861007b31SStefan Hajnoczi { 142961007b31SStefan Hajnoczi trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); 143061007b31SStefan Hajnoczi 143161007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 143261007b31SStefan Hajnoczi flags &= ~BDRV_REQ_MAY_UNMAP; 143361007b31SStefan Hajnoczi } 143461007b31SStefan Hajnoczi 1435d01c07f2SFam Zheng return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, 143661007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags); 143761007b31SStefan Hajnoczi } 143861007b31SStefan Hajnoczi 143961007b31SStefan Hajnoczi typedef struct BdrvCoGetBlockStatusData { 144061007b31SStefan Hajnoczi BlockDriverState *bs; 144161007b31SStefan Hajnoczi BlockDriverState *base; 144267a0fd2aSFam Zheng BlockDriverState **file; 144361007b31SStefan Hajnoczi int64_t sector_num; 144461007b31SStefan Hajnoczi int nb_sectors; 144561007b31SStefan Hajnoczi int *pnum; 144661007b31SStefan Hajnoczi int64_t ret; 144761007b31SStefan Hajnoczi bool done; 144861007b31SStefan Hajnoczi } BdrvCoGetBlockStatusData; 
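/*
* The struct above packages the arguments and the result of
* bdrv_co_get_block_status_above() so that a synchronous caller can run it
* inside a coroutine and poll for completion.  A minimal sketch of the
* pattern (error handling omitted; see bdrv_get_block_status_above() below
* for the real implementation):
*
*     BdrvCoGetBlockStatusData data = { .bs = bs, .done = false, ... };
*     Coroutine *co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
*     qemu_coroutine_enter(co, &data);
*     while (!data.done) {
*         aio_poll(bdrv_get_aio_context(bs), true);
*     }
*/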
144961007b31SStefan Hajnoczi 145061007b31SStefan Hajnoczi /* 145161007b31SStefan Hajnoczi * Returns the allocation status of the specified sectors. 145261007b31SStefan Hajnoczi * Drivers not implementing the functionality are assumed to not support 145361007b31SStefan Hajnoczi * backing files, hence all their sectors are reported as allocated. 145461007b31SStefan Hajnoczi * 145561007b31SStefan Hajnoczi * If 'sector_num' is beyond the end of the disk image the return value is 0 145661007b31SStefan Hajnoczi * and 'pnum' is set to 0. 145761007b31SStefan Hajnoczi * 145861007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 145961007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 146061007b31SStefan Hajnoczi * allocated/unallocated state. 146161007b31SStefan Hajnoczi * 146261007b31SStefan Hajnoczi * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 146361007b31SStefan Hajnoczi * beyond the end of the disk image it will be clamped. 146467a0fd2aSFam Zheng * 146567a0fd2aSFam Zheng * If the returned value is positive and the BDRV_BLOCK_OFFSET_VALID bit is 146667a0fd2aSFam Zheng * set, 'file' points to the BDS in which the sector range is allocated. 146761007b31SStefan Hajnoczi */ 146861007b31SStefan Hajnoczi static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 146961007b31SStefan Hajnoczi int64_t sector_num, 147067a0fd2aSFam Zheng int nb_sectors, int *pnum, 147167a0fd2aSFam Zheng BlockDriverState **file) 147261007b31SStefan Hajnoczi { 147361007b31SStefan Hajnoczi int64_t total_sectors; 147461007b31SStefan Hajnoczi int64_t n; 147561007b31SStefan Hajnoczi int64_t ret, ret2; 147661007b31SStefan Hajnoczi 147761007b31SStefan Hajnoczi total_sectors = bdrv_nb_sectors(bs); 147861007b31SStefan Hajnoczi if (total_sectors < 0) { 147961007b31SStefan Hajnoczi return total_sectors; 148061007b31SStefan Hajnoczi } 148161007b31SStefan Hajnoczi 148261007b31SStefan Hajnoczi if (sector_num >= total_sectors) { 148361007b31SStefan Hajnoczi *pnum = 0; 148461007b31SStefan Hajnoczi return 0; 148561007b31SStefan Hajnoczi } 148661007b31SStefan Hajnoczi 148761007b31SStefan Hajnoczi n = total_sectors - sector_num; 148861007b31SStefan Hajnoczi if (n < nb_sectors) { 148961007b31SStefan Hajnoczi nb_sectors = n; 149061007b31SStefan Hajnoczi } 149161007b31SStefan Hajnoczi 149261007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_get_block_status) { 149361007b31SStefan Hajnoczi *pnum = nb_sectors; 149461007b31SStefan Hajnoczi ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 149561007b31SStefan Hajnoczi if (bs->drv->protocol_name) { 149661007b31SStefan Hajnoczi ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 149761007b31SStefan Hajnoczi } 149861007b31SStefan Hajnoczi return ret; 149961007b31SStefan Hajnoczi } 150061007b31SStefan Hajnoczi 150167a0fd2aSFam Zheng *file = NULL; 150267a0fd2aSFam Zheng ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum, 150367a0fd2aSFam Zheng file); 150461007b31SStefan Hajnoczi if (ret < 0) { 150561007b31SStefan Hajnoczi *pnum = 0; 150661007b31SStefan Hajnoczi return ret; 150761007b31SStefan Hajnoczi } 150861007b31SStefan Hajnoczi 150961007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_RAW) { 151061007b31SStefan Hajnoczi assert(ret & BDRV_BLOCK_OFFSET_VALID); 15119a4f4c31SKevin Wolf return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS, 151267a0fd2aSFam Zheng *pnum, pnum, file); 151361007b31SStefan Hajnoczi } 151461007b31SStefan Hajnoczi 151561007b31SStefan
Hajnoczi if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 151661007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ALLOCATED; 1517a53f1a95SPaolo Bonzini } else { 151861007b31SStefan Hajnoczi if (bdrv_unallocated_blocks_are_zero(bs)) { 151961007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 1520760e0063SKevin Wolf } else if (bs->backing) { 1521760e0063SKevin Wolf BlockDriverState *bs2 = bs->backing->bs; 152261007b31SStefan Hajnoczi int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 152361007b31SStefan Hajnoczi if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { 152461007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 152561007b31SStefan Hajnoczi } 152661007b31SStefan Hajnoczi } 152761007b31SStefan Hajnoczi } 152861007b31SStefan Hajnoczi 1529ac987b30SFam Zheng if (*file && *file != bs && 153061007b31SStefan Hajnoczi (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 153161007b31SStefan Hajnoczi (ret & BDRV_BLOCK_OFFSET_VALID)) { 153267a0fd2aSFam Zheng BlockDriverState *file2; 153361007b31SStefan Hajnoczi int file_pnum; 153461007b31SStefan Hajnoczi 1535ac987b30SFam Zheng ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS, 153667a0fd2aSFam Zheng *pnum, &file_pnum, &file2); 153761007b31SStefan Hajnoczi if (ret2 >= 0) { 153861007b31SStefan Hajnoczi /* Ignore errors. This is just providing extra information, it 153961007b31SStefan Hajnoczi * is useful but not necessary. 154061007b31SStefan Hajnoczi */ 154161007b31SStefan Hajnoczi if (!file_pnum) { 154261007b31SStefan Hajnoczi /* !file_pnum indicates an offset at or beyond the EOF; it is 154361007b31SStefan Hajnoczi * perfectly valid for the format block driver to point to such 154461007b31SStefan Hajnoczi * offsets, so catch it and mark everything as zero */ 154561007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO; 154661007b31SStefan Hajnoczi } else { 154761007b31SStefan Hajnoczi /* Limit request to the range reported by the protocol driver */ 154861007b31SStefan Hajnoczi *pnum = file_pnum; 154961007b31SStefan Hajnoczi ret |= (ret2 & BDRV_BLOCK_ZERO); 155061007b31SStefan Hajnoczi } 155161007b31SStefan Hajnoczi } 155261007b31SStefan Hajnoczi } 155361007b31SStefan Hajnoczi 155461007b31SStefan Hajnoczi return ret; 155561007b31SStefan Hajnoczi } 155661007b31SStefan Hajnoczi 1557ba3f0e25SFam Zheng static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs, 1558ba3f0e25SFam Zheng BlockDriverState *base, 1559ba3f0e25SFam Zheng int64_t sector_num, 1560ba3f0e25SFam Zheng int nb_sectors, 156167a0fd2aSFam Zheng int *pnum, 156267a0fd2aSFam Zheng BlockDriverState **file) 1563ba3f0e25SFam Zheng { 1564ba3f0e25SFam Zheng BlockDriverState *p; 1565ba3f0e25SFam Zheng int64_t ret = 0; 1566ba3f0e25SFam Zheng 1567ba3f0e25SFam Zheng assert(bs != base); 1568760e0063SKevin Wolf for (p = bs; p != base; p = backing_bs(p)) { 156967a0fd2aSFam Zheng ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file); 1570ba3f0e25SFam Zheng if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) { 1571ba3f0e25SFam Zheng break; 1572ba3f0e25SFam Zheng } 1573ba3f0e25SFam Zheng /* [sector_num, pnum] unallocated on this layer, which could be only 1574ba3f0e25SFam Zheng * the first part of [sector_num, nb_sectors]. 
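* Clamp nb_sectors before descending, so the next layer is only asked
* about the range that every layer above it has reported unallocated.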
*/ 1575ba3f0e25SFam Zheng nb_sectors = MIN(nb_sectors, *pnum); 1576ba3f0e25SFam Zheng } 1577ba3f0e25SFam Zheng return ret; 1578ba3f0e25SFam Zheng } 1579ba3f0e25SFam Zheng 1580ba3f0e25SFam Zheng /* Coroutine wrapper for bdrv_get_block_status_above() */ 1581ba3f0e25SFam Zheng static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque) 158261007b31SStefan Hajnoczi { 158361007b31SStefan Hajnoczi BdrvCoGetBlockStatusData *data = opaque; 158461007b31SStefan Hajnoczi 1585ba3f0e25SFam Zheng data->ret = bdrv_co_get_block_status_above(data->bs, data->base, 1586ba3f0e25SFam Zheng data->sector_num, 1587ba3f0e25SFam Zheng data->nb_sectors, 158867a0fd2aSFam Zheng data->pnum, 158967a0fd2aSFam Zheng data->file); 159061007b31SStefan Hajnoczi data->done = true; 159161007b31SStefan Hajnoczi } 159261007b31SStefan Hajnoczi 159361007b31SStefan Hajnoczi /* 1594ba3f0e25SFam Zheng * Synchronous wrapper around bdrv_co_get_block_status_above(). 159561007b31SStefan Hajnoczi * 1596ba3f0e25SFam Zheng * See bdrv_co_get_block_status_above() for details. 159761007b31SStefan Hajnoczi */ 1598ba3f0e25SFam Zheng int64_t bdrv_get_block_status_above(BlockDriverState *bs, 1599ba3f0e25SFam Zheng BlockDriverState *base, 1600ba3f0e25SFam Zheng int64_t sector_num, 160167a0fd2aSFam Zheng int nb_sectors, int *pnum, 160267a0fd2aSFam Zheng BlockDriverState **file) 160361007b31SStefan Hajnoczi { 160461007b31SStefan Hajnoczi Coroutine *co; 160561007b31SStefan Hajnoczi BdrvCoGetBlockStatusData data = { 160661007b31SStefan Hajnoczi .bs = bs, 1607ba3f0e25SFam Zheng .base = base, 160867a0fd2aSFam Zheng .file = file, 160961007b31SStefan Hajnoczi .sector_num = sector_num, 161061007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 161161007b31SStefan Hajnoczi .pnum = pnum, 161261007b31SStefan Hajnoczi .done = false, 161361007b31SStefan Hajnoczi }; 161461007b31SStefan Hajnoczi 161561007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 161661007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 1617ba3f0e25SFam Zheng bdrv_get_block_status_above_co_entry(&data); 161861007b31SStefan Hajnoczi } else { 161961007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 162061007b31SStefan Hajnoczi 1621ba3f0e25SFam Zheng co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry); 162261007b31SStefan Hajnoczi qemu_coroutine_enter(co, &data); 162361007b31SStefan Hajnoczi while (!data.done) { 162461007b31SStefan Hajnoczi aio_poll(aio_context, true); 162561007b31SStefan Hajnoczi } 162661007b31SStefan Hajnoczi } 162761007b31SStefan Hajnoczi return data.ret; 162861007b31SStefan Hajnoczi } 162961007b31SStefan Hajnoczi 1630ba3f0e25SFam Zheng int64_t bdrv_get_block_status(BlockDriverState *bs, 1631ba3f0e25SFam Zheng int64_t sector_num, 163267a0fd2aSFam Zheng int nb_sectors, int *pnum, 163367a0fd2aSFam Zheng BlockDriverState **file) 1634ba3f0e25SFam Zheng { 1635760e0063SKevin Wolf return bdrv_get_block_status_above(bs, backing_bs(bs), 163667a0fd2aSFam Zheng sector_num, nb_sectors, pnum, file); 1637ba3f0e25SFam Zheng } 1638ba3f0e25SFam Zheng 163961007b31SStefan Hajnoczi int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 164061007b31SStefan Hajnoczi int nb_sectors, int *pnum) 164161007b31SStefan Hajnoczi { 164267a0fd2aSFam Zheng BlockDriverState *file; 164367a0fd2aSFam Zheng int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum, 164467a0fd2aSFam Zheng &file); 164561007b31SStefan Hajnoczi if (ret < 0) { 164661007b31SStefan Hajnoczi return ret; 164761007b31SStefan Hajnoczi } 
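/* Collapse the full status word into a plain boolean: only the
* BDRV_BLOCK_ALLOCATED bit matters to callers of this helper. */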
164861007b31SStefan Hajnoczi return !!(ret & BDRV_BLOCK_ALLOCATED); 164961007b31SStefan Hajnoczi } 165061007b31SStefan Hajnoczi 165161007b31SStefan Hajnoczi /* 165261007b31SStefan Hajnoczi * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 165361007b31SStefan Hajnoczi * 165461007b31SStefan Hajnoczi * Return true if the given sector is allocated in any image between 165561007b31SStefan Hajnoczi * BASE and TOP (inclusive). BASE can be NULL to check if the given 165661007b31SStefan Hajnoczi * sector is allocated in any image of the chain. Return false otherwise. 165761007b31SStefan Hajnoczi * 165861007b31SStefan Hajnoczi * 'pnum' is set to the number of sectors (including and immediately following 165961007b31SStefan Hajnoczi * the specified sector) that are known to be in the same 166061007b31SStefan Hajnoczi * allocated/unallocated state. 166161007b31SStefan Hajnoczi * 166261007b31SStefan Hajnoczi */ 166361007b31SStefan Hajnoczi int bdrv_is_allocated_above(BlockDriverState *top, 166461007b31SStefan Hajnoczi BlockDriverState *base, 166561007b31SStefan Hajnoczi int64_t sector_num, 166661007b31SStefan Hajnoczi int nb_sectors, int *pnum) 166761007b31SStefan Hajnoczi { 166861007b31SStefan Hajnoczi BlockDriverState *intermediate; 166961007b31SStefan Hajnoczi int ret, n = nb_sectors; 167061007b31SStefan Hajnoczi 167161007b31SStefan Hajnoczi intermediate = top; 167261007b31SStefan Hajnoczi while (intermediate && intermediate != base) { 167361007b31SStefan Hajnoczi int pnum_inter; 167461007b31SStefan Hajnoczi ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 167561007b31SStefan Hajnoczi &pnum_inter); 167661007b31SStefan Hajnoczi if (ret < 0) { 167761007b31SStefan Hajnoczi return ret; 167861007b31SStefan Hajnoczi } else if (ret) { 167961007b31SStefan Hajnoczi *pnum = pnum_inter; 168061007b31SStefan Hajnoczi return 1; 168161007b31SStefan Hajnoczi } 168261007b31SStefan Hajnoczi 168361007b31SStefan Hajnoczi /* 168461007b31SStefan Hajnoczi * [sector_num, nb_sectors] is unallocated on top but intermediate 168561007b31SStefan Hajnoczi * might have 168661007b31SStefan Hajnoczi * 168761007b31SStefan Hajnoczi * [sector_num+x, nb_sectors] allocated.
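* Clamp the extent we will report to what this layer can vouch for,
* unless pnum_inter is short only because the range runs to the end of a
* shorter (non-top) intermediate image: past the end of such an image
* everything is unallocated anyway.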
168861007b31SStefan Hajnoczi */ 168961007b31SStefan Hajnoczi if (n > pnum_inter && 169061007b31SStefan Hajnoczi (intermediate == top || 169161007b31SStefan Hajnoczi sector_num + pnum_inter < intermediate->total_sectors)) { 169261007b31SStefan Hajnoczi n = pnum_inter; 169361007b31SStefan Hajnoczi } 169461007b31SStefan Hajnoczi 1695760e0063SKevin Wolf intermediate = backing_bs(intermediate); 169661007b31SStefan Hajnoczi } 169761007b31SStefan Hajnoczi 169861007b31SStefan Hajnoczi *pnum = n; 169961007b31SStefan Hajnoczi return 0; 170061007b31SStefan Hajnoczi } 170161007b31SStefan Hajnoczi 170261007b31SStefan Hajnoczi int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 170361007b31SStefan Hajnoczi const uint8_t *buf, int nb_sectors) 170461007b31SStefan Hajnoczi { 170561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 170661007b31SStefan Hajnoczi int ret; 170761007b31SStefan Hajnoczi 170861007b31SStefan Hajnoczi if (!drv) { 170961007b31SStefan Hajnoczi return -ENOMEDIUM; 171061007b31SStefan Hajnoczi } 171161007b31SStefan Hajnoczi if (!drv->bdrv_write_compressed) { 171261007b31SStefan Hajnoczi return -ENOTSUP; 171361007b31SStefan Hajnoczi } 171461007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 171561007b31SStefan Hajnoczi if (ret < 0) { 171661007b31SStefan Hajnoczi return ret; 171761007b31SStefan Hajnoczi } 171861007b31SStefan Hajnoczi 171961007b31SStefan Hajnoczi assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 172061007b31SStefan Hajnoczi 172161007b31SStefan Hajnoczi return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 172261007b31SStefan Hajnoczi } 172361007b31SStefan Hajnoczi 172461007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 172561007b31SStefan Hajnoczi int64_t pos, int size) 172661007b31SStefan Hajnoczi { 172761007b31SStefan Hajnoczi QEMUIOVector qiov; 172861007b31SStefan Hajnoczi struct iovec iov = { 172961007b31SStefan Hajnoczi .iov_base = (void *) buf, 173061007b31SStefan Hajnoczi .iov_len = size, 173161007b31SStefan Hajnoczi }; 173261007b31SStefan Hajnoczi 173361007b31SStefan Hajnoczi qemu_iovec_init_external(&qiov, &iov, 1); 173461007b31SStefan Hajnoczi return bdrv_writev_vmstate(bs, &qiov, pos); 173561007b31SStefan Hajnoczi } 173661007b31SStefan Hajnoczi 173761007b31SStefan Hajnoczi int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 173861007b31SStefan Hajnoczi { 173961007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 174061007b31SStefan Hajnoczi 174161007b31SStefan Hajnoczi if (!drv) { 174261007b31SStefan Hajnoczi return -ENOMEDIUM; 174361007b31SStefan Hajnoczi } else if (drv->bdrv_save_vmstate) { 174461007b31SStefan Hajnoczi return drv->bdrv_save_vmstate(bs, qiov, pos); 174561007b31SStefan Hajnoczi } else if (bs->file) { 17469a4f4c31SKevin Wolf return bdrv_writev_vmstate(bs->file->bs, qiov, pos); 174761007b31SStefan Hajnoczi } 174861007b31SStefan Hajnoczi 174961007b31SStefan Hajnoczi return -ENOTSUP; 175061007b31SStefan Hajnoczi } 175161007b31SStefan Hajnoczi 175261007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 175361007b31SStefan Hajnoczi int64_t pos, int size) 175461007b31SStefan Hajnoczi { 175561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 175661007b31SStefan Hajnoczi if (!drv) 175761007b31SStefan Hajnoczi return -ENOMEDIUM; 175861007b31SStefan Hajnoczi if (drv->bdrv_load_vmstate) 175961007b31SStefan Hajnoczi return drv->bdrv_load_vmstate(bs, buf, pos, size); 176061007b31SStefan Hajnoczi if (bs->file) 
17619a4f4c31SKevin Wolf return bdrv_load_vmstate(bs->file->bs, buf, pos, size); 176261007b31SStefan Hajnoczi return -ENOTSUP; 176361007b31SStefan Hajnoczi } 176461007b31SStefan Hajnoczi 176561007b31SStefan Hajnoczi /**************************************************************/ 176661007b31SStefan Hajnoczi /* async I/Os */ 176761007b31SStefan Hajnoczi 176861007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 176961007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 177061007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 177161007b31SStefan Hajnoczi { 177261007b31SStefan Hajnoczi trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 177361007b31SStefan Hajnoczi 177461007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 177561007b31SStefan Hajnoczi cb, opaque, false); 177661007b31SStefan Hajnoczi } 177761007b31SStefan Hajnoczi 177861007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 177961007b31SStefan Hajnoczi QEMUIOVector *qiov, int nb_sectors, 178061007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 178161007b31SStefan Hajnoczi { 178261007b31SStefan Hajnoczi trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 178361007b31SStefan Hajnoczi 178461007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 178561007b31SStefan Hajnoczi cb, opaque, true); 178661007b31SStefan Hajnoczi } 178761007b31SStefan Hajnoczi 178861007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, 178961007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, 179061007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 179161007b31SStefan Hajnoczi { 179261007b31SStefan Hajnoczi trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); 179361007b31SStefan Hajnoczi 179461007b31SStefan Hajnoczi return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, 179561007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags, 179661007b31SStefan Hajnoczi cb, opaque, true); 179761007b31SStefan Hajnoczi } 179861007b31SStefan Hajnoczi 179961007b31SStefan Hajnoczi 180061007b31SStefan Hajnoczi typedef struct MultiwriteCB { 180161007b31SStefan Hajnoczi int error; 180261007b31SStefan Hajnoczi int num_requests; 180361007b31SStefan Hajnoczi int num_callbacks; 180461007b31SStefan Hajnoczi struct { 180561007b31SStefan Hajnoczi BlockCompletionFunc *cb; 180661007b31SStefan Hajnoczi void *opaque; 180761007b31SStefan Hajnoczi QEMUIOVector *free_qiov; 180861007b31SStefan Hajnoczi } callbacks[]; 180961007b31SStefan Hajnoczi } MultiwriteCB; 181061007b31SStefan Hajnoczi 181161007b31SStefan Hajnoczi static void multiwrite_user_cb(MultiwriteCB *mcb) 181261007b31SStefan Hajnoczi { 181361007b31SStefan Hajnoczi int i; 181461007b31SStefan Hajnoczi 181561007b31SStefan Hajnoczi for (i = 0; i < mcb->num_callbacks; i++) { 181661007b31SStefan Hajnoczi mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); 181761007b31SStefan Hajnoczi if (mcb->callbacks[i].free_qiov) { 181861007b31SStefan Hajnoczi qemu_iovec_destroy(mcb->callbacks[i].free_qiov); 181961007b31SStefan Hajnoczi } 182061007b31SStefan Hajnoczi g_free(mcb->callbacks[i].free_qiov); 182161007b31SStefan Hajnoczi } 182261007b31SStefan Hajnoczi } 182361007b31SStefan Hajnoczi 182461007b31SStefan Hajnoczi static void multiwrite_cb(void *opaque, int ret) 182561007b31SStefan Hajnoczi { 182661007b31SStefan Hajnoczi MultiwriteCB *mcb = opaque; 
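/* Shared completion callback for every submitted sub-request: remember
* the first error and run the user callbacks once the last sub-request
* has finished. */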
182761007b31SStefan Hajnoczi 182861007b31SStefan Hajnoczi trace_multiwrite_cb(mcb, ret); 182961007b31SStefan Hajnoczi 183061007b31SStefan Hajnoczi if (ret < 0 && !mcb->error) { 183161007b31SStefan Hajnoczi mcb->error = ret; 183261007b31SStefan Hajnoczi } 183361007b31SStefan Hajnoczi 183461007b31SStefan Hajnoczi mcb->num_requests--; 183561007b31SStefan Hajnoczi if (mcb->num_requests == 0) { 183661007b31SStefan Hajnoczi multiwrite_user_cb(mcb); 183761007b31SStefan Hajnoczi g_free(mcb); 183861007b31SStefan Hajnoczi } 183961007b31SStefan Hajnoczi } 184061007b31SStefan Hajnoczi 184161007b31SStefan Hajnoczi static int multiwrite_req_compare(const void *a, const void *b) 184261007b31SStefan Hajnoczi { 184361007b31SStefan Hajnoczi const BlockRequest *req1 = a, *req2 = b; 184461007b31SStefan Hajnoczi 184561007b31SStefan Hajnoczi /* 184661007b31SStefan Hajnoczi * Note that we can't simply subtract req2->sector from req1->sector 184761007b31SStefan Hajnoczi * here as that could overflow the return value. 184861007b31SStefan Hajnoczi */ 184961007b31SStefan Hajnoczi if (req1->sector > req2->sector) { 185061007b31SStefan Hajnoczi return 1; 185161007b31SStefan Hajnoczi } else if (req1->sector < req2->sector) { 185261007b31SStefan Hajnoczi return -1; 185361007b31SStefan Hajnoczi } else { 185461007b31SStefan Hajnoczi return 0; 185561007b31SStefan Hajnoczi } 185661007b31SStefan Hajnoczi } 185761007b31SStefan Hajnoczi 185861007b31SStefan Hajnoczi /* 185961007b31SStefan Hajnoczi * Takes a bunch of requests and tries to merge them. Returns the number of 186061007b31SStefan Hajnoczi * requests that remain after merging. 186161007b31SStefan Hajnoczi */ 186261007b31SStefan Hajnoczi static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, 186361007b31SStefan Hajnoczi int num_reqs, MultiwriteCB *mcb) 186461007b31SStefan Hajnoczi { 186561007b31SStefan Hajnoczi int i, outidx; 186661007b31SStefan Hajnoczi 186761007b31SStefan Hajnoczi // Sort requests by start sector 186861007b31SStefan Hajnoczi qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); 186961007b31SStefan Hajnoczi 187061007b31SStefan Hajnoczi // Check if adjacent requests touch the same clusters. If so, combine them. 187161007b31SStefan Hajnoczi // Only sequential or overlapping requests are merged; gaps are never filled. 187261007b31SStefan Hajnoczi outidx = 0; 187361007b31SStefan Hajnoczi for (i = 1; i < num_reqs; i++) { 187461007b31SStefan Hajnoczi int merge = 0; 187561007b31SStefan Hajnoczi int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; 187661007b31SStefan Hajnoczi 187761007b31SStefan Hajnoczi // Handle exactly sequential writes and overlapping writes.
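// reqs[i].sector <= oldreq_last means there is no gap between the end of
// the previous (merged) request and the start of this one: the two are
// either exactly adjacent or overlap, so merging needs no zero padding.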
187861007b31SStefan Hajnoczi if (reqs[i].sector <= oldreq_last) { 187961007b31SStefan Hajnoczi merge = 1; 188061007b31SStefan Hajnoczi } 188161007b31SStefan Hajnoczi 1882222565f6SStefan Hajnoczi if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > 1883222565f6SStefan Hajnoczi bs->bl.max_iov) { 188461007b31SStefan Hajnoczi merge = 0; 188561007b31SStefan Hajnoczi } 188661007b31SStefan Hajnoczi 188761007b31SStefan Hajnoczi if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors + 188861007b31SStefan Hajnoczi reqs[i].nb_sectors > bs->bl.max_transfer_length) { 188961007b31SStefan Hajnoczi merge = 0; 189061007b31SStefan Hajnoczi } 189161007b31SStefan Hajnoczi 189261007b31SStefan Hajnoczi if (merge) { 189361007b31SStefan Hajnoczi size_t size; 189461007b31SStefan Hajnoczi QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); 189561007b31SStefan Hajnoczi qemu_iovec_init(qiov, 189661007b31SStefan Hajnoczi reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); 189761007b31SStefan Hajnoczi 189861007b31SStefan Hajnoczi // Add the first request to the merged one. If the requests are 189961007b31SStefan Hajnoczi // overlapping, drop the last sectors of the first request. 190061007b31SStefan Hajnoczi size = (reqs[i].sector - reqs[outidx].sector) << 9; 190161007b31SStefan Hajnoczi qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size); 190261007b31SStefan Hajnoczi 190361007b31SStefan Hajnoczi // We should not need to add any zeros between the two requests 190461007b31SStefan Hajnoczi assert (reqs[i].sector <= oldreq_last); 190561007b31SStefan Hajnoczi 190661007b31SStefan Hajnoczi // Add the second request 190761007b31SStefan Hajnoczi qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size); 190861007b31SStefan Hajnoczi 190961007b31SStefan Hajnoczi // Add tail of first request, if necessary 191061007b31SStefan Hajnoczi if (qiov->size < reqs[outidx].qiov->size) { 191161007b31SStefan Hajnoczi qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size, 191261007b31SStefan Hajnoczi reqs[outidx].qiov->size - qiov->size); 191361007b31SStefan Hajnoczi } 191461007b31SStefan Hajnoczi 191561007b31SStefan Hajnoczi reqs[outidx].nb_sectors = qiov->size >> 9; 191661007b31SStefan Hajnoczi reqs[outidx].qiov = qiov; 191761007b31SStefan Hajnoczi 191861007b31SStefan Hajnoczi mcb->callbacks[i].free_qiov = reqs[outidx].qiov; 191961007b31SStefan Hajnoczi } else { 192061007b31SStefan Hajnoczi outidx++; 192161007b31SStefan Hajnoczi reqs[outidx].sector = reqs[i].sector; 192261007b31SStefan Hajnoczi reqs[outidx].nb_sectors = reqs[i].nb_sectors; 192361007b31SStefan Hajnoczi reqs[outidx].qiov = reqs[i].qiov; 192461007b31SStefan Hajnoczi } 192561007b31SStefan Hajnoczi } 192661007b31SStefan Hajnoczi 19277f0e9da6SMax Reitz if (bs->blk) { 19287f0e9da6SMax Reitz block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE, 19297f0e9da6SMax Reitz num_reqs - outidx - 1); 19307f0e9da6SMax Reitz } 193161007b31SStefan Hajnoczi 193261007b31SStefan Hajnoczi return outidx + 1; 193361007b31SStefan Hajnoczi } 193461007b31SStefan Hajnoczi 193561007b31SStefan Hajnoczi /* 193661007b31SStefan Hajnoczi * Submit multiple AIO write requests at once. 193761007b31SStefan Hajnoczi * 193861007b31SStefan Hajnoczi * On success, the function returns 0 and all requests in the reqs array have 193961007b31SStefan Hajnoczi * been submitted. In the error case, this function returns -1, and any of the 194061007b31SStefan Hajnoczi * requests may or may not have been submitted yet.
In particular, this means that the 194161007b31SStefan Hajnoczi * callback will be called for some of the requests but not for others. The 194261007b31SStefan Hajnoczi * caller must check the error field of the BlockRequest to wait for the right 194361007b31SStefan Hajnoczi * callbacks (if error != 0, no callback will be called). 194461007b31SStefan Hajnoczi * 194561007b31SStefan Hajnoczi * The implementation may modify the contents of the reqs array, e.g. to merge 194661007b31SStefan Hajnoczi * requests. However, the fields opaque and error are left unmodified as they 194761007b31SStefan Hajnoczi * are used to signal failure for a single request to the caller. 194861007b31SStefan Hajnoczi */ 194961007b31SStefan Hajnoczi int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) 195061007b31SStefan Hajnoczi { 195161007b31SStefan Hajnoczi MultiwriteCB *mcb; 195261007b31SStefan Hajnoczi int i; 195361007b31SStefan Hajnoczi 195461007b31SStefan Hajnoczi /* don't submit writes if we don't have a medium */ 195561007b31SStefan Hajnoczi if (bs->drv == NULL) { 195661007b31SStefan Hajnoczi for (i = 0; i < num_reqs; i++) { 195761007b31SStefan Hajnoczi reqs[i].error = -ENOMEDIUM; 195861007b31SStefan Hajnoczi } 195961007b31SStefan Hajnoczi return -1; 196061007b31SStefan Hajnoczi } 196161007b31SStefan Hajnoczi 196261007b31SStefan Hajnoczi if (num_reqs == 0) { 196361007b31SStefan Hajnoczi return 0; 196461007b31SStefan Hajnoczi } 196561007b31SStefan Hajnoczi 196661007b31SStefan Hajnoczi // Create MultiwriteCB structure 196761007b31SStefan Hajnoczi mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); 196861007b31SStefan Hajnoczi mcb->num_requests = 0; 196961007b31SStefan Hajnoczi mcb->num_callbacks = num_reqs; 197061007b31SStefan Hajnoczi 197161007b31SStefan Hajnoczi for (i = 0; i < num_reqs; i++) { 197261007b31SStefan Hajnoczi mcb->callbacks[i].cb = reqs[i].cb; 197361007b31SStefan Hajnoczi mcb->callbacks[i].opaque = reqs[i].opaque; 197461007b31SStefan Hajnoczi } 197561007b31SStefan Hajnoczi 197661007b31SStefan Hajnoczi // Check for mergeable requests 197761007b31SStefan Hajnoczi num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); 197861007b31SStefan Hajnoczi 197961007b31SStefan Hajnoczi trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); 198061007b31SStefan Hajnoczi 198161007b31SStefan Hajnoczi /* Run the aio requests.
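* num_requests is set to the full count before the first submission so
* that a request completing synchronously cannot drop the counter to
* zero, and free mcb, while later requests are still being submitted.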
*/ 198261007b31SStefan Hajnoczi mcb->num_requests = num_reqs; 198361007b31SStefan Hajnoczi for (i = 0; i < num_reqs; i++) { 198461007b31SStefan Hajnoczi bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov, 198561007b31SStefan Hajnoczi reqs[i].nb_sectors, reqs[i].flags, 198661007b31SStefan Hajnoczi multiwrite_cb, mcb, 198761007b31SStefan Hajnoczi true); 198861007b31SStefan Hajnoczi } 198961007b31SStefan Hajnoczi 199061007b31SStefan Hajnoczi return 0; 199161007b31SStefan Hajnoczi } 199261007b31SStefan Hajnoczi 199361007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb) 199461007b31SStefan Hajnoczi { 199561007b31SStefan Hajnoczi qemu_aio_ref(acb); 199661007b31SStefan Hajnoczi bdrv_aio_cancel_async(acb); 199761007b31SStefan Hajnoczi while (acb->refcnt > 1) { 199861007b31SStefan Hajnoczi if (acb->aiocb_info->get_aio_context) { 199961007b31SStefan Hajnoczi aio_poll(acb->aiocb_info->get_aio_context(acb), true); 200061007b31SStefan Hajnoczi } else if (acb->bs) { 200161007b31SStefan Hajnoczi aio_poll(bdrv_get_aio_context(acb->bs), true); 200261007b31SStefan Hajnoczi } else { 200361007b31SStefan Hajnoczi abort(); 200461007b31SStefan Hajnoczi } 200561007b31SStefan Hajnoczi } 200661007b31SStefan Hajnoczi qemu_aio_unref(acb); 200761007b31SStefan Hajnoczi } 200861007b31SStefan Hajnoczi 200961007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements 201061007b31SStefan Hajnoczi * cancel_async, otherwise we do nothing and let the request normally complete. 201161007b31SStefan Hajnoczi * In either case the completion callback must be called. */ 201261007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb) 201361007b31SStefan Hajnoczi { 201461007b31SStefan Hajnoczi if (acb->aiocb_info->cancel_async) { 201561007b31SStefan Hajnoczi acb->aiocb_info->cancel_async(acb); 201661007b31SStefan Hajnoczi } 201761007b31SStefan Hajnoczi } 201861007b31SStefan Hajnoczi 201961007b31SStefan Hajnoczi /**************************************************************/ 202061007b31SStefan Hajnoczi /* async block device emulation */ 202161007b31SStefan Hajnoczi 202261007b31SStefan Hajnoczi typedef struct BlockAIOCBSync { 202361007b31SStefan Hajnoczi BlockAIOCB common; 202461007b31SStefan Hajnoczi QEMUBH *bh; 202561007b31SStefan Hajnoczi int ret; 202661007b31SStefan Hajnoczi /* vector translation state */ 202761007b31SStefan Hajnoczi QEMUIOVector *qiov; 202861007b31SStefan Hajnoczi uint8_t *bounce; 202961007b31SStefan Hajnoczi int is_write; 203061007b31SStefan Hajnoczi } BlockAIOCBSync; 203161007b31SStefan Hajnoczi 203261007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_aiocb_info = { 203361007b31SStefan Hajnoczi .aiocb_size = sizeof(BlockAIOCBSync), 203461007b31SStefan Hajnoczi }; 203561007b31SStefan Hajnoczi 203661007b31SStefan Hajnoczi static void bdrv_aio_bh_cb(void *opaque) 203761007b31SStefan Hajnoczi { 203861007b31SStefan Hajnoczi BlockAIOCBSync *acb = opaque; 203961007b31SStefan Hajnoczi 204061007b31SStefan Hajnoczi if (!acb->is_write && acb->ret >= 0) { 204161007b31SStefan Hajnoczi qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); 204261007b31SStefan Hajnoczi } 204361007b31SStefan Hajnoczi qemu_vfree(acb->bounce); 204461007b31SStefan Hajnoczi acb->common.cb(acb->common.opaque, acb->ret); 204561007b31SStefan Hajnoczi qemu_bh_delete(acb->bh); 204661007b31SStefan Hajnoczi acb->bh = NULL; 204761007b31SStefan Hajnoczi qemu_aio_unref(acb); 204861007b31SStefan Hajnoczi } 204961007b31SStefan Hajnoczi 205061007b31SStefan Hajnoczi static BlockAIOCB 
*bdrv_aio_rw_vector(BlockDriverState *bs, 205161007b31SStefan Hajnoczi int64_t sector_num, 205261007b31SStefan Hajnoczi QEMUIOVector *qiov, 205361007b31SStefan Hajnoczi int nb_sectors, 205461007b31SStefan Hajnoczi BlockCompletionFunc *cb, 205561007b31SStefan Hajnoczi void *opaque, 205661007b31SStefan Hajnoczi int is_write) 205761007b31SStefan Hajnoczi 205861007b31SStefan Hajnoczi { 205961007b31SStefan Hajnoczi BlockAIOCBSync *acb; 206061007b31SStefan Hajnoczi 206161007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); 206261007b31SStefan Hajnoczi acb->is_write = is_write; 206361007b31SStefan Hajnoczi acb->qiov = qiov; 206461007b31SStefan Hajnoczi acb->bounce = qemu_try_blockalign(bs, qiov->size); 206561007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); 206661007b31SStefan Hajnoczi 206761007b31SStefan Hajnoczi if (acb->bounce == NULL) { 206861007b31SStefan Hajnoczi acb->ret = -ENOMEM; 206961007b31SStefan Hajnoczi } else if (is_write) { 207061007b31SStefan Hajnoczi qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); 207161007b31SStefan Hajnoczi acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); 207261007b31SStefan Hajnoczi } else { 207361007b31SStefan Hajnoczi acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); 207461007b31SStefan Hajnoczi } 207561007b31SStefan Hajnoczi 207661007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 207761007b31SStefan Hajnoczi 207861007b31SStefan Hajnoczi return &acb->common; 207961007b31SStefan Hajnoczi } 208061007b31SStefan Hajnoczi 208161007b31SStefan Hajnoczi static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 208261007b31SStefan Hajnoczi int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 208361007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 208461007b31SStefan Hajnoczi { 208561007b31SStefan Hajnoczi return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 208661007b31SStefan Hajnoczi } 208761007b31SStefan Hajnoczi 208861007b31SStefan Hajnoczi static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 208961007b31SStefan Hajnoczi int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 209061007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 209161007b31SStefan Hajnoczi { 209261007b31SStefan Hajnoczi return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); 209361007b31SStefan Hajnoczi } 209461007b31SStefan Hajnoczi 209561007b31SStefan Hajnoczi 209661007b31SStefan Hajnoczi typedef struct BlockAIOCBCoroutine { 209761007b31SStefan Hajnoczi BlockAIOCB common; 209861007b31SStefan Hajnoczi BlockRequest req; 209961007b31SStefan Hajnoczi bool is_write; 210061007b31SStefan Hajnoczi bool need_bh; 210161007b31SStefan Hajnoczi bool *done; 210261007b31SStefan Hajnoczi QEMUBH* bh; 210361007b31SStefan Hajnoczi } BlockAIOCBCoroutine; 210461007b31SStefan Hajnoczi 210561007b31SStefan Hajnoczi static const AIOCBInfo bdrv_em_co_aiocb_info = { 210661007b31SStefan Hajnoczi .aiocb_size = sizeof(BlockAIOCBCoroutine), 210761007b31SStefan Hajnoczi }; 210861007b31SStefan Hajnoczi 210961007b31SStefan Hajnoczi static void bdrv_co_complete(BlockAIOCBCoroutine *acb) 211061007b31SStefan Hajnoczi { 211161007b31SStefan Hajnoczi if (!acb->need_bh) { 211261007b31SStefan Hajnoczi acb->common.cb(acb->common.opaque, acb->req.error); 211361007b31SStefan Hajnoczi qemu_aio_unref(acb); 211461007b31SStefan Hajnoczi } 211561007b31SStefan Hajnoczi } 211661007b31SStefan Hajnoczi 211761007b31SStefan Hajnoczi static void 
bdrv_co_em_bh(void *opaque) 211861007b31SStefan Hajnoczi { 211961007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 212061007b31SStefan Hajnoczi 212161007b31SStefan Hajnoczi assert(!acb->need_bh); 212261007b31SStefan Hajnoczi qemu_bh_delete(acb->bh); 212361007b31SStefan Hajnoczi bdrv_co_complete(acb); 212461007b31SStefan Hajnoczi } 212561007b31SStefan Hajnoczi 212661007b31SStefan Hajnoczi static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) 212761007b31SStefan Hajnoczi { 212861007b31SStefan Hajnoczi acb->need_bh = false; 212961007b31SStefan Hajnoczi if (acb->req.error != -EINPROGRESS) { 213061007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 213161007b31SStefan Hajnoczi 213261007b31SStefan Hajnoczi acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 213361007b31SStefan Hajnoczi qemu_bh_schedule(acb->bh); 213461007b31SStefan Hajnoczi } 213561007b31SStefan Hajnoczi } 213661007b31SStefan Hajnoczi 213761007b31SStefan Hajnoczi /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 213861007b31SStefan Hajnoczi static void coroutine_fn bdrv_co_do_rw(void *opaque) 213961007b31SStefan Hajnoczi { 214061007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 214161007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 214261007b31SStefan Hajnoczi 214361007b31SStefan Hajnoczi if (!acb->is_write) { 214461007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 214561007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 214661007b31SStefan Hajnoczi } else { 214761007b31SStefan Hajnoczi acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 214861007b31SStefan Hajnoczi acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 214961007b31SStefan Hajnoczi } 215061007b31SStefan Hajnoczi 215161007b31SStefan Hajnoczi bdrv_co_complete(acb); 215261007b31SStefan Hajnoczi } 215361007b31SStefan Hajnoczi 215461007b31SStefan Hajnoczi static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 215561007b31SStefan Hajnoczi int64_t sector_num, 215661007b31SStefan Hajnoczi QEMUIOVector *qiov, 215761007b31SStefan Hajnoczi int nb_sectors, 215861007b31SStefan Hajnoczi BdrvRequestFlags flags, 215961007b31SStefan Hajnoczi BlockCompletionFunc *cb, 216061007b31SStefan Hajnoczi void *opaque, 216161007b31SStefan Hajnoczi bool is_write) 216261007b31SStefan Hajnoczi { 216361007b31SStefan Hajnoczi Coroutine *co; 216461007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 216561007b31SStefan Hajnoczi 216661007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 216761007b31SStefan Hajnoczi acb->need_bh = true; 216861007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 216961007b31SStefan Hajnoczi acb->req.sector = sector_num; 217061007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 217161007b31SStefan Hajnoczi acb->req.qiov = qiov; 217261007b31SStefan Hajnoczi acb->req.flags = flags; 217361007b31SStefan Hajnoczi acb->is_write = is_write; 217461007b31SStefan Hajnoczi 217561007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_co_do_rw); 217661007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 217761007b31SStefan Hajnoczi 217861007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 217961007b31SStefan Hajnoczi return &acb->common; 218061007b31SStefan Hajnoczi } 218161007b31SStefan Hajnoczi 218261007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 218361007b31SStefan Hajnoczi { 218461007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 218561007b31SStefan Hajnoczi 
BlockDriverState *bs = acb->common.bs; 218661007b31SStefan Hajnoczi 218761007b31SStefan Hajnoczi acb->req.error = bdrv_co_flush(bs); 218861007b31SStefan Hajnoczi bdrv_co_complete(acb); 218961007b31SStefan Hajnoczi } 219061007b31SStefan Hajnoczi 219161007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, 219261007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 219361007b31SStefan Hajnoczi { 219461007b31SStefan Hajnoczi trace_bdrv_aio_flush(bs, opaque); 219561007b31SStefan Hajnoczi 219661007b31SStefan Hajnoczi Coroutine *co; 219761007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 219861007b31SStefan Hajnoczi 219961007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 220061007b31SStefan Hajnoczi acb->need_bh = true; 220161007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 220261007b31SStefan Hajnoczi 220361007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 220461007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 220561007b31SStefan Hajnoczi 220661007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 220761007b31SStefan Hajnoczi return &acb->common; 220861007b31SStefan Hajnoczi } 220961007b31SStefan Hajnoczi 221061007b31SStefan Hajnoczi static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 221161007b31SStefan Hajnoczi { 221261007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb = opaque; 221361007b31SStefan Hajnoczi BlockDriverState *bs = acb->common.bs; 221461007b31SStefan Hajnoczi 221561007b31SStefan Hajnoczi acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 221661007b31SStefan Hajnoczi bdrv_co_complete(acb); 221761007b31SStefan Hajnoczi } 221861007b31SStefan Hajnoczi 221961007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, 222061007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 222161007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 222261007b31SStefan Hajnoczi { 222361007b31SStefan Hajnoczi Coroutine *co; 222461007b31SStefan Hajnoczi BlockAIOCBCoroutine *acb; 222561007b31SStefan Hajnoczi 222661007b31SStefan Hajnoczi trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 222761007b31SStefan Hajnoczi 222861007b31SStefan Hajnoczi acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 222961007b31SStefan Hajnoczi acb->need_bh = true; 223061007b31SStefan Hajnoczi acb->req.error = -EINPROGRESS; 223161007b31SStefan Hajnoczi acb->req.sector = sector_num; 223261007b31SStefan Hajnoczi acb->req.nb_sectors = nb_sectors; 223361007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 223461007b31SStefan Hajnoczi qemu_coroutine_enter(co, acb); 223561007b31SStefan Hajnoczi 223661007b31SStefan Hajnoczi bdrv_co_maybe_schedule_bh(acb); 223761007b31SStefan Hajnoczi return &acb->common; 223861007b31SStefan Hajnoczi } 223961007b31SStefan Hajnoczi 224061007b31SStefan Hajnoczi void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 224161007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 224261007b31SStefan Hajnoczi { 224361007b31SStefan Hajnoczi BlockAIOCB *acb; 224461007b31SStefan Hajnoczi 2245c84b3192SPaolo Bonzini acb = g_malloc(aiocb_info->aiocb_size); 224661007b31SStefan Hajnoczi acb->aiocb_info = aiocb_info; 224761007b31SStefan Hajnoczi acb->bs = bs; 224861007b31SStefan Hajnoczi acb->cb = cb; 224961007b31SStefan Hajnoczi acb->opaque = opaque; 225061007b31SStefan Hajnoczi acb->refcnt = 1; 225161007b31SStefan Hajnoczi return acb; 225261007b31SStefan Hajnoczi } 225361007b31SStefan Hajnoczi 
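/*
* AIOCBs returned by qemu_aio_get() start with a reference count of one;
* qemu_aio_ref() and qemu_aio_unref() below adjust it, and the ACB is
* freed once the count drops to zero.  A caller that must keep the ACB
* alive across a blocking wait pins it first, as bdrv_aio_cancel() above
* does (sketch):
*
*     qemu_aio_ref(acb);
*     bdrv_aio_cancel_async(acb);
*     while (acb->refcnt > 1) {
*         aio_poll(..., true);
*     }
*     qemu_aio_unref(acb);
*/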
225461007b31SStefan Hajnoczi void qemu_aio_ref(void *p) 225561007b31SStefan Hajnoczi { 225661007b31SStefan Hajnoczi BlockAIOCB *acb = p; 225761007b31SStefan Hajnoczi acb->refcnt++; 225861007b31SStefan Hajnoczi } 225961007b31SStefan Hajnoczi 226061007b31SStefan Hajnoczi void qemu_aio_unref(void *p) 226161007b31SStefan Hajnoczi { 226261007b31SStefan Hajnoczi BlockAIOCB *acb = p; 226361007b31SStefan Hajnoczi assert(acb->refcnt > 0); 226461007b31SStefan Hajnoczi if (--acb->refcnt == 0) { 2265c84b3192SPaolo Bonzini g_free(acb); 226661007b31SStefan Hajnoczi } 226761007b31SStefan Hajnoczi } 226861007b31SStefan Hajnoczi 226961007b31SStefan Hajnoczi /**************************************************************/ 227061007b31SStefan Hajnoczi /* Coroutine block device emulation */ 227161007b31SStefan Hajnoczi 227261007b31SStefan Hajnoczi typedef struct CoroutineIOCompletion { 227361007b31SStefan Hajnoczi Coroutine *coroutine; 227461007b31SStefan Hajnoczi int ret; 227561007b31SStefan Hajnoczi } CoroutineIOCompletion; 227661007b31SStefan Hajnoczi 227761007b31SStefan Hajnoczi static void bdrv_co_io_em_complete(void *opaque, int ret) 227861007b31SStefan Hajnoczi { 227961007b31SStefan Hajnoczi CoroutineIOCompletion *co = opaque; 228061007b31SStefan Hajnoczi 228161007b31SStefan Hajnoczi co->ret = ret; 228261007b31SStefan Hajnoczi qemu_coroutine_enter(co->coroutine, NULL); 228361007b31SStefan Hajnoczi } 228461007b31SStefan Hajnoczi 228561007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, 228661007b31SStefan Hajnoczi int nb_sectors, QEMUIOVector *iov, 228761007b31SStefan Hajnoczi bool is_write) 228861007b31SStefan Hajnoczi { 228961007b31SStefan Hajnoczi CoroutineIOCompletion co = { 229061007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 229161007b31SStefan Hajnoczi }; 229261007b31SStefan Hajnoczi BlockAIOCB *acb; 229361007b31SStefan Hajnoczi 229461007b31SStefan Hajnoczi if (is_write) { 229561007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, 229661007b31SStefan Hajnoczi bdrv_co_io_em_complete, &co); 229761007b31SStefan Hajnoczi } else { 229861007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, 229961007b31SStefan Hajnoczi bdrv_co_io_em_complete, &co); 230061007b31SStefan Hajnoczi } 230161007b31SStefan Hajnoczi 230261007b31SStefan Hajnoczi trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); 230361007b31SStefan Hajnoczi if (!acb) { 230461007b31SStefan Hajnoczi return -EIO; 230561007b31SStefan Hajnoczi } 230661007b31SStefan Hajnoczi qemu_coroutine_yield(); 230761007b31SStefan Hajnoczi 230861007b31SStefan Hajnoczi return co.ret; 230961007b31SStefan Hajnoczi } 231061007b31SStefan Hajnoczi 231161007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, 231261007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 231361007b31SStefan Hajnoczi QEMUIOVector *iov) 231461007b31SStefan Hajnoczi { 231561007b31SStefan Hajnoczi return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); 231661007b31SStefan Hajnoczi } 231761007b31SStefan Hajnoczi 231861007b31SStefan Hajnoczi static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, 231961007b31SStefan Hajnoczi int64_t sector_num, int nb_sectors, 232061007b31SStefan Hajnoczi QEMUIOVector *iov) 232161007b31SStefan Hajnoczi { 232261007b31SStefan Hajnoczi return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); 232361007b31SStefan Hajnoczi } 232461007b31SStefan Hajnoczi 232561007b31SStefan 
Hajnoczi static void coroutine_fn bdrv_flush_co_entry(void *opaque) 232661007b31SStefan Hajnoczi { 232761007b31SStefan Hajnoczi RwCo *rwco = opaque; 232861007b31SStefan Hajnoczi 232961007b31SStefan Hajnoczi rwco->ret = bdrv_co_flush(rwco->bs); 233061007b31SStefan Hajnoczi } 233161007b31SStefan Hajnoczi 233261007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 233361007b31SStefan Hajnoczi { 233461007b31SStefan Hajnoczi int ret; 2335cdb5e315SFam Zheng BdrvTrackedRequest req; 233661007b31SStefan Hajnoczi 23371b6bc94dSDimitris Aragiorgis if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 23381b6bc94dSDimitris Aragiorgis bdrv_is_sg(bs)) { 233961007b31SStefan Hajnoczi return 0; 234061007b31SStefan Hajnoczi } 234161007b31SStefan Hajnoczi 2342cdb5e315SFam Zheng tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH); 2343c32b82afSPavel Dovgalyuk 2344c32b82afSPavel Dovgalyuk /* Write back all layers by calling one driver function */ 2345c32b82afSPavel Dovgalyuk if (bs->drv->bdrv_co_flush) { 2346c32b82afSPavel Dovgalyuk ret = bs->drv->bdrv_co_flush(bs); 2347c32b82afSPavel Dovgalyuk goto out; 2348c32b82afSPavel Dovgalyuk } 2349c32b82afSPavel Dovgalyuk 235061007b31SStefan Hajnoczi /* Write back cached data to the OS even with cache=unsafe */ 235161007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 235261007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_os) { 235361007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_os(bs); 235461007b31SStefan Hajnoczi if (ret < 0) { 2355cdb5e315SFam Zheng goto out; 235661007b31SStefan Hajnoczi } 235761007b31SStefan Hajnoczi } 235861007b31SStefan Hajnoczi 235961007b31SStefan Hajnoczi /* But don't actually force it to the disk with cache=unsafe */ 236061007b31SStefan Hajnoczi if (bs->open_flags & BDRV_O_NO_FLUSH) { 236161007b31SStefan Hajnoczi goto flush_parent; 236261007b31SStefan Hajnoczi } 236361007b31SStefan Hajnoczi 236461007b31SStefan Hajnoczi BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 236561007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_disk) { 236661007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_disk(bs); 236761007b31SStefan Hajnoczi } else if (bs->drv->bdrv_aio_flush) { 236861007b31SStefan Hajnoczi BlockAIOCB *acb; 236961007b31SStefan Hajnoczi CoroutineIOCompletion co = { 237061007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 237161007b31SStefan Hajnoczi }; 237261007b31SStefan Hajnoczi 237361007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 237461007b31SStefan Hajnoczi if (acb == NULL) { 237561007b31SStefan Hajnoczi ret = -EIO; 237661007b31SStefan Hajnoczi } else { 237761007b31SStefan Hajnoczi qemu_coroutine_yield(); 237861007b31SStefan Hajnoczi ret = co.ret; 237961007b31SStefan Hajnoczi } 238061007b31SStefan Hajnoczi } else { 238161007b31SStefan Hajnoczi /* 238261007b31SStefan Hajnoczi * Some block drivers always operate in either writethrough or unsafe 238361007b31SStefan Hajnoczi * mode and therefore don't support bdrv_flush. Usually qemu doesn't 238461007b31SStefan Hajnoczi * know how the server works (because the behaviour is hardcoded or 238561007b31SStefan Hajnoczi * depends on server-side configuration), so we can't ensure that 238661007b31SStefan Hajnoczi * everything is safe on disk. Returning an error doesn't work because 238761007b31SStefan Hajnoczi * that would break guests even if the server operates in writethrough 238861007b31SStefan Hajnoczi * mode.
238961007b31SStefan Hajnoczi * 239061007b31SStefan Hajnoczi * Let's hope the user knows what he's doing. 239161007b31SStefan Hajnoczi */ 239261007b31SStefan Hajnoczi ret = 0; 239361007b31SStefan Hajnoczi } 239461007b31SStefan Hajnoczi if (ret < 0) { 2395cdb5e315SFam Zheng goto out; 239661007b31SStefan Hajnoczi } 239761007b31SStefan Hajnoczi 239861007b31SStefan Hajnoczi /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 239961007b31SStefan Hajnoczi * in the case of cache=unsafe, so there are no useless flushes. 240061007b31SStefan Hajnoczi */ 240161007b31SStefan Hajnoczi flush_parent: 2402cdb5e315SFam Zheng ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0; 2403cdb5e315SFam Zheng out: 2404cdb5e315SFam Zheng tracked_request_end(&req); 2405cdb5e315SFam Zheng return ret; 240661007b31SStefan Hajnoczi } 240761007b31SStefan Hajnoczi 240861007b31SStefan Hajnoczi int bdrv_flush(BlockDriverState *bs) 240961007b31SStefan Hajnoczi { 241061007b31SStefan Hajnoczi Coroutine *co; 241161007b31SStefan Hajnoczi RwCo rwco = { 241261007b31SStefan Hajnoczi .bs = bs, 241361007b31SStefan Hajnoczi .ret = NOT_DONE, 241461007b31SStefan Hajnoczi }; 241561007b31SStefan Hajnoczi 241661007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 241761007b31SStefan Hajnoczi /* Fast-path if already in coroutine context */ 241861007b31SStefan Hajnoczi bdrv_flush_co_entry(&rwco); 241961007b31SStefan Hajnoczi } else { 242061007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 242161007b31SStefan Hajnoczi 242261007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_flush_co_entry); 242361007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 242461007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 242561007b31SStefan Hajnoczi aio_poll(aio_context, true); 242661007b31SStefan Hajnoczi } 242761007b31SStefan Hajnoczi } 242861007b31SStefan Hajnoczi 242961007b31SStefan Hajnoczi return rwco.ret; 243061007b31SStefan Hajnoczi } 243161007b31SStefan Hajnoczi 243261007b31SStefan Hajnoczi typedef struct DiscardCo { 243361007b31SStefan Hajnoczi BlockDriverState *bs; 243461007b31SStefan Hajnoczi int64_t sector_num; 243561007b31SStefan Hajnoczi int nb_sectors; 243661007b31SStefan Hajnoczi int ret; 243761007b31SStefan Hajnoczi } DiscardCo; 243861007b31SStefan Hajnoczi static void coroutine_fn bdrv_discard_co_entry(void *opaque) 243961007b31SStefan Hajnoczi { 244061007b31SStefan Hajnoczi DiscardCo *rwco = opaque; 244161007b31SStefan Hajnoczi 244261007b31SStefan Hajnoczi rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 244361007b31SStefan Hajnoczi } 244461007b31SStefan Hajnoczi 244561007b31SStefan Hajnoczi int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, 244661007b31SStefan Hajnoczi int nb_sectors) 244761007b31SStefan Hajnoczi { 2448b1066c87SFam Zheng BdrvTrackedRequest req; 244961007b31SStefan Hajnoczi int max_discard, ret; 245061007b31SStefan Hajnoczi 245161007b31SStefan Hajnoczi if (!bs->drv) { 245261007b31SStefan Hajnoczi return -ENOMEDIUM; 245361007b31SStefan Hajnoczi } 245461007b31SStefan Hajnoczi 245561007b31SStefan Hajnoczi ret = bdrv_check_request(bs, sector_num, nb_sectors); 245661007b31SStefan Hajnoczi if (ret < 0) { 245761007b31SStefan Hajnoczi return ret; 245861007b31SStefan Hajnoczi } else if (bs->read_only) { 2459eaf5fe2dSPaolo Bonzini return -EPERM; 246061007b31SStefan Hajnoczi } 246104c01a5cSKevin Wolf assert(!(bs->open_flags & BDRV_O_INACTIVE)); 246261007b31SStefan Hajnoczi 246361007b31SStefan Hajnoczi /* Do nothing if disabled. 
*/ 246461007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) { 246561007b31SStefan Hajnoczi return 0; 246661007b31SStefan Hajnoczi } 246761007b31SStefan Hajnoczi 246861007b31SStefan Hajnoczi if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { 246961007b31SStefan Hajnoczi return 0; 247061007b31SStefan Hajnoczi } 247161007b31SStefan Hajnoczi 2472b1066c87SFam Zheng tracked_request_begin(&req, bs, sector_num, nb_sectors, 2473b1066c87SFam Zheng BDRV_TRACKED_DISCARD); 247450824995SFam Zheng bdrv_set_dirty(bs, sector_num, nb_sectors); 247550824995SFam Zheng 247661007b31SStefan Hajnoczi max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS); 247761007b31SStefan Hajnoczi while (nb_sectors > 0) { 247961007b31SStefan Hajnoczi int num = nb_sectors; 248061007b31SStefan Hajnoczi 248161007b31SStefan Hajnoczi /* align request */ 248261007b31SStefan Hajnoczi if (bs->bl.discard_alignment && 248361007b31SStefan Hajnoczi num >= bs->bl.discard_alignment && 248461007b31SStefan Hajnoczi sector_num % bs->bl.discard_alignment) { 248561007b31SStefan Hajnoczi if (num > bs->bl.discard_alignment) { 248661007b31SStefan Hajnoczi num = bs->bl.discard_alignment; 248761007b31SStefan Hajnoczi } 248861007b31SStefan Hajnoczi num -= sector_num % bs->bl.discard_alignment; 248961007b31SStefan Hajnoczi } 249061007b31SStefan Hajnoczi 249161007b31SStefan Hajnoczi /* limit request size */ 249261007b31SStefan Hajnoczi if (num > max_discard) { 249361007b31SStefan Hajnoczi num = max_discard; 249461007b31SStefan Hajnoczi } 249561007b31SStefan Hajnoczi 249661007b31SStefan Hajnoczi if (bs->drv->bdrv_co_discard) { 249761007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_discard(bs, sector_num, num); 249861007b31SStefan Hajnoczi } else { 249961007b31SStefan Hajnoczi BlockAIOCB *acb; 250061007b31SStefan Hajnoczi CoroutineIOCompletion co = { 250161007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(), 250261007b31SStefan Hajnoczi }; 250361007b31SStefan Hajnoczi 250461007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_discard(bs, sector_num, num, 250561007b31SStefan Hajnoczi bdrv_co_io_em_complete, &co); 250661007b31SStefan Hajnoczi if (acb == NULL) { 2507b1066c87SFam Zheng ret = -EIO; 2508b1066c87SFam Zheng goto out; 250961007b31SStefan Hajnoczi } else { 251061007b31SStefan Hajnoczi qemu_coroutine_yield(); 251161007b31SStefan Hajnoczi ret = co.ret; 251261007b31SStefan Hajnoczi } 251361007b31SStefan Hajnoczi } 251461007b31SStefan Hajnoczi if (ret && ret != -ENOTSUP) { 2515b1066c87SFam Zheng goto out; 251661007b31SStefan Hajnoczi } 251761007b31SStefan Hajnoczi 251861007b31SStefan Hajnoczi sector_num += num; 251961007b31SStefan Hajnoczi nb_sectors -= num; 252061007b31SStefan Hajnoczi } 2521b1066c87SFam Zheng ret = 0; 2522b1066c87SFam Zheng out: 2523b1066c87SFam Zheng tracked_request_end(&req); 2524b1066c87SFam Zheng return ret; 252561007b31SStefan Hajnoczi } 252661007b31SStefan Hajnoczi 252761007b31SStefan Hajnoczi int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) 252861007b31SStefan Hajnoczi { 252961007b31SStefan Hajnoczi Coroutine *co; 253061007b31SStefan Hajnoczi DiscardCo rwco = { 253161007b31SStefan Hajnoczi .bs = bs, 253261007b31SStefan Hajnoczi .sector_num = sector_num, 253361007b31SStefan Hajnoczi .nb_sectors = nb_sectors, 253461007b31SStefan Hajnoczi .ret = NOT_DONE, 253561007b31SStefan Hajnoczi }; 253661007b31SStefan Hajnoczi 253761007b31SStefan Hajnoczi if (qemu_in_coroutine()) { 253861007b31SStefan Hajnoczi /* Fast-path if already
in coroutine context */ 253961007b31SStefan Hajnoczi bdrv_discard_co_entry(&rwco); 254061007b31SStefan Hajnoczi } else { 254161007b31SStefan Hajnoczi AioContext *aio_context = bdrv_get_aio_context(bs); 254261007b31SStefan Hajnoczi 254361007b31SStefan Hajnoczi co = qemu_coroutine_create(bdrv_discard_co_entry); 254461007b31SStefan Hajnoczi qemu_coroutine_enter(co, &rwco); 254561007b31SStefan Hajnoczi while (rwco.ret == NOT_DONE) { 254661007b31SStefan Hajnoczi aio_poll(aio_context, true); 254761007b31SStefan Hajnoczi } 254861007b31SStefan Hajnoczi } 254961007b31SStefan Hajnoczi 255061007b31SStefan Hajnoczi return rwco.ret; 255161007b31SStefan Hajnoczi } 255261007b31SStefan Hajnoczi 25535c5ae76aSFam Zheng typedef struct { 25545c5ae76aSFam Zheng CoroutineIOCompletion *co; 25555c5ae76aSFam Zheng QEMUBH *bh; 25565c5ae76aSFam Zheng } BdrvIoctlCompletionData; 255761007b31SStefan Hajnoczi 25585c5ae76aSFam Zheng static void bdrv_ioctl_bh_cb(void *opaque) 25595c5ae76aSFam Zheng { 25605c5ae76aSFam Zheng BdrvIoctlCompletionData *data = opaque; 25615c5ae76aSFam Zheng 25625c5ae76aSFam Zheng bdrv_co_io_em_complete(data->co, -ENOTSUP); 25635c5ae76aSFam Zheng qemu_bh_delete(data->bh); 25645c5ae76aSFam Zheng } 25655c5ae76aSFam Zheng 25665c5ae76aSFam Zheng static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf) 256761007b31SStefan Hajnoczi { 256861007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 25695c5ae76aSFam Zheng BdrvTrackedRequest tracked_req; 25705c5ae76aSFam Zheng CoroutineIOCompletion co = { 25715c5ae76aSFam Zheng .coroutine = qemu_coroutine_self(), 25725c5ae76aSFam Zheng }; 25735c5ae76aSFam Zheng BlockAIOCB *acb; 257461007b31SStefan Hajnoczi 25755c5ae76aSFam Zheng tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL); 25765c5ae76aSFam Zheng if (!drv || !drv->bdrv_aio_ioctl) { 25775c5ae76aSFam Zheng co.ret = -ENOTSUP; 25785c5ae76aSFam Zheng goto out; 25795c5ae76aSFam Zheng } 25805c5ae76aSFam Zheng 25815c5ae76aSFam Zheng acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 25825c5ae76aSFam Zheng if (!acb) { 25835c5ae76aSFam Zheng BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1); 25845c5ae76aSFam Zheng data->bh = aio_bh_new(bdrv_get_aio_context(bs), 25855c5ae76aSFam Zheng bdrv_ioctl_bh_cb, data); 25865c5ae76aSFam Zheng data->co = &co; 25875c5ae76aSFam Zheng qemu_bh_schedule(data->bh); 25885c5ae76aSFam Zheng } 25895c5ae76aSFam Zheng qemu_coroutine_yield(); 25905c5ae76aSFam Zheng out: 25915c5ae76aSFam Zheng tracked_request_end(&tracked_req); 25925c5ae76aSFam Zheng return co.ret; 25935c5ae76aSFam Zheng } 25945c5ae76aSFam Zheng 25955c5ae76aSFam Zheng typedef struct { 25965c5ae76aSFam Zheng BlockDriverState *bs; 25975c5ae76aSFam Zheng int req; 25985c5ae76aSFam Zheng void *buf; 25995c5ae76aSFam Zheng int ret; 26005c5ae76aSFam Zheng } BdrvIoctlCoData; 26015c5ae76aSFam Zheng 26025c5ae76aSFam Zheng static void coroutine_fn bdrv_co_ioctl_entry(void *opaque) 26035c5ae76aSFam Zheng { 26045c5ae76aSFam Zheng BdrvIoctlCoData *data = opaque; 26055c5ae76aSFam Zheng data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf); 26065c5ae76aSFam Zheng } 26075c5ae76aSFam Zheng 26085c5ae76aSFam Zheng /* needed for generic scsi interface */ 26095c5ae76aSFam Zheng int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 26105c5ae76aSFam Zheng { 26115c5ae76aSFam Zheng BdrvIoctlCoData data = { 26125c5ae76aSFam Zheng .bs = bs, 26135c5ae76aSFam Zheng .req = req, 26145c5ae76aSFam Zheng .buf = buf, 26155c5ae76aSFam Zheng .ret = -EINPROGRESS, 26165c5ae76aSFam 
Zheng }; 26175c5ae76aSFam Zheng 26185c5ae76aSFam Zheng if (qemu_in_coroutine()) { 26195c5ae76aSFam Zheng /* Fast-path if already in coroutine context */ 26205c5ae76aSFam Zheng bdrv_co_ioctl_entry(&data); 26215c5ae76aSFam Zheng } else { 26225c5ae76aSFam Zheng Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry); 2623ba889444SPaolo Bonzini 26245c5ae76aSFam Zheng qemu_coroutine_enter(co, &data); 26255c5ae76aSFam Zheng while (data.ret == -EINPROGRESS) { 26265c5ae76aSFam Zheng aio_poll(bdrv_get_aio_context(bs), true); 26275c5ae76aSFam Zheng } 2628ba889444SPaolo Bonzini } 26295c5ae76aSFam Zheng return data.ret; 26305c5ae76aSFam Zheng } 26315c5ae76aSFam Zheng 26325c5ae76aSFam Zheng static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque) 26335c5ae76aSFam Zheng { 26345c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = opaque; 26355c5ae76aSFam Zheng acb->req.error = bdrv_co_do_ioctl(acb->common.bs, 26365c5ae76aSFam Zheng acb->req.req, acb->req.buf); 26375c5ae76aSFam Zheng bdrv_co_complete(acb); 263861007b31SStefan Hajnoczi } 263961007b31SStefan Hajnoczi 264061007b31SStefan Hajnoczi BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 264161007b31SStefan Hajnoczi unsigned long int req, void *buf, 264261007b31SStefan Hajnoczi BlockCompletionFunc *cb, void *opaque) 264361007b31SStefan Hajnoczi { 26445c5ae76aSFam Zheng BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info, 26455c5ae76aSFam Zheng bs, cb, opaque); 26465c5ae76aSFam Zheng Coroutine *co; 264761007b31SStefan Hajnoczi 26485c5ae76aSFam Zheng acb->need_bh = true; 26495c5ae76aSFam Zheng acb->req.error = -EINPROGRESS; 26505c5ae76aSFam Zheng acb->req.req = req; 26515c5ae76aSFam Zheng acb->req.buf = buf; 26525c5ae76aSFam Zheng co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry); 26535c5ae76aSFam Zheng qemu_coroutine_enter(co, acb); 26545c5ae76aSFam Zheng 26555c5ae76aSFam Zheng bdrv_co_maybe_schedule_bh(acb); 26565c5ae76aSFam Zheng return &acb->common; 265761007b31SStefan Hajnoczi } 265861007b31SStefan Hajnoczi 265961007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size) 266061007b31SStefan Hajnoczi { 266161007b31SStefan Hajnoczi return qemu_memalign(bdrv_opt_mem_align(bs), size); 266261007b31SStefan Hajnoczi } 266361007b31SStefan Hajnoczi 266461007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size) 266561007b31SStefan Hajnoczi { 266661007b31SStefan Hajnoczi return memset(qemu_blockalign(bs, size), 0, size); 266761007b31SStefan Hajnoczi } 266861007b31SStefan Hajnoczi 266961007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 267061007b31SStefan Hajnoczi { 267161007b31SStefan Hajnoczi size_t align = bdrv_opt_mem_align(bs); 267261007b31SStefan Hajnoczi 267361007b31SStefan Hajnoczi /* Ensure that NULL is never returned on success */ 267461007b31SStefan Hajnoczi assert(align > 0); 267561007b31SStefan Hajnoczi if (size == 0) { 267661007b31SStefan Hajnoczi size = align; 267761007b31SStefan Hajnoczi } 267861007b31SStefan Hajnoczi 267961007b31SStefan Hajnoczi return qemu_try_memalign(align, size); 268061007b31SStefan Hajnoczi } 268161007b31SStefan Hajnoczi 268261007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 268361007b31SStefan Hajnoczi { 268461007b31SStefan Hajnoczi void *mem = qemu_try_blockalign(bs, size); 268561007b31SStefan Hajnoczi 268661007b31SStefan Hajnoczi if (mem) { 268761007b31SStefan Hajnoczi memset(mem, 0, size); 268861007b31SStefan Hajnoczi } 268961007b31SStefan Hajnoczi 269061007b31SStefan Hajnoczi return mem; 
269161007b31SStefan Hajnoczi } 269261007b31SStefan Hajnoczi 269361007b31SStefan Hajnoczi /* 269461007b31SStefan Hajnoczi * Check if all memory in this vector is sector aligned. 269561007b31SStefan Hajnoczi */ 269661007b31SStefan Hajnoczi bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 269761007b31SStefan Hajnoczi { 269861007b31SStefan Hajnoczi int i; 26994196d2f0SDenis V. Lunev size_t alignment = bdrv_min_mem_align(bs); 270061007b31SStefan Hajnoczi 270161007b31SStefan Hajnoczi for (i = 0; i < qiov->niov; i++) { 270261007b31SStefan Hajnoczi if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 270361007b31SStefan Hajnoczi return false; 270461007b31SStefan Hajnoczi } 270561007b31SStefan Hajnoczi if (qiov->iov[i].iov_len % alignment) { 270661007b31SStefan Hajnoczi return false; 270761007b31SStefan Hajnoczi } 270861007b31SStefan Hajnoczi } 270961007b31SStefan Hajnoczi 271061007b31SStefan Hajnoczi return true; 271161007b31SStefan Hajnoczi } 271261007b31SStefan Hajnoczi 271361007b31SStefan Hajnoczi void bdrv_add_before_write_notifier(BlockDriverState *bs, 271461007b31SStefan Hajnoczi NotifierWithReturn *notifier) 271561007b31SStefan Hajnoczi { 271661007b31SStefan Hajnoczi notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 271761007b31SStefan Hajnoczi } 271861007b31SStefan Hajnoczi 271961007b31SStefan Hajnoczi void bdrv_io_plug(BlockDriverState *bs) 272061007b31SStefan Hajnoczi { 272161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 272261007b31SStefan Hajnoczi if (drv && drv->bdrv_io_plug) { 272361007b31SStefan Hajnoczi drv->bdrv_io_plug(bs); 272461007b31SStefan Hajnoczi } else if (bs->file) { 27259a4f4c31SKevin Wolf bdrv_io_plug(bs->file->bs); 272661007b31SStefan Hajnoczi } 272761007b31SStefan Hajnoczi } 272861007b31SStefan Hajnoczi 272961007b31SStefan Hajnoczi void bdrv_io_unplug(BlockDriverState *bs) 273061007b31SStefan Hajnoczi { 273161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 273261007b31SStefan Hajnoczi if (drv && drv->bdrv_io_unplug) { 273361007b31SStefan Hajnoczi drv->bdrv_io_unplug(bs); 273461007b31SStefan Hajnoczi } else if (bs->file) { 27359a4f4c31SKevin Wolf bdrv_io_unplug(bs->file->bs); 273661007b31SStefan Hajnoczi } 273761007b31SStefan Hajnoczi } 273861007b31SStefan Hajnoczi 273961007b31SStefan Hajnoczi void bdrv_flush_io_queue(BlockDriverState *bs) 274061007b31SStefan Hajnoczi { 274161007b31SStefan Hajnoczi BlockDriver *drv = bs->drv; 274261007b31SStefan Hajnoczi if (drv && drv->bdrv_flush_io_queue) { 274361007b31SStefan Hajnoczi drv->bdrv_flush_io_queue(bs); 274461007b31SStefan Hajnoczi } else if (bs->file) { 27459a4f4c31SKevin Wolf bdrv_flush_io_queue(bs->file->bs); 274661007b31SStefan Hajnoczi } 2747f406c03cSAlexander Yarygin bdrv_start_throttled_reqs(bs); 274861007b31SStefan Hajnoczi } 274951288d79SFam Zheng 275051288d79SFam Zheng void bdrv_drained_begin(BlockDriverState *bs) 275151288d79SFam Zheng { 275251288d79SFam Zheng if (!bs->quiesce_counter++) { 275351288d79SFam Zheng aio_disable_external(bdrv_get_aio_context(bs)); 275451288d79SFam Zheng } 275551288d79SFam Zheng bdrv_drain(bs); 275651288d79SFam Zheng } 275751288d79SFam Zheng 275851288d79SFam Zheng void bdrv_drained_end(BlockDriverState *bs) 275951288d79SFam Zheng { 276051288d79SFam Zheng assert(bs->quiesce_counter > 0); 276151288d79SFam Zheng if (--bs->quiesce_counter > 0) { 276251288d79SFam Zheng return; 276351288d79SFam Zheng } 276451288d79SFam Zheng aio_enable_external(bdrv_get_aio_context(bs)); 276551288d79SFam Zheng }
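The quiesce counter in bdrv_drained_begin/end above nests, so drained sections are intended as a simple bracket around work that must not run concurrently with new external I/O. A minimal usage sketch, where my_quiesced_operation() and my_update_metadata() are hypothetical stand-ins for such an operation:

static void my_quiesced_operation(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);    /* disable external I/O sources, then drain
                                * all in-flight requests */

    my_update_metadata(bs);    /* hypothetical: runs with no requests pending */

    bdrv_drained_end(bs);      /* re-enable external I/O once the quiesce
                                * counter drops back to zero */
}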