/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};
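/*
 * Illustrative mapping from the public API below to flag combinations
 * (a summary of the calls in this file, not additional behavior):
 *
 *     qemu_bh_schedule(bh)          -> BH_SCHEDULED
 *     qemu_bh_schedule_idle(bh)     -> BH_SCHEDULED | BH_IDLE
 *     aio_bh_schedule_oneshot*()    -> BH_SCHEDULED | BH_ONESHOT
 *     qemu_bh_delete(bh)            -> BH_DELETED
 *
 * aio_bh_enqueue() additionally sets BH_PENDING when it inserts the BH
 * into ctx->bh_list.
 */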
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_fetch_and is paired with aio_bh_enqueue(). The implicit
     * memory barrier ensures that the callback sees all writes done by the
     * scheduling thread. It also ensures that the scheduling thread sees the
     * cleared flag before bh->cb has run, and thus will call aio_notify again
     * if necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}
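/*
 * Illustrative interleaving (a sketch of why BH_PENDING exists): if two
 * threads call qemu_bh_schedule() on the same BH at once, both
 * qatomic_fetch_or() calls set BH_PENDING, but only the thread that
 * observed it clear inserts the BH into ctx->bh_list.  The BH is therefore
 * enqueued exactly once until aio_bh_dequeue() clears BH_PENDING again.
 */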
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple aio_bh_poll() calls on the same AioContext cannot run
 * concurrently.
 */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous: the callback may still run once more if
 * aio_bh_poll() has already dequeued the BH.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous: the bottom half is only freed later, from
 * aio_bh_poll() or aio_ctx_finalize().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}
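/*
 * Illustrative usage from a hypothetical caller (a sketch; my_bh_cb,
 * MyState, and s are invented names):
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         MyState *s = opaque;    // hypothetical caller state
 *         ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new_full(ctx, my_bh_cb, s, "my-bh");
 *     qemu_bh_schedule(bh);       // my_bh_cb runs from ctx's aio_bh_poll()
 *     ...
 *     qemu_bh_delete(bh);         // freed later by the event loop
 *
 * For run-once callbacks, aio_bh_schedule_oneshot_full() combines
 * allocation, scheduling, and deletion.
 */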
static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
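/*
 * Worked example (illustrative): with only an idle BH scheduled,
 * aio_compute_bh_timeout() caps the timeout at 10000000 ns (10 ms).  If
 * the nearest timer deadline is 25 ms away, qemu_soonest_timeout() still
 * returns 10 ms; if it is 2 ms away, 2 ms wins.  Any non-idle scheduled
 * BH short-circuits the whole computation to 0.
 */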
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}
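/*
 * Illustrative GLib integration (a hypothetical caller, not part of this
 * file):
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);   // the attached main context holds its own ref
 *
 * After this, the prepare/check/dispatch callbacks above drive the
 * AioContext from the GLib main loop.
 */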
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
     * aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}
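/*
 * Illustrative ordering sketch (what the barriers in aio_notify() and
 * aio_ctx_prepare() guarantee together):
 *
 *     scheduling thread                    event loop thread
 *     -----------------                    -----------------
 *     write bh->flags                      notify_me |= 1
 *     smp_wmb(); notified = true           smp_mb()
 *     smp_mb()                             compute timeout (reads bh->flags)
 *     read notify_me                       block in poll()
 *     -> event_notifier_set() if set       -> woken by the notifier
 *
 * Either the event loop observes the newly scheduled BH while computing
 * its timeout, or the scheduling thread observes notify_me and kicks the
 * event notifier; a wakeup is never lost.
 */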
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}
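/*
 * Note on co_schedule_bh_cb() above: aio_co_schedule() pushes onto the head
 * of ctx->scheduled_coroutines, so the atomically detached list arrives in
 * LIFO order.  Reversing it onto the "straight" list before entering the
 * coroutines restores the FIFO order in which they were scheduled.
 */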
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
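/*
 * Illustrative setup (a hypothetical caller; error_report_err() is the
 * usual QEMU way to consume an Error):
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         return;
 *     }
 */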
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}
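/*
 * Illustrative use (hypothetical coroutine code; target_ctx is an invented
 * name): hop to another context, do some work there, then hop back:
 *
 *     AioContext *old_ctx = qemu_get_current_aio_context();
 *
 *     aio_co_reschedule_self(target_ctx);
 *     ... runs in target_ctx's event loop ...
 *     aio_co_reschedule_self(old_ctx);
 */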
void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

static __thread AioContext *my_aiocontext;

AioContext *qemu_get_current_aio_context(void)
{
    if (my_aiocontext) {
        return my_aiocontext;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!my_aiocontext);
    my_aiocontext = ctx;
}
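/*
 * Illustrative thread setup (a hypothetical event-loop thread function;
 * my_loop_thread is an invented name):
 *
 *     static void *my_loop_thread(void *opaque)
 *     {
 *         AioContext *ctx = opaque;
 *
 *         qemu_set_current_aio_context(ctx);   // once, at thread startup
 *         while (running) {
 *             aio_poll(ctx, true);
 *         }
 *         return NULL;
 *     }
 */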