/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

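/*
 * Illustrative usage sketch (not part of the original file; my_cb and
 * my_state are hypothetical).  A typical bottom half lifecycle:
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *my_state = opaque;
 *         ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);    // run my_cb in ctx's event loop ASAP
 *     ...
 *     qemu_bh_delete(bh);      // actual deletion happens in the event loop
 */
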
/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll().
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when a new BH is scheduled.
     * This is needed to avoid guest timeouts caused
     * by excessively long execution cycles.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_and is paired with aio_bh_enqueue().  The implicit memory
     * barrier ensures that the callback sees all writes done by the scheduling
     * thread.  It also ensures that the scheduling thread sees the cleared
     * flag before bh->cb has run, and thus will call aio_notify again if
     * necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                              ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}
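
/*
 * Sketch (illustrative): most callers use the aio_bh_schedule_oneshot()
 * macro from block/aio.h, which supplies the name automatically.  The BH
 * frees itself after running once, so no qemu_bh_delete() is needed:
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */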

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/*
 * aio_bh_poll() must not be called concurrently from multiple threads;
 * nested calls from within a callback are handled via ctx->bh_slice_list.
 */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Grab all currently pending BHs as one slice; BHs scheduled while the
     * slice runs land on ctx->bh_list and are handled by a later or nested
     * invocation.
     */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is async: it only clears the scheduled flag, without
 * waiting for a callback that is already running to finish.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is async. The bottom half will actually be deleted later,
 * when the event loop processes it.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}
2134f999d05SKevin Wolf 
2148c6b0356SStefan Hajnoczi static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
2154f999d05SKevin Wolf {
2164f999d05SKevin Wolf     QEMUBH *bh;
2174f999d05SKevin Wolf 
2188c6b0356SStefan Hajnoczi     QSLIST_FOREACH_RCU(bh, head, next) {
2198c6b0356SStefan Hajnoczi         if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
2208c6b0356SStefan Hajnoczi             if (bh->flags & BH_IDLE) {
2214f999d05SKevin Wolf                 /* idle bottom halves will be polled at least
2224f999d05SKevin Wolf                  * every 10ms */
223845ca10dSPaolo Bonzini                 timeout = 10000000;
2244f999d05SKevin Wolf             } else {
2254f999d05SKevin Wolf                 /* non-idle bottom halves will be executed
2264f999d05SKevin Wolf                  * immediately */
227845ca10dSPaolo Bonzini                 return 0;
2284f999d05SKevin Wolf             }
2294f999d05SKevin Wolf         }
2304f999d05SKevin Wolf     }
2314f999d05SKevin Wolf 
2328c6b0356SStefan Hajnoczi     return timeout;
2338c6b0356SStefan Hajnoczi }
2348c6b0356SStefan Hajnoczi 
2358c6b0356SStefan Hajnoczi int64_t
2368c6b0356SStefan Hajnoczi aio_compute_timeout(AioContext *ctx)
2378c6b0356SStefan Hajnoczi {
2388c6b0356SStefan Hajnoczi     BHListSlice *s;
2398c6b0356SStefan Hajnoczi     int64_t deadline;
2408c6b0356SStefan Hajnoczi     int timeout = -1;
2418c6b0356SStefan Hajnoczi 
2428c6b0356SStefan Hajnoczi     timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
2438c6b0356SStefan Hajnoczi     if (timeout == 0) {
2448c6b0356SStefan Hajnoczi         return 0;
2458c6b0356SStefan Hajnoczi     }
2468c6b0356SStefan Hajnoczi 
2478c6b0356SStefan Hajnoczi     QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
2488c6b0356SStefan Hajnoczi         timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
2498c6b0356SStefan Hajnoczi         if (timeout == 0) {
2508c6b0356SStefan Hajnoczi             return 0;
2518c6b0356SStefan Hajnoczi         }
2528c6b0356SStefan Hajnoczi     }
2538c6b0356SStefan Hajnoczi 
254845ca10dSPaolo Bonzini     deadline = timerlistgroup_deadline_ns(&ctx->tlg);
255533a8cf3SAlex Bligh     if (deadline == 0) {
256845ca10dSPaolo Bonzini         return 0;
257533a8cf3SAlex Bligh     } else {
258845ca10dSPaolo Bonzini         return qemu_soonest_timeout(timeout, deadline);
259845ca10dSPaolo Bonzini     }
260533a8cf3SAlex Bligh }
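
/*
 * Worked example (illustrative): with no scheduled BHs and the nearest
 * timer 5 ms away, aio_compute_timeout() returns 5000000 ns.  An idle BH
 * would cap the timeout at 10000000 ns, so qemu_soonest_timeout() still
 * picks the 5 ms deadline; any non-idle scheduled BH forces a 0 timeout.
 */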

static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}
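
/*
 * Sketch (assumption, not upstream code): embedding an AioContext into a
 * GLib main loop.  g_source_attach() keeps its own reference, so the one
 * returned by aio_get_g_source() is dropped right after attaching:
 *
 *     GSource *source = aio_get_g_source(ctx);
 *     g_source_attach(source, g_main_context_default());
 *     g_source_unref(source);
 */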

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
     * aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}
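
/*
 * Summary of the notify_me handshake (derived from the comments above;
 * illustrative, not upstream code):
 *
 *     waiter (aio_ctx_prepare/aio_poll)      notifier (aio_notify)
 *     ---------------------------------      ---------------------
 *     notify_me |= 1                         notified = true
 *     smp_mb()                               smp_mb()
 *     compute timeout (read bh flags)        if (notify_me)
 *     block in poll                              event_notifier_set()
 *
 * Either the waiter observes the new work before blocking, or the
 * notifier observes notify_me and kicks the event notifier; the paired
 * barriers make it impossible to miss both.
 */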

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    /* aio_co_schedule() prepends to the list, so reverse it first to enter
     * the coroutines in FIFO order.
     */
    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
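
/*
 * Sketch (illustrative): creating and releasing a standalone AioContext.
 * The context is reference counted through its GSource, so the final
 * aio_context_unref() triggers aio_ctx_finalize():
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ...
 *     aio_context_unref(ctx);
 */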

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                           __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}
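
/*
 * Sketch (illustrative; my_iothread_ctx is hypothetical): a coroutine
 * hopping to another context to touch state owned by that context's
 * thread, then hopping back:
 *
 *     void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         AioContext *home = qemu_get_current_aio_context();
 *
 *         aio_co_reschedule_self(my_iothread_ctx);
 *         ...                     // now running in my_iothread_ctx
 *         aio_co_reschedule_self(home);
 *     }
 */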

void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}
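
/*
 * Sketch (illustrative; MyRequest is hypothetical): a completion callback
 * waking the coroutine that waits on it.  aio_co_wake() is safe from any
 * thread because it falls back to aio_co_schedule() when the contexts
 * differ:
 *
 *     static void my_complete_cb(void *opaque)
 *     {
 *         MyRequest *req = opaque;
 *
 *         req->done = true;
 *         aio_co_wake(req->co);
 *     }
 */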

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}
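
/*
 * Sketch (illustrative; should_stop is hypothetical): an event loop
 * thread binds its AioContext once at startup and then services it,
 * roughly what the iothread implementation does:
 *
 *     static void *my_loop_thread(void *opaque)
 *     {
 *         AioContext *ctx = opaque;
 *
 *         qemu_set_current_aio_context(ctx);
 *         while (!should_stop) {
 *             aio_poll(ctx, true);
 *         }
 *         return NULL;
 *     }
 */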

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
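
/*
 * Usage sketch (illustrative): pinning the pool to a fixed size keeps
 * worker threads alive between bursts of requests:
 *
 *     Error *local_err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 4, 4, &local_err);
 */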