xref: /qemu/util/async.c (revision c2b38b277a7882a592f4f2ec955084b2b756daaa)
14f999d05SKevin Wolf /*
2*c2b38b27SPaolo Bonzini  * Data plane event loop
34f999d05SKevin Wolf  *
44f999d05SKevin Wolf  * Copyright (c) 2003-2008 Fabrice Bellard
5*c2b38b27SPaolo Bonzini  * Copyright (c) 2009-2017 QEMU contributors
64f999d05SKevin Wolf  *
74f999d05SKevin Wolf  * Permission is hereby granted, free of charge, to any person obtaining a copy
84f999d05SKevin Wolf  * of this software and associated documentation files (the "Software"), to deal
94f999d05SKevin Wolf  * in the Software without restriction, including without limitation the rights
104f999d05SKevin Wolf  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
114f999d05SKevin Wolf  * copies of the Software, and to permit persons to whom the Software is
124f999d05SKevin Wolf  * furnished to do so, subject to the following conditions:
134f999d05SKevin Wolf  *
144f999d05SKevin Wolf  * The above copyright notice and this permission notice shall be included in
154f999d05SKevin Wolf  * all copies or substantial portions of the Software.
164f999d05SKevin Wolf  *
174f999d05SKevin Wolf  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
184f999d05SKevin Wolf  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
194f999d05SKevin Wolf  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
204f999d05SKevin Wolf  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
214f999d05SKevin Wolf  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
224f999d05SKevin Wolf  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
234f999d05SKevin Wolf  * THE SOFTWARE.
244f999d05SKevin Wolf  */
254f999d05SKevin Wolf 
26d38ea87aSPeter Maydell #include "qemu/osdep.h"
27da34e65cSMarkus Armbruster #include "qapi/error.h"
284f999d05SKevin Wolf #include "qemu-common.h"
29737e150eSPaolo Bonzini #include "block/aio.h"
309b34277dSStefan Hajnoczi #include "block/thread-pool.h"
311de7afc9SPaolo Bonzini #include "qemu/main-loop.h"
320ceb849bSPaolo Bonzini #include "qemu/atomic.h"
330187f5c9SPaolo Bonzini #include "block/raw-aio.h"
349a1e9481SKevin Wolf 
354f999d05SKevin Wolf /***********************************************************/
364f999d05SKevin Wolf /* bottom halves (can be seen as timers which expire ASAP) */
374f999d05SKevin Wolf 
/* A bottom half: a callback deferred for execution by its AioContext's
 * event loop.  Instances live on a singly linked, lock-free list rooted
 * at ctx->first_bh and are reclaimed only by aio_bh_poll().
 */
struct QEMUBH {
    AioContext *ctx;    /* event loop this BH belongs to */
    QEMUBHFunc *cb;     /* callback invoked when the BH runs */
    void *opaque;       /* user data passed to cb */
    QEMUBH *next;       /* next entry in ctx->first_bh list */
    bool scheduled;     /* pending execution; accessed with atomics */
    bool idle;          /* idle BHs run at >=10ms granularity, not ASAP */
    bool deleted;       /* removal requested; freed during aio_bh_poll */
};
474f999d05SKevin Wolf 
/* Allocate and schedule a one-shot bottom half.  The BH is created both
 * scheduled and deleted: aio_bh_poll() runs the callback once and then
 * reclaims it on the same pass.  The caller never sees the QEMUBH
 * pointer and therefore cannot cancel or reschedule it.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    bh->scheduled = 1;
    bh->deleted = 1;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
    /* Wake the event loop so the BH runs promptly */
    aio_notify(ctx);
}
675b8bb359SPaolo Bonzini 
/* Allocate a new bottom half bound to ctx.  The BH starts unscheduled;
 * the caller owns it until qemu_bh_delete().  Publication to lock-free
 * readers happens only after the members are fully initialized
 * (see the smp_wmb below).
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
    return bh;
}
854f999d05SKevin Wolf 
/* Run a bottom half's callback with its opaque argument.  Does not
 * touch bh->scheduled; the caller (e.g. aio_bh_poll) clears it first.
 */
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}
90df281b80SPavel Dovgalyuk 
/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
 *
 * Run every scheduled, non-idle-expired bottom half once, then reap
 * deleted BHs if this is the last list walker.  Returns 1 if a
 * non-idle BH ran (i.e. progress was made), 0 otherwise.
 */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;
    bool deleted = false;

    /* Take the "reader" side of list_lock: concurrent insertions are
     * allowed, but nodes cannot be freed while the count is nonzero.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    ret = 0;
    for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
        next = atomic_rcu_read(&bh->next);
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs don't count as progress */
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
        if (bh->deleted) {
            deleted = true;
        }
    }

    /* remove deleted bhs */
    if (!deleted) {
        qemu_lockcnt_dec(&ctx->list_lock);
        return ret;
    }

    /* dec_and_lock succeeds only when the reader count drops to zero,
     * so the unlink-and-free below runs without concurrent walkers.
     */
    if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted && !bh->scheduled) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_lockcnt_unlock(&ctx->list_lock);
    }
    return ret;
}
1434f999d05SKevin Wolf 
/* Schedule a BH in idle mode: instead of running ASAP, idle BHs only
 * cap the event-loop timeout at 10ms (see aio_compute_timeout).
 * Note: does not call aio_notify, so a sleeping loop is not woken.
 */
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}
1524f999d05SKevin Wolf 
/* Schedule a BH for execution on the next event-loop iteration and wake
 * the loop.  Scheduling an already-scheduled BH is a no-op (the
 * atomic_xchg returns nonzero and aio_notify is skipped).
 */
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    /* Load ctx before publishing bh->scheduled: after the xchg the BH
     * may run and be deleted by another thread (see comment below).
     */
    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}
1694f999d05SKevin Wolf 
170dcc772e2SLiu Ping Fan 
171dcc772e2SLiu Ping Fan /* This func is async.
172dcc772e2SLiu Ping Fan  */
1734f999d05SKevin Wolf void qemu_bh_cancel(QEMUBH *bh)
1744f999d05SKevin Wolf {
1754f999d05SKevin Wolf     bh->scheduled = 0;
1764f999d05SKevin Wolf }
1774f999d05SKevin Wolf 
/* This func is async: the BH is not freed here.  aio_bh_poll performs
 * the actual unlink and g_free at the end of its list walk, once no
 * other thread is traversing the list.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
1864f999d05SKevin Wolf 
187845ca10dSPaolo Bonzini int64_t
188845ca10dSPaolo Bonzini aio_compute_timeout(AioContext *ctx)
1894f999d05SKevin Wolf {
190845ca10dSPaolo Bonzini     int64_t deadline;
191845ca10dSPaolo Bonzini     int timeout = -1;
1924f999d05SKevin Wolf     QEMUBH *bh;
1934f999d05SKevin Wolf 
194d7c99a12SPaolo Bonzini     for (bh = atomic_rcu_read(&ctx->first_bh); bh;
195d7c99a12SPaolo Bonzini          bh = atomic_rcu_read(&bh->next)) {
1965b8bb359SPaolo Bonzini         if (bh->scheduled) {
1974f999d05SKevin Wolf             if (bh->idle) {
1984f999d05SKevin Wolf                 /* idle bottom halves will be polled at least
1994f999d05SKevin Wolf                  * every 10ms */
200845ca10dSPaolo Bonzini                 timeout = 10000000;
2014f999d05SKevin Wolf             } else {
2024f999d05SKevin Wolf                 /* non-idle bottom halves will be executed
2034f999d05SKevin Wolf                  * immediately */
204845ca10dSPaolo Bonzini                 return 0;
2054f999d05SKevin Wolf             }
2064f999d05SKevin Wolf         }
2074f999d05SKevin Wolf     }
2084f999d05SKevin Wolf 
209845ca10dSPaolo Bonzini     deadline = timerlistgroup_deadline_ns(&ctx->tlg);
210533a8cf3SAlex Bligh     if (deadline == 0) {
211845ca10dSPaolo Bonzini         return 0;
212533a8cf3SAlex Bligh     } else {
213845ca10dSPaolo Bonzini         return qemu_soonest_timeout(timeout, deadline);
214845ca10dSPaolo Bonzini     }
215533a8cf3SAlex Bligh }
216533a8cf3SAlex Bligh 
/* GSource prepare callback: announce that this thread is about to poll
 * (bit 0 of notify_me) and compute how long glib may block.
 * Returns TRUE when dispatch should run without polling at all.
 */
static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    /* Paired with the smp_mb in aio_notify: writers that schedule work
     * after this point will kick the event notifier.
     */
    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}
233e3713e00SPaolo Bonzini 
/* GSource check callback: withdraw the notify_me announcement made in
 * prepare, consume any pending notification, and report whether
 * dispatch is needed (scheduled BH, pending handler, or expired timer).
 */
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}
250e3713e00SPaolo Bonzini 
251e3713e00SPaolo Bonzini static gboolean
252e3713e00SPaolo Bonzini aio_ctx_dispatch(GSource     *source,
253e3713e00SPaolo Bonzini                  GSourceFunc  callback,
254e3713e00SPaolo Bonzini                  gpointer     user_data)
255e3713e00SPaolo Bonzini {
256e3713e00SPaolo Bonzini     AioContext *ctx = (AioContext *) source;
257e3713e00SPaolo Bonzini 
258e3713e00SPaolo Bonzini     assert(callback == NULL);
259721671adSStefan Hajnoczi     aio_dispatch(ctx, true);
260e3713e00SPaolo Bonzini     return true;
261e3713e00SPaolo Bonzini }
262e3713e00SPaolo Bonzini 
/* GSource finalize callback: tear down everything the AioContext owns.
 * By this point every BH must already have been qemu_bh_delete()d and
 * no thread may still be walking the BH list.
 */
static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

    qemu_lockcnt_lock(&ctx->list_lock);
    /* Zero count means no concurrent aio_bh_poll walker remains */
    assert(!qemu_lockcnt_count(&ctx->list_lock));
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_lockcnt_unlock(&ctx->list_lock);

    /* Unregister the notifier before destroying it */
    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
}
2972f4dc3c1SPaolo Bonzini 
/* glib event-source callback table that drives the AioContext loop */
static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};
304e3713e00SPaolo Bonzini 
/* Return the context's GSource with an extra reference; the caller
 * owns that reference and must g_source_unref it.
 */
GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
310a915f4bcSPaolo Bonzini 
3119b34277dSStefan Hajnoczi ThreadPool *aio_get_thread_pool(AioContext *ctx)
3129b34277dSStefan Hajnoczi {
3139b34277dSStefan Hajnoczi     if (!ctx->thread_pool) {
3149b34277dSStefan Hajnoczi         ctx->thread_pool = thread_pool_new(ctx);
3159b34277dSStefan Hajnoczi     }
3169b34277dSStefan Hajnoczi     return ctx->thread_pool;
3179b34277dSStefan Hajnoczi }
3189b34277dSStefan Hajnoczi 
#ifdef CONFIG_LINUX_AIO
/* Return the context's Linux AIO state, creating and attaching it
 * lazily on first use.  Freed in aio_ctx_finalize.
 */
LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init();
        laio_attach_aio_context(ctx->linux_aio, ctx);
    }
    return ctx->linux_aio;
}
#endif
3290187f5c9SPaolo Bonzini 
/* Wake up an event loop that may be blocked in poll.  Only kicks the
 * event notifier when some thread advertised interest via notify_me,
 * avoiding a syscall when nobody is sleeping.
 */
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        /* Remember the pending notification until aio_notify_accept */
        atomic_mb_set(&ctx->notified, true);
    }
}
34105e514b1SPaolo Bonzini 
/* Consume a pending aio_notify: clear the notified flag and, if one
 * was pending, drain the event notifier as well.
 */
void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
3482f4dc3c1SPaolo Bonzini 
349d5541d86SAlex Bligh static void aio_timerlist_notify(void *opaque)
350d5541d86SAlex Bligh {
351d5541d86SAlex Bligh     aio_notify(opaque);
352d5541d86SAlex Bligh }
353d5541d86SAlex Bligh 
/* No-op read handler for ctx->notifier: the wakeup side effect of the
 * notifier is all that is needed, there is no work to do here.
 */
static void event_notifier_dummy_cb(EventNotifier *e)
{
}
35721a03d17SPaolo Bonzini 
/* Returns true if aio_notify() was called (e.g. a BH was scheduled).
 * Used as the polling handler for ctx->notifier so the poll loop can
 * detect a notification without reading the notifier's fd.
 */
static bool event_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return atomic_read(&ctx->notified);
}
3664a1cba38SStefan Hajnoczi 
/* Create and initialize a new AioContext, embedded in a glib GSource.
 * Returns NULL and sets errp if the internal event notifier cannot be
 * initialized.  The caller owns the returned reference
 * (drop it with aio_context_unref).
 */
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);
    /* Register the notifier with a dummy read handler; the poll handler
     * lets the loop observe notifications via ctx->notified.
     */
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb,
                           event_notifier_poll);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;   /* created lazily by aio_get_thread_pool */
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    /* Adaptive polling parameters start at zero (polling disabled) */
    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
404e3713e00SPaolo Bonzini 
/* Take a reference on the AioContext (delegates to its GSource). */
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}
409e3713e00SPaolo Bonzini 
/* Drop a reference; the last unref triggers aio_ctx_finalize. */
void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}
41498563fc3SStefan Hajnoczi 
/* Acquire the context's big lock (recursive; pair with
 * aio_context_release).
 */
void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}
41998563fc3SStefan Hajnoczi 
/* Release one level of the context's recursive lock. */
void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}
424