xref: /qemu/util/async.c (revision 3fe71223374e71436d4aced8865e50fd36588ff7)
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "block/raw-aio.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    bh->scheduled = 1;
    bh->deleted = 1;
    /* Make sure that the members are ready before putting bh into the list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    aio_notify(ctx);
}
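
/*
 * Illustrative usage sketch, not part of the original file: because the
 * one-shot BH is created with deleted already set, aio_bh_poll() frees it
 * automatically after the callback has run once, so the caller never keeps
 * a QEMUBH pointer.  The callback and counter below are hypothetical.
 *
 *     static int counter;
 *
 *     static void my_oneshot_cb(void *opaque)
 *     {
 *         int *n = opaque;
 *         (*n)++;
 *     }
 *
 *     // runs once on the context's thread, then freed by aio_bh_poll()
 *     aio_bh_schedule_oneshot(qemu_get_aio_context(), my_oneshot_cb, &counter);
 */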

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into the list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}
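
/*
 * Illustrative lifecycle sketch, not part of the original file.  The
 * "MyState" struct and callback are hypothetical; the pattern is
 * create -> schedule (possibly many times) -> delete:
 *
 *     typedef struct MyState { QEMUBH *bh; int pending; } MyState;
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->pending = 0;
 *     }
 *
 *     MyState *s = g_new0(MyState, 1);
 *     s->bh = aio_bh_new(qemu_get_aio_context(), my_bh_cb, s);
 *     qemu_bh_schedule(s->bh);   // callback runs once on the next poll
 *     qemu_bh_delete(s->bh);     // unlinked and freed later by aio_bh_poll()
 */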

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Concurrent invocations of aio_bh_poll() are not allowed */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs don't count as progress */
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted && !bh->scheduled) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}
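
/*
 * Illustrative sketch, not part of the original file: qemu_bh_schedule() is
 * safe to call from a thread other than the one running the AioContext.  The
 * atomic_xchg above guarantees that the callback sees every write done before
 * the call, and aio_notify() wakes the polling thread.  "MyState" and
 * worker_produce() are hypothetical.
 *
 *     static void *worker_thread(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         worker_produce(s);        // results visible to the BH callback
 *         qemu_bh_schedule(s->bh);  // wakes the AioContext's event loop
 *         return NULL;
 *     }
 */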

/* This function is asynchronous: the bottom half may still run one final
 * time if a concurrent aio_bh_poll() has already picked it up.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is asynchronous: the bottom half is only unlinked and freed
 * later, by aio_bh_poll(), once it is no longer scheduled.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
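
/*
 * Worked example, not part of the original file: with one idle BH scheduled
 * (timeout = 10000000 ns) and the earliest timer due in 5000000 ns,
 * qemu_soonest_timeout(10000000, 5000000) returns 5000000, so the event loop
 * sleeps at most 5 ms.  With a non-idle BH scheduled, the function returns 0
 * and the loop does not block at all; with nothing pending it returns the
 * timer deadline, or -1 (wait forever) if there are no timers either.
 */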

static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
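
/*
 * Illustrative sketch, not part of the original file: the GSource returned
 * here lets an AioContext be driven by an ordinary GLib main loop.
 * aio_get_g_source() already takes a reference, so the caller drops its own
 * after attaching:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 *     // g_main_loop_run() now drives aio_ctx_prepare/check/dispatch above
 */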

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
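
/*
 * Illustrative sketch, not part of the original file; assumes
 * thread_pool_submit_aio() from block/thread-pool.h.  The pool is created
 * lazily above, so the first caller pays the setup cost:
 *
 *     ThreadPool *pool = aio_get_thread_pool(ctx);
 *     thread_pool_submit_aio(pool, my_blocking_fn, my_arg,
 *                            my_completion_cb, my_opaque);  // all hypothetical
 */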

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init();
        laio_attach_aio_context(ctx->linux_aio, ctx);
    }
    return ctx->linux_aio;
}
#endif

void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
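
/*
 * Illustrative sequence, not part of the original file, derived from
 * aio_ctx_prepare()/aio_ctx_check() above: a polling thread brackets its
 * blocking wait with notify_me so that aio_notify() only pays for a wakeup
 * when someone may actually be sleeping.
 *
 *     atomic_or(&ctx->notify_me, 1);    // prepare: we may block
 *     timeout = aio_compute_timeout(ctx);
 *     // ... block in the OS poll primitive for up to timeout ns ...
 *     atomic_and(&ctx->notify_me, ~1);  // check: stop requesting wakeups
 *     aio_notify_accept(ctx);           // consume a pending notification
 */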

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
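
/*
 * Illustrative sketch, not part of the original file: typical creation of a
 * private AioContext (e.g. for an IOThread), with error propagation and a
 * blocking poll loop:
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     while (running) {
 *         aio_poll(ctx, true);   // blocking=true: sleep until work arrives
 *     }
 *     aio_context_unref(ctx);
 */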

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}
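
/*
 * Illustrative sketch, not part of the original file: ctx->lock is recursive,
 * so acquire/release pairs may nest within one thread.  A typical pattern
 * brackets access to objects owned by another context ("s->timer" is a
 * hypothetical QEMUTimer embedded in a device state):
 *
 *     aio_context_acquire(ctx);
 *     timer_mod(&s->timer, deadline);
 *     aio_context_release(ctx);
 */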