xref: /qemu/util/async.c (revision d38ea87ac54af64ef611de434d07c12dc0399216)
14f999d05SKevin Wolf /*
24f999d05SKevin Wolf  * QEMU System Emulator
34f999d05SKevin Wolf  *
44f999d05SKevin Wolf  * Copyright (c) 2003-2008 Fabrice Bellard
54f999d05SKevin Wolf  *
64f999d05SKevin Wolf  * Permission is hereby granted, free of charge, to any person obtaining a copy
74f999d05SKevin Wolf  * of this software and associated documentation files (the "Software"), to deal
84f999d05SKevin Wolf  * in the Software without restriction, including without limitation the rights
94f999d05SKevin Wolf  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
104f999d05SKevin Wolf  * copies of the Software, and to permit persons to whom the Software is
114f999d05SKevin Wolf  * furnished to do so, subject to the following conditions:
124f999d05SKevin Wolf  *
134f999d05SKevin Wolf  * The above copyright notice and this permission notice shall be included in
144f999d05SKevin Wolf  * all copies or substantial portions of the Software.
154f999d05SKevin Wolf  *
164f999d05SKevin Wolf  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
174f999d05SKevin Wolf  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
184f999d05SKevin Wolf  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
194f999d05SKevin Wolf  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
204f999d05SKevin Wolf  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
214f999d05SKevin Wolf  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
224f999d05SKevin Wolf  * THE SOFTWARE.
234f999d05SKevin Wolf  */
244f999d05SKevin Wolf 
25*d38ea87aSPeter Maydell #include "qemu/osdep.h"
264f999d05SKevin Wolf #include "qemu-common.h"
27737e150eSPaolo Bonzini #include "block/aio.h"
289b34277dSStefan Hajnoczi #include "block/thread-pool.h"
291de7afc9SPaolo Bonzini #include "qemu/main-loop.h"
300ceb849bSPaolo Bonzini #include "qemu/atomic.h"
319a1e9481SKevin Wolf 
324f999d05SKevin Wolf /***********************************************************/
334f999d05SKevin Wolf /* bottom halves (can be seen as timers which expire ASAP) */
344f999d05SKevin Wolf 
/* A bottom half: a callback that runs "as soon as possible" in its
 * AioContext's event loop, conceptually a timer that expires immediately.
 */
struct QEMUBH {
    AioContext *ctx;     /* context whose loop runs this BH */
    QEMUBHFunc *cb;      /* callback invoked by aio_bh_call() */
    void *opaque;        /* opaque argument passed to cb */
    QEMUBH *next;        /* singly-linked list; head is ctx->first_bh */
    bool scheduled;      /* set when pending; cleared just before cb runs */
    bool idle;           /* idle BHs only guarantee polling every 10ms */
    bool deleted;        /* freed lazily by aio_bh_poll() when safe */
};
444f999d05SKevin Wolf 
/* Allocate a new bottom half for @ctx and prepend it to the context's
 * BH list.  The BH starts out unscheduled; arm it with
 * qemu_bh_schedule().  Release it with qemu_bh_delete() — the memory
 * itself is reclaimed later by aio_bh_poll().
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    /* bh_lock serializes list insertion against concurrent aio_bh_new()
     * and against the deleted-BH sweep in aio_bh_poll(). */
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}
624f999d05SKevin Wolf 
63df281b80SPavel Dovgalyuk void aio_bh_call(QEMUBH *bh)
64df281b80SPavel Dovgalyuk {
65df281b80SPavel Dovgalyuk     bh->cb(bh->opaque);
66df281b80SPavel Dovgalyuk }
67df281b80SPavel Dovgalyuk 
68dcc772e2SLiu Ping Fan /* Multiple occurrences of aio_bh_poll cannot be called concurrently */
/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
 *
 * Run all scheduled, non-deleted bottom halves once.  Returns 1 if at
 * least one non-idle BH (other than the notify dummy) ran, i.e. the
 * event loop made real progress; 0 otherwise.
 */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    /* walking_bh prevents the sweep below (and any reentrant caller's
     * sweep) from freeing nodes while this traversal is in flight. */
    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        /* Load next before running cb: the callback may delete bh. */
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs, but only once no walker is on the list */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}
1174f999d05SKevin Wolf 
/* Schedule @bh as "idle": it will run on a future poll (within the 10ms
 * idle timeout, see aio_compute_timeout) but, unlike qemu_bh_schedule(),
 * does not call aio_notify() to wake the event loop.
 */
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}
1264f999d05SKevin Wolf 
/* Schedule @bh to run in its AioContext and wake that context's event
 * loop.  Scheduling a BH that is already scheduled is a no-op (only the
 * thread that flips scheduled 0->1 performs the aio_notify).
 */
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    /* Load ctx before setting scheduled: once scheduled is visible, the
     * callback may run and delete bh (see ordering note below). */
    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}
1434f999d05SKevin Wolf 
144dcc772e2SLiu Ping Fan 
145dcc772e2SLiu Ping Fan /* This func is async.
146dcc772e2SLiu Ping Fan  */
1474f999d05SKevin Wolf void qemu_bh_cancel(QEMUBH *bh)
1484f999d05SKevin Wolf {
1494f999d05SKevin Wolf     bh->scheduled = 0;
1504f999d05SKevin Wolf }
1514f999d05SKevin Wolf 
152dcc772e2SLiu Ping Fan /* This func is async.The bottom half will do the delete action at the finial
153dcc772e2SLiu Ping Fan  * end.
154dcc772e2SLiu Ping Fan  */
/* This func is async.  The bottom half will do the delete action at the
 * final end: the memory is actually freed later by aio_bh_poll() once
 * no walker is traversing the BH list.
 *
 * NOTE(review): these are plain (non-atomic) stores; callers must not
 * race qemu_bh_delete() with qemu_bh_schedule() on the same BH —
 * confirm against callers.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
1604f999d05SKevin Wolf 
161845ca10dSPaolo Bonzini int64_t
162845ca10dSPaolo Bonzini aio_compute_timeout(AioContext *ctx)
1634f999d05SKevin Wolf {
164845ca10dSPaolo Bonzini     int64_t deadline;
165845ca10dSPaolo Bonzini     int timeout = -1;
1664f999d05SKevin Wolf     QEMUBH *bh;
1674f999d05SKevin Wolf 
168f627aab1SPaolo Bonzini     for (bh = ctx->first_bh; bh; bh = bh->next) {
1694f999d05SKevin Wolf         if (!bh->deleted && bh->scheduled) {
1704f999d05SKevin Wolf             if (bh->idle) {
1714f999d05SKevin Wolf                 /* idle bottom halves will be polled at least
1724f999d05SKevin Wolf                  * every 10ms */
173845ca10dSPaolo Bonzini                 timeout = 10000000;
1744f999d05SKevin Wolf             } else {
1754f999d05SKevin Wolf                 /* non-idle bottom halves will be executed
1764f999d05SKevin Wolf                  * immediately */
177845ca10dSPaolo Bonzini                 return 0;
1784f999d05SKevin Wolf             }
1794f999d05SKevin Wolf         }
1804f999d05SKevin Wolf     }
1814f999d05SKevin Wolf 
182845ca10dSPaolo Bonzini     deadline = timerlistgroup_deadline_ns(&ctx->tlg);
183533a8cf3SAlex Bligh     if (deadline == 0) {
184845ca10dSPaolo Bonzini         return 0;
185533a8cf3SAlex Bligh     } else {
186845ca10dSPaolo Bonzini         return qemu_soonest_timeout(timeout, deadline);
187845ca10dSPaolo Bonzini     }
188533a8cf3SAlex Bligh }
189533a8cf3SAlex Bligh 
/* GSource prepare callback: announce that this thread may be about to
 * block (so aio_notify() must kick the event notifier) and compute the
 * poll timeout.  Returns true when dispatch can proceed immediately.
 */
static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    /* Paired with the smp_mb + notify_me read in aio_notify. */
    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}
206e3713e00SPaolo Bonzini 
/* GSource check callback: returns true when there is something to
 * dispatch — a scheduled bottom half, pending fd handlers, or an
 * already-expired timer.
 */
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    /* Done (possibly) blocking: undo the notify_me flag set in
     * aio_ctx_prepare and consume any pending notification. */
    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}
223e3713e00SPaolo Bonzini 
224e3713e00SPaolo Bonzini static gboolean
225e3713e00SPaolo Bonzini aio_ctx_dispatch(GSource     *source,
226e3713e00SPaolo Bonzini                  GSourceFunc  callback,
227e3713e00SPaolo Bonzini                  gpointer     user_data)
228e3713e00SPaolo Bonzini {
229e3713e00SPaolo Bonzini     AioContext *ctx = (AioContext *) source;
230e3713e00SPaolo Bonzini 
231e3713e00SPaolo Bonzini     assert(callback == NULL);
232e4c7e2d1SPaolo Bonzini     aio_dispatch(ctx);
233e3713e00SPaolo Bonzini     return true;
234e3713e00SPaolo Bonzini }
235e3713e00SPaolo Bonzini 
/* GSource finalize callback: tear down the AioContext when its last
 * reference is dropped.  All user BHs must already have had
 * qemu_bh_delete() called on them; their memory is reclaimed here.
 */
static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    /* Unregister the dummy handler before destroying the notifier. */
    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}
2622f4dc3c1SPaolo Bonzini 
263e3713e00SPaolo Bonzini static GSourceFuncs aio_source_funcs = {
264e3713e00SPaolo Bonzini     aio_ctx_prepare,
265e3713e00SPaolo Bonzini     aio_ctx_check,
266e3713e00SPaolo Bonzini     aio_ctx_dispatch,
2672f4dc3c1SPaolo Bonzini     aio_ctx_finalize
268e3713e00SPaolo Bonzini };
269e3713e00SPaolo Bonzini 
/* Return the context's embedded GSource with a new reference; the
 * caller must release it with g_source_unref().
 */
GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
275a915f4bcSPaolo Bonzini 
/* Return the context's thread pool, creating it lazily on first use.
 * NOTE(review): there is no locking around the lazy creation — this
 * appears to assume single-threaded first access; confirm against
 * callers.
 */
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
2839b34277dSStefan Hajnoczi 
/* Wake up an event loop that may be blocked polling this context, so it
 * re-evaluates its conditions.  The notifier is only kicked when some
 * thread has advertised (via notify_me) that it may be blocking.
 */
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        /* notified is consumed by the atomic_xchg in aio_notify_accept. */
        atomic_mb_set(&ctx->notified, true);
    }
}
29505e514b1SPaolo Bonzini 
/* Consume a pending aio_notify(): clear the notified flag and, if it
 * was set, drain the underlying event notifier.  Pairs with the
 * atomic_mb_set of ctx->notified in aio_notify.
 */
void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
3022f4dc3c1SPaolo Bonzini 
303d5541d86SAlex Bligh static void aio_timerlist_notify(void *opaque)
304d5541d86SAlex Bligh {
305d5541d86SAlex Bligh     aio_notify(opaque);
306d5541d86SAlex Bligh }
307d5541d86SAlex Bligh 
/* Contention callback for ctx->lock: runs when another thread wants the
 * lock while the current owner may be blocked in aio_poll().
 */
static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick owner thread in case they are blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}
315ca96ac44SStefan Hajnoczi 
/* Bottom half scheduled by aio_rfifolock_cb purely for its wakeup
 * side effect; the callback itself has no work to do.
 */
static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}
320da5e1de9SStefan Hajnoczi 
/* No-op read handler for ctx->notifier (registered in aio_context_new);
 * clearing the notification is done separately by aio_notify_accept().
 */
static void event_notifier_dummy_cb(EventNotifier *e)
{
}
32421a03d17SPaolo Bonzini 
/* Allocate and initialize a new AioContext, embedded in a GSource.
 *
 * On failure sets @errp and returns NULL.  The returned context is
 * reference counted through its GSource; release it with
 * aio_context_unref().
 */
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    Error *local_err = NULL;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    /* The notifier exists only to wake the loop; its handler is a no-op
     * (the event is cleared by aio_notify_accept instead). */
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;   /* created lazily by aio_get_thread_pool() */
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
359e3713e00SPaolo Bonzini 
/* Take an extra reference on @ctx (counted via its embedded GSource). */
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}
364e3713e00SPaolo Bonzini 
/* Drop a reference on @ctx; when the last one goes away the context is
 * torn down via aio_ctx_finalize(). */
void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}
36998563fc3SStefan Hajnoczi 
/* Acquire exclusive access to @ctx via its rfifolock; a blocked owner is
 * woken through the aio_rfifolock_cb contention callback. */
void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}
37498563fc3SStefan Hajnoczi 
/* Release the lock taken by aio_context_acquire(). */
void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}
379