/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
#include "hw/qdev-core.h"


typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPoolAio;
struct LinuxAioState;
typedef struct LuringState LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here. This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
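
/*
 * A minimal sketch of how a monitoring backend could fill in this table
 * (the my_fdmon_* names are hypothetical placeholders, not declarations
 * from this header; real backends live in util/fdmon-*.c):
 *
 *   static void my_fdmon_update(AioContext *ctx,
 *                               AioHandler *old_node, AioHandler *new_node);
 *   static int my_fdmon_wait(AioContext *ctx, AioHandlerList *ready_list,
 *                            int64_t timeout);
 *
 *   static const FDMonOps my_fdmon_ops = {
 *       .update    = my_fdmon_update,
 *       .wait      = my_fdmon_wait,
 *       .need_wait = aio_poll_disabled,
 *   };
 *
 * Using aio_poll_disabled for .need_wait assumes the backend cannot poll fd
 * readiness from userspace; the AioContext then points its fdmon_ops at
 * this table.
 */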

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call. All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

typedef struct AioPolledEvent {
    int64_t ns;        /* current polling time in nanoseconds */
} AioPolledEvent;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid adding or removing nodes and edges
     * of the block graph while some other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers. Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted. Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext. However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped. If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch. It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect against concurrent additions and deletions of
     * QEMUBHs and AioHandlers, and to ensure that no callbacks are
     * removed while we're walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls. When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events. False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way. For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPoolAio *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type. Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling. Protected by
     * ctx->list_lock. Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented. aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
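
/*
 * A minimal usage sketch (error handling via error_fatal is just an
 * assumption for brevity):
 *
 *   AioContext *ctx = aio_context_new(&error_fatal);
 *
 *   ... attach handlers, bottom halves or timers to ctx ...
 *
 *   aio_context_unref(ctx);    release the creator's reference when done
 */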

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
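
/*
 * For example, to run a callback once in another context's event loop
 * (my_cleanup_cb and my_state are hypothetical names):
 *
 *   static void my_cleanup_cb(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       ...
 *   }
 *
 *   aio_bh_schedule_oneshot(target_ctx, my_cleanup_cb, my_state);
 *
 * The bottom half is freed automatically after it has run once.
 */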

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 * @reentrancy_guard: A guard set when entering a cb to prevent
 * device-reentrancy issues
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)

/**
 * aio_bh_new_guarded: Allocate a new bottom half structure with a
 * reentrancy_guard
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)
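
/*
 * Typical lifecycle of a reusable bottom half (my_bh_cb and s->bh are
 * hypothetical names used only for illustration):
 *
 *   s->bh = aio_bh_new(ctx, my_bh_cb, s);       create once
 *   ...
 *   qemu_bh_schedule(s->bh);                    may be called many times
 *   ...
 *   qemu_bh_delete(s->bh);                      cancel and free when done
 */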

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop. Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration(). Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting. This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that invocations of aio_bh_poll() must not run concurrently.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked. This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet. While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex. This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new. It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only freed once the event
 * loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work. This can issue new pending
 * AIO requests as a result of executing I/O completion or bottom half
 * callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers. If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking. If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool no_coroutine_fn aio_poll(AioContext *ctx, bool blocking);
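
/*
 * A common pattern is to drive the event loop until some condition set by a
 * completion callback becomes true (done is a hypothetical flag updated by
 * the caller's completion callback):
 *
 *   while (!done) {
 *       aio_poll(ctx, true);    block until at least one event is handled
 *   }
 */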

/* Register a file descriptor and associated callbacks. Behaves very similarly
 * to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
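
/*
 * For example, to watch a socket for readability and later stop watching it
 * (my_read_handler, MyState and s are hypothetical):
 *
 *   static void my_read_handler(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       ... read from s->fd ...
 *   }
 *
 *   aio_set_fd_handler(ctx, s->fd, my_read_handler, NULL, NULL, NULL, s);
 *   ...
 *   aio_set_fd_handler(ctx, s->fd, NULL, NULL, NULL, NULL, NULL);   unregister
 */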

/* Register an event notifier and associated callbacks. Behaves very similarly
 * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);
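
/*
 * Sketch of registering an EventNotifier with a read handler only
 * (my_notifier_read and s->notifier are illustrative names; io_poll and
 * io_poll_ready are simply left as NULL here):
 *
 *   aio_set_event_notifier(ctx, &s->notifier, my_notifier_read, NULL, NULL);
 *   ...
 *   aio_set_event_notifier(ctx, &s->notifier, NULL, NULL, NULL);   unregister
 */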

/*
 * Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier. Do nothing if the event notifier is
 * not registered.
 *
 * Note that if the io_poll_end() callback (or the entire notifier) is removed
 * during polling, it will not be called, so an io_poll_begin() is not
 * necessarily always followed by an io_poll_end().
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPoolAio bound to this AioContext */
struct ThreadPoolAio *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
LuringState *aio_get_linux_io_uring(AioContext *ctx);
/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more QEMU_TIMER_ATTR_<id> values OR'ed together
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
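
/*
 * A brief usage sketch (my_timer_cb and s are hypothetical; the timer fires
 * 100 ms from now on the realtime clock):
 *
 *   QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                my_timer_cb, s);
 *   timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *   ...
 *   timer_free(t);
 */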

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more QEMU_TIMER_ATTR_<id> values OR'ed together
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}
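
/*
 * When the QEMUTimer is embedded in a caller-owned structure, initialisation
 * avoids a separate allocation (s->timer, my_timer_cb and delay_ns are
 * hypothetical):
 *
 *   aio_timer_init(ctx, &s->timer, QEMU_CLOCK_VIRTUAL, SCALE_NS,
 *                  my_timer_cb, s);
 *   timer_mod(&s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
 */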

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active. In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);
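
/*
 * For example, to create a coroutine and hand it to an IOThread's context
 * (my_co_fn, my_opaque and iothread_ctx are hypothetical):
 *
 *   Coroutine *co = qemu_coroutine_create(my_co_fn, my_opaque);
 *   aio_co_schedule(iothread_ctx, co);
 *
 * The coroutine entry point then runs in the event loop of iothread_ctx.
 */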

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx. If the coroutine is already
 * running in new_ctx, do nothing.
 *
 * Note that this function cannot reschedule from iohandler_ctx to
 * qemu_aio_context.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context. The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);
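
/*
 * A typical sketch: the coroutine records itself and yields, and another
 * thread later wakes it once the data it is waiting for is ready
 * (s->waiter is a hypothetical field):
 *
 *   in the coroutine:
 *       s->waiter = qemu_coroutine_self();
 *       qemu_coroutine_yield();
 *
 *   elsewhere, when the data is ready:
 *       aio_co_wake(s->waiter);
 */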

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext. If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 *
 * Note that this never returns the main loop's iohandler_ctx; in that case
 * the main loop AioContext is returned instead.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
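
/*
 * For example, to allow up to 32 microseconds of busy polling, or to disable
 * polling entirely (error handling via &error_abort is just an assumption
 * for brevity):
 *
 *   aio_context_set_poll_params(ctx, 32 * 1000, 0, 0, &error_abort);
 *   aio_context_set_poll_params(ctx, 0, 0, 0, &error_abort);
 */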

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif