/*
 * QEMU util/async.c -- AioContext event loop, bottom halves and
 * coroutine scheduling (excerpts)
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * [MIT license text elided]
 * ... OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "block/raw-aio.h"
#include "qemu/coroutine-tls.h"
/* in aio_bh_enqueue(): */
    AioContext *ctx = bh->ctx;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that it doesn't see a half-inserted bh.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }
/* in aio_bh_dequeue(): */
    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
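/*
 * Example (not part of async.c): a minimal sketch of how caller code
 * typically drives this machinery through the public bottom-half API
 * declared in include/block/aio.h and include/qemu/main-loop.h.  The
 * callback and opaque value are hypothetical.
 */
static void example_bh_cb(void *opaque)
{
    int *counter = opaque;
    (*counter)++;            /* runs in the AioContext's home thread */
}

static void example_use_bh(AioContext *ctx, int *counter)
{
    QEMUBH *bh = aio_bh_new(ctx, example_bh_cb, counter);

    qemu_bh_schedule(bh);    /* sets BH_SCHEDULED via aio_bh_enqueue() */
    /* ... later, once the bh is no longer needed ... */
    qemu_bh_delete(bh);      /* sets BH_DELETED; freed by aio_bh_poll() */
}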
/* in aio_bh_call(): */
    /* Make a copy of the guard-pointer as cb may free the bh */
    MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
    if (reentrancy_guard) {
        last_engaged_in_io = reentrancy_guard->engaged_in_io;
        if (reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        reentrancy_guard->engaged_in_io = true;
    }

    bh->cb(bh->opaque);

    if (reentrancy_guard) {
        reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
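/*
 * Example (not part of async.c): device code opts into the re-entrancy
 * check above by creating its bottom halves with qemu_bh_new_guarded()
 * and the guard embedded in DeviceState.  The callback here is
 * hypothetical; DeviceState and mem_reentrancy_guard are real QEMU
 * structures.
 */
static void example_device_bh_cb(void *opaque)
{
    /* device work; flagged by trace_reentrant_aio() if it re-enters I/O */
}

static void example_device_init_bh(DeviceState *dev)
{
    QEMUBH *bh = qemu_bh_new_guarded(example_device_bh_cb, dev,
                                     &dev->mem_reentrancy_guard);
    qemu_bh_schedule(bh);
}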
/* in aio_bh_poll(): */
    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#pragma GCC diagnostic pop

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }
        /* ... run the bh via aio_bh_call() ... */
    }
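/*
 * Example (stand-alone model, not part of async.c): the slice dance above
 * is a "steal the whole list atomically, then walk a private snapshot"
 * pattern.  A simplified model with C11 atomics instead of QEMU's QSLIST
 * macros; all names are hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static struct node *_Atomic example_head;

/* Producers push concurrently (compare to QSLIST_INSERT_HEAD_ATOMIC). */
static void example_push(struct node *n)
{
    n->next = atomic_load(&example_head);
    while (!atomic_compare_exchange_weak(&example_head, &n->next, n)) {
        /* n->next was refreshed by the failed CAS; retry */
    }
}

/*
 * The consumer steals the whole list in one exchange (compare to
 * QSLIST_MOVE_ATOMIC) and then walks its snapshot without further
 * synchronization; items pushed afterwards form the next "slice".
 */
static struct node *example_steal_all(void)
{
    return atomic_exchange(&example_head, NULL);
}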
/* in qemu_bh_cancel(): */
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
/* in aio_compute_bh_timeout(): */
    if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
        if (bh->flags & BH_IDLE) {
            /* idle bottom halves will be polled at least every 10ms */
            timeout = 10000000;
        } else {
            /* non-idle bottom halves will be executed immediately */
            return 0;
        }
    }
/* in aio_compute_timeout(): */
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
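/*
 * Example (not part of async.c): the caller-visible contract of
 * aio_compute_timeout() is "-1 = block forever, 0 = do not block,
 * otherwise nanoseconds until the next bottom half or timer".  A sketch
 * of merging the bh timeout with the timer deadline, assuming
 * qemu_soonest_timeout() from include/qemu/timer.h:
 */
static int64_t example_merge_deadlines(int64_t bh_timeout, int64_t deadline)
{
    if (bh_timeout == 0) {
        return 0;                  /* a bottom half is runnable right now */
    }
    if (deadline == 0) {
        return 0;                  /* a timer has already expired */
    }
    /* otherwise block until whichever comes first */
    return qemu_soonest_timeout(bh_timeout, deadline);
}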
/* in aio_ctx_prepare(): */
    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb() in aio_notify().
     */
    smp_mb();
/* in aio_ctx_check(): */
    /* Finish computing the timeout before clearing the flag. */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
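/*
 * For context: aio_ctx_prepare() and aio_ctx_check() above are wired into
 * glib's main loop through a GSourceFuncs table defined elsewhere in
 * async.c, along the lines of:
 */
static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};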
/* in aio_ctx_finalize(): */
    thread_pool_free_aio(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }
        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
/* in aio_get_g_source(): */
    g_source_ref(&ctx->source);
    return &ctx->source;
/* in aio_get_thread_pool(): */
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new_aio(ctx);
    }
    return ctx->thread_pool;
/* in aio_setup_linux_aio(): */
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
/* in aio_get_linux_aio(): */
    assert(ctx->linux_aio);
    return ctx->linux_aio;
/* in aio_setup_linux_io_uring(): */
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
/* in aio_get_linux_io_uring(): */
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
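/*
 * Example (hypothetical caller, mirroring how block drivers probe these
 * backends): lazily set up io_uring for a context and fall back to the
 * thread pool when it is unavailable.  example_try_io_uring() is not a
 * real QEMU function.
 */
#ifdef CONFIG_LINUX_IO_URING
static bool example_try_io_uring(AioContext *ctx)
{
    Error *local_err = NULL;

    if (!aio_setup_linux_io_uring(ctx, &local_err)) {
        /* a real caller would report local_err before falling back */
        error_free(local_err);
        return false;
    }
    return true;
}
#endif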
/* in aio_notify(): */
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();

    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
/* in aio_notify_accept(): */
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
/* in aio_context_notifier_cb(): */
    event_notifier_test_and_clear(&ctx->notifier);
/* in aio_context_notifier_poll(): */
    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
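/*
 * Example (stand-alone model, not part of async.c): the notify_me/notified
 * handshake above is a Dekker-style "flag before check" pattern.  A
 * simplified model with C11 seq_cst atomics in place of QEMU's qatomic
 * wrappers and explicit barriers; all names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  example_notify_me;   /* "the poller is about to block" */
static atomic_bool example_notified;    /* "there is new work" */

static void example_producer_notify(void)
{
    atomic_store(&example_notified, true);     /* publish the work */
    /* seq_cst ordering stands in for smp_mb() in aio_notify() */
    if (atomic_load(&example_notify_me)) {
        /* kick the event notifier so the blocked poller wakes up */
    }
}

static bool example_poller_prepare_and_check(void)
{
    bool work;

    atomic_fetch_add(&example_notify_me, 1);   /* like notify_me |= 1 */
    work = atomic_load(&example_notified);     /* re-check after the flag */
    atomic_fetch_sub(&example_notify_me, 1);
    return work;
}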
/* in co_schedule_bh_cb(): */
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    /* Insertion is LIFO, so reverse once to run coroutines in FIFO order. */
    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
    }
/* in aio_context_new(): */
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;
    ctx->aio_max_batch = 0;
    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    return ctx;

fail:
    g_source_destroy(&ctx->source);
    return NULL;
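/*
 * Example (not part of async.c): typical lifecycle of a standalone
 * AioContext, e.g. for an IOThread.  Using &error_abort instead of real
 * error propagation is a simplification.
 */
static AioContext *example_make_ctx(void)
{
    AioContext *ctx = aio_context_new(&error_abort);

    /* ... attach file descriptors, timers, bottom halves ... */
    return ctx;
}

static void example_drop_ctx(AioContext *ctx)
{
    /* drops the GSource reference; aio_ctx_finalize() runs at zero */
    aio_context_unref(ctx);
}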
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL, __func__);

    if (scheduled) {
        fprintf(stderr, "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }
    /*
     * The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference before invoking it.
     */
    aio_context_ref(ctx);
    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);
    aio_context_unref(ctx);
}
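/*
 * Example (not part of async.c): handing a coroutine to another thread's
 * AioContext.  The entry function and its argument are hypothetical.
 */
static void coroutine_fn example_co_entry(void *opaque)
{
    /* runs in the thread that owns the target AioContext */
}

static void example_schedule(AioContext *target_ctx)
{
    Coroutine *co = qemu_coroutine_create(example_co_entry, NULL);

    aio_co_schedule(target_ctx, co);   /* safe to call from any thread */
}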
/* in struct AioCoRescheduleSelf: */
    Coroutine *co;

/* in aio_co_reschedule_self_bh(): */
    aio_co_schedule(data->new_ctx, data->co);
/* in aio_co_reschedule_self(): */
    /*
     * We can't directly schedule the coroutine in the target context
     * because this would be racy: The other thread could try to enter the
     * coroutine before it has yielded in this one.
     */
void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}
void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        qemu_aio_coroutine_enter(ctx, co);
    }
}
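/*
 * Example (not part of async.c): the canonical wait/wake pattern these
 * helpers support.  The request structure and completion callback are
 * hypothetical.
 */
typedef struct ExampleReq {
    Coroutine *co;
    bool done;
} ExampleReq;

static void example_complete(void *opaque)    /* runs in an event handler */
{
    ExampleReq *req = opaque;

    req->done = true;
    aio_co_wake(req->co);           /* re-enters co in its home context */
}

static void coroutine_fn example_wait(ExampleReq *req)
{
    req->co = qemu_coroutine_self();
    while (!req->done) {
        qemu_coroutine_yield();     /* resumed by aio_co_wake() */
    }
}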
/* in aio_context_ref(): */
    g_source_ref(&ctx->source);

/* in aio_context_unref(): */
    g_source_unref(&ctx->source);
/* in aio_context_set_thread_pool_params(): */
    if (min > max || max <= 0 || min < 0 || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
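/*
 * Example (not part of async.c): resizing the per-context thread pool,
 * e.g. when applying an IOThread property change.  The bounds here are
 * illustrative.
 */
static void example_resize_pool(AioContext *ctx)
{
    Error *local_err = NULL;

    aio_context_set_thread_pool_params(ctx, 2, 16, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}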