/*
 * AioContext wait support
 *
 * Copyright (C) 2018 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef QEMU_AIO_WAIT_H
#define QEMU_AIO_WAIT_H

#include "block/aio.h"
#include "qemu/main-loop.h"

/**
 * AioWait:
 *
 * An object that facilitates synchronous waiting on a condition. A single
 * global AioWait object (global_aio_wait) is used internally.
 *
 * The main loop can wait on an operation running in an IOThread as follows:
 *
 *   AioContext *ctx = ...;
 *   MyWork work = { .done = false };
 *   schedule_my_work_in_iothread(ctx, &work);
 *   AIO_WAIT_WHILE(ctx, !work.done);
 *
 * The IOThread must call aio_wait_kick() to notify the main loop when
 * work.done changes:
 *
 *   static void do_work(...)
 *   {
 *       ...
 *       work.done = true;
 *       aio_wait_kick();
 *   }
 */
typedef struct {
    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
    unsigned num_waiters;
} AioWait;

extern AioWait global_aio_wait;
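
/*
 * Example (illustrative sketch, not part of this API): one possible way to
 * wire up the MyWork/do_work()/schedule_my_work_in_iothread() names used in
 * the AioWait comment above, assuming aio_bh_schedule_oneshot() from
 * "block/aio.h" is used to run the work in the IOThread:
 *
 *   typedef struct {
 *       bool done;
 *   } MyWork;
 *
 *   static void do_work(void *opaque)
 *   {
 *       MyWork *work = opaque;
 *       ...                       <- runs in the IOThread's home thread
 *       work->done = true;
 *       aio_wait_kick();          <- wakes the main loop's AIO_WAIT_WHILE()
 *   }
 *
 *   static void schedule_my_work_in_iothread(AioContext *ctx, MyWork *work)
 *   {
 *       aio_bh_schedule_oneshot(ctx, do_work, work);
 *   }
 *
 * The main loop thread then schedules the work and polls until it completes:
 *
 *   AioContext *ctx = ...;
 *   MyWork work = { .done = false };
 *   schedule_my_work_in_iothread(ctx, &work);
 *   AIO_WAIT_WHILE(ctx, !work.done);
 */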

/**
 * AIO_WAIT_WHILE_INTERNAL:
 * @ctx: the aio context, or NULL if multiple aio contexts (for which the
 *       caller does not hold a lock) are involved in the polling condition.
 * @cond: wait while this conditional expression is true
 *
 * Wait while a condition is true. Use this to implement synchronous
 * operations that require event loop activity.
 *
 * The caller must be sure that something calls aio_wait_kick() when the value
 * of @cond might have changed.
 *
 * The caller's thread must be the IOThread that owns @ctx or the main loop
 * thread (with @ctx acquired exactly once). This macro cannot be used to
 * wait on conditions between two IOThreads since that could lead to deadlock;
 * go via the main loop instead.
 */
#define AIO_WAIT_WHILE_INTERNAL(ctx, cond) ({                      \
    bool waited_ = false;                                          \
    AioWait *wait_ = &global_aio_wait;                             \
    AioContext *ctx_ = (ctx);                                      \
    /* Increment wait_->num_waiters before evaluating cond. */     \
    qatomic_inc(&wait_->num_waiters);                              \
    /* Paired with smp_mb in aio_wait_kick(). */                   \
    smp_mb__after_rmw();                                           \
    if (ctx_ && in_aio_context_home_thread(ctx_)) {                \
        while ((cond)) {                                           \
            aio_poll(ctx_, true);                                  \
            waited_ = true;                                        \
        }                                                          \
    } else {                                                       \
        assert(qemu_get_current_aio_context() ==                   \
               qemu_get_aio_context());                            \
        while ((cond)) {                                           \
            aio_poll(qemu_get_aio_context(), true);                \
            waited_ = true;                                        \
        }                                                          \
    }                                                              \
    qatomic_dec(&wait_->num_waiters);                              \
    waited_; })

#define AIO_WAIT_WHILE(ctx, cond)                                  \
    AIO_WAIT_WHILE_INTERNAL(ctx, cond)

/* TODO replace this with AIO_WAIT_WHILE() in a future patch */
#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond)                         \
    AIO_WAIT_WHILE_INTERNAL(ctx, cond)

/**
 * aio_wait_kick:
 * Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
 * synchronous operations performed in an IOThread, the main thread lets the
 * IOThread's event loop run, waiting for the operation to complete. An
 * aio_wait_kick() call will wake up the main thread.
 */
void aio_wait_kick(void);

/**
 * aio_wait_bh_oneshot:
 * @ctx: the aio context
 * @cb: the BH callback function
 * @opaque: user data for the BH callback function
 *
 * Run a BH in @ctx and wait for it to complete.
 *
 * Must be called from the main loop thread without @ctx acquired.
 * Note that main loop event processing may occur.
 */
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
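
/*
 * Example (illustrative sketch, not part of this API): from the main loop
 * thread, aio_wait_bh_oneshot() can run a callback in another AioContext's
 * home thread and block until it has returned. MyDevice and my_stop_cb()
 * are made-up names used only for illustration:
 *
 *   static void my_stop_cb(void *opaque)
 *   {
 *       MyDevice *dev = opaque;
 *       ...                       <- runs in dev->ctx's home thread
 *       dev->stopped = true;
 *   }
 *
 *   aio_wait_bh_oneshot(dev->ctx, my_stop_cb, dev);
 *
 * The call returns only after my_stop_cb() has finished; the callback itself
 * does not need to call aio_wait_kick(), since the wait/wakeup handshake is
 * handled inside aio_wait_bh_oneshot().
 */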

/**
 * in_aio_context_home_thread:
 * @ctx: the aio context
 *
 * Return whether we are running in the thread that normally runs @ctx. Note
 * that acquiring/releasing ctx does not affect the outcome; each AioContext
 * still has exactly one home thread that is responsible for running it.
 */
static inline bool in_aio_context_home_thread(AioContext *ctx)
{
    if (ctx == qemu_get_current_aio_context()) {
        return true;
    }

    if (ctx == qemu_get_aio_context()) {
        return qemu_mutex_iothread_locked();
    } else {
        return false;
    }
}

#endif /* QEMU_AIO_WAIT_H */