Lines matching "coroutine" in util/qemu-coroutine-lock.c (QEMU)

file-header comment:
    * coroutine queues and locks
    * The lock-free mutex implementation is based on OSv

in qemu_co_queue_init():
    QSIMPLEQ_INIT(&queue->entries);

in qemu_co_queue_wait_impl():
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_HEAD(&queue->entries, self, co_queue_next);
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
    * coroutine but only after this yield and after the main loop
    * primitive automatically places the woken coroutine on the

in qemu_co_enter_next_impl():
    Coroutine *next;
    next = QSIMPLEQ_FIRST(&queue->entries);
    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);

in qemu_co_queue_next():
    /* No unlock/lock needed in coroutine context. */

in qemu_co_queue_restart_all():
    /* No unlock/lock needed in coroutine context. */

in qemu_co_queue_empty():
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
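
Read together, the CoQueue fragments above describe a plain FIFO of blocked coroutines: qemu_co_queue_wait_impl() links the caller into queue->entries (at the head or the tail, depending on the flags) and yields, qemu_co_enter_next_impl() pops the head entry and re-enters it, and qemu_co_queue_empty() just checks the head pointer. Below is a minimal standalone sketch of that discipline; FakeCo, FakeCoQueue and the helper names are invented stand-ins, and the real code uses the QSIMPLEQ macros plus actual coroutine yield/enter calls.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct FakeCo {
        const char *name;
        struct FakeCo *co_queue_next;   /* intrusive link, like co_queue_next */
    } FakeCo;

    typedef struct {
        FakeCo *head;
        FakeCo **ptail;                 /* where the next tail element is linked */
    } FakeCoQueue;

    static void queue_init(FakeCoQueue *q)
    {
        q->head = NULL;
        q->ptail = &q->head;
    }

    /* like qemu_co_queue_wait_impl(): link self into the queue, then yield */
    static void queue_wait(FakeCoQueue *q, FakeCo *self, bool to_front)
    {
        self->co_queue_next = NULL;
        if (to_front) {
            self->co_queue_next = q->head;
            if (!q->head) {
                q->ptail = &self->co_queue_next;
            }
            q->head = self;
        } else {
            *q->ptail = self;
            q->ptail = &self->co_queue_next;
        }
        /* the real code now yields until the coroutine is woken */
    }

    /* like qemu_co_enter_next_impl(): pop the head entry */
    static FakeCo *queue_pop(FakeCoQueue *q)
    {
        FakeCo *next = q->head;
        if (next) {
            q->head = next->co_queue_next;
            if (!q->head) {
                q->ptail = &q->head;
            }
        }
        return next;    /* the real code then re-enters this coroutine */
    }

    int main(void)
    {
        FakeCoQueue q;
        FakeCo a = { "a", NULL }, b = { "b", NULL };

        queue_init(&q);
        queue_wait(&q, &a, false);
        queue_wait(&q, &b, false);
        assert(queue_pop(&q) == &a);    /* FIFO: first waiter is woken first */
        assert(queue_pop(&q) == &b);
        assert(queue_pop(&q) == NULL);  /* mirrors qemu_co_queue_empty() */
        printf("FIFO order preserved\n");
        return 0;
    }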

file comment on the CoMutex wait-record queue:
    /* The wait records are handled with a multiple-producer, single-consumer
     * lock-free queue. There cannot be two concurrent pop_waiter() calls
     * because pop_waiter() can only be called while mutex->handoff is zero.
       ...
     * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
     *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
       ...
     * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
       ...
     *   exit. The next hand-off cannot begin until qemu_co_mutex_lock has
       ...
     * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
     *   In this case another iteration starts with mutex->handoff == 0;

in the wait-record struct:
    Coroutine *co;

in push_waiter():
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);

in move_waiters():
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);

in pop_waiter():
    if (QSLIST_EMPTY(&mutex->to_pop)) {
    if (QSLIST_EMPTY(&mutex->to_pop)) {
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);

in has_waiters():
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
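
push_waiter() may be called by any number of coroutines concurrently, while move_waiters()/pop_waiter() only ever run on one side at a time (the comment block above explains why), so the wait records form a classic multiple-producer, single-consumer structure: producers push onto from_push with an atomic head insert (LIFO), and the consumer drains the whole list with one atomic move and reverses it into to_pop to recover FIFO order. The standalone sketch below shows that pattern with C11 atomics; WaitRec, WaiterList and the push_rec/move_recs/pop_rec names are made up here, where the real code uses QSLIST_*_ATOMIC macros on wait records.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct WaitRec {
        int id;                         /* stands in for the Coroutine pointer */
        struct WaitRec *next;
    } WaitRec;

    typedef struct {
        _Atomic(WaitRec *) from_push;   /* shared LIFO, pushed by any producer */
        WaitRec *to_pop;                /* private FIFO, owned by the consumer */
    } WaiterList;

    /* like push_waiter(): lock-free LIFO push, safe from many producers */
    static void push_rec(WaiterList *l, WaitRec *w)
    {
        WaitRec *old = atomic_load(&l->from_push);
        do {
            w->next = old;
        } while (!atomic_compare_exchange_weak(&l->from_push, &old, w));
    }

    /* like move_waiters(): detach everything pushed so far with one atomic
     * exchange, then reverse it so pop_rec() hands records out in FIFO order */
    static void move_recs(WaiterList *l)
    {
        WaitRec *reversed = atomic_exchange(&l->from_push, NULL);
        while (reversed) {
            WaitRec *w = reversed;
            reversed = w->next;
            w->next = l->to_pop;
            l->to_pop = w;
        }
    }

    /* like pop_waiter(): only ever called by one side at a time */
    static WaitRec *pop_rec(WaiterList *l)
    {
        WaitRec *w;
        if (!l->to_pop) {
            move_recs(l);
            if (!l->to_pop) {
                return NULL;
            }
        }
        w = l->to_pop;
        l->to_pop = w->next;
        return w;
    }

    int main(void)
    {
        WaiterList l;
        WaitRec a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

        atomic_init(&l.from_push, NULL);
        l.to_pop = NULL;

        push_rec(&l, &a);
        push_rec(&l, &b);
        push_rec(&l, &c);
        for (WaitRec *w; (w = pop_rec(&l)) != NULL; ) {
            printf("woken: %d\n", w->id);   /* prints 1, 2, 3: FIFO restored */
        }
        return 0;
    }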

in qemu_co_mutex_wake():
    static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
    /* Read co before co->ctx; pairs with smp_wmb() in
    mutex->ctx = co->ctx;

in qemu_co_mutex_lock_slowpath():
    Coroutine *self = qemu_coroutine_self();
    * Add waiter before reading mutex->handoff. Pairs with qatomic_set_mb
    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
    old_handoff = qatomic_read(&mutex->handoff);
    qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
    Coroutine *co = to_wake->co;
    mutex->ctx = ctx;
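
The slowpath fragment is the lock side of the "Responsibility Hand-Off" protocol: after pushing its wait record, lock() reads mutex->handoff and, if a concurrent unlock() has published a nonzero token there, tries to claim it by cmpxchg-ing it back to zero; whoever wins that cmpxchg becomes responsible for popping and waking one waiter (possibly itself). A minimal sketch of just that claim step, single-threaded and with an invented try_steal_handoff() helper rather than QEMU API:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* The hand-off word: 0 means "no wake-up responsibility is being
     * offered"; any nonzero value is a token published by an unlocker. */
    static _Atomic unsigned handoff;

    /* Lock side: claim the token if one is currently offered.  Returns
     * true if this caller is now responsible for waking a waiter. */
    static bool try_steal_handoff(void)
    {
        unsigned old = atomic_load(&handoff);
        return old != 0 &&
               atomic_compare_exchange_strong(&handoff, &old, 0);
    }

    int main(void)
    {
        assert(!try_steal_handoff());      /* nothing offered yet */
        atomic_store(&handoff, 1);         /* an unlocker publishes token #1 */
        assert(try_steal_handoff());       /* first claimant wins... */
        assert(!try_steal_handoff());      /* ...and the token is gone */
        return 0;
    }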

in qemu_co_mutex_lock():
    Coroutine *self = qemu_coroutine_self();
    * fails. With CoMutex there is no such latency but you still want to
    waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
    if (qatomic_read(&mutex->ctx) == ctx) {
    if (qatomic_read(&mutex->locked) == 0) {
    waiters = qatomic_fetch_inc(&mutex->locked);
    mutex->ctx = ctx;
    mutex->holder = self;
    self->locks_held++;
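
The lock fast path revolves around mutex->locked, which counts the holder plus any waiters: a cmpxchg from 0 to 1 takes the mutex uncontended; if that fails, the caller may spin briefly (unless the holder runs in the same AioContext, where spinning cannot help), retrying the cmpxchg whenever the lock looks free, and otherwise registers itself as a waiter with fetch_inc and drops into the slowpath. A standalone sketch of that decision, with the coroutine machinery replaced by a stub slowpath and all names invented:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* "locked" counts 1 for the holder plus 1 per waiter, like mutex->locked */
    static _Atomic int locked;

    /* stub: in QEMU this would push a wait record and yield the coroutine */
    static void lock_slowpath(void)
    {
        printf("contended: would enqueue a wait record and yield\n");
    }

    static void sketch_lock(bool holder_in_same_context)
    {
        int expected = 0;
        int waiters;

        /* Fast path: 0 -> 1 means we took the mutex uncontended. */
        if (atomic_compare_exchange_strong(&locked, &expected, 1)) {
            printf("uncontended: fast path\n");
            return;
        }
        waiters = expected;                     /* current value of "locked" */

        /* Optional brief spin: only worthwhile if the holder runs in a
         * different context and no other waiter is queued yet. */
        for (int i = 0; waiters == 1 && i < 1000; i++) {
            if (holder_in_same_context) {
                break;                          /* spinning cannot help */
            }
            expected = 0;
            if (atomic_compare_exchange_strong(&locked, &expected, 1)) {
                printf("lock freed while spinning: fast path\n");
                return;
            }
            waiters = expected;
        }

        /* Announce ourselves as a waiter, then sleep in the slow path. */
        atomic_fetch_add(&locked, 1);
        lock_slowpath();
    }

    int main(void)
    {
        sketch_lock(false);    /* takes the fast path */
        sketch_lock(true);     /* contended: goes to the slow path */
        return 0;
    }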

in qemu_co_mutex_unlock():
    Coroutine *self = qemu_coroutine_self();
    assert(mutex->locked);
    assert(mutex->holder == self);
    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (qatomic_fetch_dec(&mutex->locked) == 1) {
    qemu_co_mutex_wake(mutex, to_wake->co);
    * mutex->locked was >1) but it hasn't yet put itself on the wait
    if (++mutex->sequence == 0) {
    mutex->sequence = 1;
    our_handoff = mutex->sequence;
    qatomic_set_mb(&mutex->handoff, our_handoff);
    if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
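
On the unlock side, when fetch_dec(&mutex->locked) says waiters exist but pop_waiter() finds none yet (a concurrent lock() has bumped locked but not pushed its record), unlock publishes a fresh nonzero sequence number in mutex->handoff, re-checks for waiters, and finally tries to cmpxchg the token back to zero: if that cmpxchg fails, some lock() stole the token and now owns the wake-up, so unlock can return. The sketch below plays through both outcomes single-threaded; publish_handoff/finish_handoff/steal_handoff are invented names, and only the token is modelled, not the waiter queue or the coroutines.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned handoff;   /* 0 = no hand-off in progress */
    static unsigned sequence;          /* monotonically increasing, never 0 */

    /* Unlock side: offer the responsibility of waking the next waiter. */
    static unsigned publish_handoff(void)
    {
        if (++sequence == 0) {         /* skip 0, it means "no token" */
            sequence = 1;
        }
        atomic_store(&handoff, sequence);
        return sequence;
    }

    /* Unlock side, second half: try to take the token back.  Returns true
     * if we still hold the responsibility (nobody stole the token). */
    static bool finish_handoff(unsigned our_handoff)
    {
        unsigned expected = our_handoff;
        return atomic_compare_exchange_strong(&handoff, &expected, 0);
    }

    /* Lock side (same idea as the slowpath sketch above): steal the token. */
    static bool steal_handoff(void)
    {
        unsigned old = atomic_load(&handoff);
        return old != 0 && atomic_compare_exchange_strong(&handoff, &old, 0);
    }

    int main(void)
    {
        /* Case 1: no lock() shows up, unlock keeps the responsibility. */
        unsigned tok = publish_handoff();
        assert(finish_handoff(tok));       /* we still own the wake-up */

        /* Case 2: a lock() steals the token between publish and finish. */
        tok = publish_handoff();
        assert(steal_handoff());           /* the locker is now responsible */
        assert(!finish_handoff(tok));      /* ...so unlock can just return  */
        return 0;
    }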

in struct CoRwTicket:
    Coroutine *co;

in qemu_co_rwlock_init():
    qemu_co_mutex_init(&lock->mutex);
    lock->owners = 0;
    QSIMPLEQ_INIT(&lock->tickets);

in qemu_co_rwlock_maybe_wake_one():
    CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets);
    Coroutine *co = NULL;
    * Setting lock->owners here prevents rdlock and wrlock from
    if (tkt->read) {
    if (lock->owners >= 0) {
    lock->owners++;
    co = tkt->co;
    if (lock->owners == 0) {
    lock->owners = -1;
    co = tkt->co;
    QSIMPLEQ_REMOVE_HEAD(&lock->tickets, next);
    qemu_co_mutex_unlock(&lock->mutex);
    qemu_co_mutex_unlock(&lock->mutex);
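
All of the CoRwlock state funnels through lock->owners: 0 means free, a positive value counts active readers, and -1 marks a single writer. qemu_co_rwlock_maybe_wake_one() looks only at the head ticket: a read ticket is admitted whenever owners >= 0 (bumping the reader count), a write ticket only when owners == 0 (setting owners to -1), and otherwise nobody is woken. A small standalone sketch of that decision table, with invented Ticket/RwState types standing in for CoRwTicket and CoRwlock:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
        bool read;        /* head ticket: reader or writer? */
    } Ticket;

    typedef struct {
        int owners;       /* 0 = free, N > 0 = N readers, -1 = one writer */
    } RwState;

    /* Decide whether the head ticket can be admitted, updating owners the
     * way the fragments above do.  Returns true if the ticket's coroutine
     * would be woken (and dequeued). */
    static bool maybe_wake_one(RwState *s, const Ticket *head)
    {
        if (head->read) {
            if (s->owners >= 0) {      /* readers coexist with readers */
                s->owners++;
                return true;
            }
        } else {
            if (s->owners == 0) {      /* a writer needs the lock to itself */
                s->owners = -1;
                return true;
            }
        }
        return false;                  /* leave the ticket queued */
    }

    int main(void)
    {
        Ticket r = { .read = true }, w = { .read = false };

        RwState s = { .owners = 2 };                       /* two readers active */
        assert(maybe_wake_one(&s, &r) && s.owners == 3);   /* reader joins */
        assert(!maybe_wake_one(&s, &w));                   /* writer waits */

        s.owners = 0;                                      /* lock released */
        assert(maybe_wake_one(&s, &w) && s.owners == -1);  /* writer admitted */
        assert(!maybe_wake_one(&s, &r));                   /* reader now waits */
        return 0;
    }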

in qemu_co_rwlock_rdlock():
    Coroutine *self = qemu_coroutine_self();
    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners == 0 || (lock->owners > 0 && QSIMPLEQ_EMPTY(&lock->tickets))) {
    lock->owners++;
    qemu_co_mutex_unlock(&lock->mutex);
    QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
    qemu_co_mutex_unlock(&lock->mutex);
    assert(lock->owners >= 1);
    qemu_co_mutex_lock(&lock->mutex);
    self->locks_held++;
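
The rdlock fast path admits a new reader only when the lock is free, or when it is held by readers and no ticket is queued; as soon as anyone is waiting in line (typically a writer), new readers queue behind it, which keeps writers from being starved. A tiny sketch of just that admission predicate (rdlock_fast_path() is an invented name; the real check is the owners/tickets condition shown above):

    #include <assert.h>
    #include <stdbool.h>

    /* owners: 0 = free, N > 0 = N readers, -1 = writer held.
     * queued: number of tickets already waiting (readers or writers). */
    static bool rdlock_fast_path(int owners, int queued)
    {
        return owners == 0 || (owners > 0 && queued == 0);
    }

    int main(void)
    {
        assert(rdlock_fast_path(0, 0));    /* free: reader enters            */
        assert(rdlock_fast_path(3, 0));    /* readers only: reader joins     */
        assert(!rdlock_fast_path(3, 1));   /* someone is queued: get in line */
        assert(!rdlock_fast_path(-1, 0));  /* writer holds it: wait          */
        return 0;
    }

In the slow path the fragments show the reader queueing a ticket, yielding, and re-taking lock->mutex afterwards; upstream that re-lock is followed by another call to the wake-one helper, so a run of queued readers is admitted one after the other.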

in qemu_co_rwlock_unlock():
    Coroutine *self = qemu_coroutine_self();
    self->locks_held--;
    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners > 0) {
    lock->owners--;
    assert(lock->owners == -1);
    lock->owners = 0;

in qemu_co_rwlock_downgrade():
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners == -1);
    lock->owners = 1;

in qemu_co_rwlock_wrlock():
    Coroutine *self = qemu_coroutine_self();
    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners == 0) {
    lock->owners = -1;
    qemu_co_mutex_unlock(&lock->mutex);
    QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
    qemu_co_mutex_unlock(&lock->mutex);
    assert(lock->owners == -1);
    self->locks_held++;

in qemu_co_rwlock_upgrade():
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners > 0);
    if (lock->owners == 1 && QSIMPLEQ_EMPTY(&lock->tickets)) {
    lock->owners = -1;
    qemu_co_mutex_unlock(&lock->mutex);
    lock->owners--;
    QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
    assert(lock->owners == -1);
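
Downgrade and upgrade are transitions of the same owners counter. Downgrade turns the exclusive holder into a reader (owners: -1 -> 1) and can then wake further queued readers; upgrade completes immediately only when the caller is the sole reader and nothing is queued (owners: 1 -> -1), and otherwise the caller drops its read share, queues a write ticket and yields, so other queued waiters may run before the upgrade finishes. A compact sketch of those transitions with invented helper names:

    #include <assert.h>
    #include <stdbool.h>

    /* owners: 0 = free, N > 0 = N readers, -1 = one writer. */

    static void downgrade(int *owners)
    {
        assert(*owners == -1);    /* must currently hold the write lock */
        *owners = 1;              /* keep it, but as a reader */
        /* the real code then wakes queued readers, if any */
    }

    /* Returns true if the upgrade happened immediately; false means the
     * caller would have to queue a write ticket and yield. */
    static bool try_upgrade_fast(int *owners, int queued)
    {
        assert(*owners > 0);      /* must currently hold a read lock */
        if (*owners == 1 && queued == 0) {
            *owners = -1;         /* sole reader, nothing in line: upgrade now */
            return true;
        }
        (*owners)--;              /* give up the read share... */
        return false;             /* ...queue a write ticket and wait */
    }

    int main(void)
    {
        int owners = -1;
        downgrade(&owners);
        assert(owners == 1);

        assert(try_upgrade_fast(&owners, 0));      /* sole reader upgrades */
        assert(owners == -1);

        owners = 2;                                /* another reader is active */
        assert(!try_upgrade_fast(&owners, 0));     /* must wait in line */
        assert(owners == 1);                       /* our read share was dropped */
        return 0;
    }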