Lines Matching +full:wait +full:- +full:state
1 // SPDX-License-Identifier: GPL-2.0
74 if ((atomic_read(&lock->state) & mask) != mask) in six_set_bitmask()
75 atomic_or(mask, &lock->state); in six_set_bitmask()
80 if (atomic_read(&lock->state) & mask) in six_clear_bitmask()
81 atomic_and(~mask, &lock->state); in six_clear_bitmask()
91 EBUG_ON(lock->owner); in six_set_owner()
92 lock->owner = owner; in six_set_owner()
94 EBUG_ON(lock->owner != current); in six_set_owner()
104 read_count += *per_cpu_ptr(lock->readers, cpu); in pcpu_read_count()
109 * __do_six_trylock() - main trylock routine
115 * wakeup: when a wakeup is required, we return -1 - wakeup_type.
123 EBUG_ON(type == SIX_LOCK_write && lock->owner != task); in __do_six_trylock()
125 (try != !(atomic_read(&lock->state) & SIX_LOCK_HELD_write))); in __do_six_trylock()
154 if (type == SIX_LOCK_read && lock->readers) { in __do_six_trylock()
156 this_cpu_inc(*lock->readers); /* signal that we own lock */ in __do_six_trylock()
160 old = atomic_read(&lock->state); in __do_six_trylock()
163 this_cpu_sub(*lock->readers, !ret); in __do_six_trylock()
168 if (atomic_read(&lock->state) & SIX_LOCK_WAITING_write) in __do_six_trylock()
169 ret = -1 - SIX_LOCK_write; in __do_six_trylock()
171 } else if (type == SIX_LOCK_write && lock->readers) { in __do_six_trylock()
173 atomic_add(SIX_LOCK_HELD_write, &lock->state); in __do_six_trylock()
180 old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state); in __do_six_trylock()
182 ret = -1 - SIX_LOCK_read; in __do_six_trylock()
185 old = atomic_read(&lock->state); in __do_six_trylock()
192 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, old + l[type].lock_val)); in __do_six_trylock()
194 EBUG_ON(ret && !(atomic_read(&lock->state) & l[type].held_mask)); in __do_six_trylock()
201 (atomic_read(&lock->state) & SIX_LOCK_HELD_write)); in __do_six_trylock()
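The trylock paths above pack three outcomes into one int: 0 for failure, 1 for success, and -1 - lock_type for "failed, and waiters of lock_type need waking". A minimal standalone sketch of the encode/decode arithmetic (the enum is redeclared here only so the snippet stands alone; this is not an excerpt from six.c):

        enum six_lock_type { SIX_LOCK_read, SIX_LOCK_intent, SIX_LOCK_write };

        int ret = -1 - SIX_LOCK_write;          /* encode: wake write waiters */
        enum six_lock_type t = -ret - 1;        /* decode: t == SIX_LOCK_write */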
215 raw_spin_lock(&lock->wait_lock); in __six_lock_wakeup()
217 list_for_each_entry_safe(w, next, &lock->wait_list, list) { in __six_lock_wakeup()
218 if (w->lock_want != lock_type) in __six_lock_wakeup()
225 ret = __do_six_trylock(lock, lock_type, w->task, false); in __six_lock_wakeup()
231 * against the wakee noticing w->lock_acquired, returning, and in __six_lock_wakeup()
234 task = get_task_struct(w->task); in __six_lock_wakeup()
235 __list_del(w->list.prev, w->list.next); in __six_lock_wakeup()
238 * __list_del before setting w->lock_acquired; @w is on the in __six_lock_wakeup()
240 * after it sees w->lock_acquired with no other locking: in __six_lock_wakeup()
243 smp_store_release(&w->lock_acquired, true); in __six_lock_wakeup()
250 raw_spin_unlock(&lock->wait_lock); in __six_lock_wakeup()
253 lock_type = -ret - 1; in __six_lock_wakeup()
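The ordering comments above (lines 238-243) are the heart of the handoff: the waker unlinks @w and only afterwards publishes success with smp_store_release(), so once the waiter observes lock_acquired it may reuse or free @w without ever taking wait_lock. A condensed sketch of the pairing, assuming kernel context (not a verbatim excerpt):

        /* waker, holding lock->wait_lock: */
        __list_del(w->list.prev, w->list.next);         /* unlink first */
        smp_store_release(&w->lock_acquired, true);     /* then publish */

        /* waiter, lockless: pairs with the release above */
        if (smp_load_acquire(&wait->lock_acquired)) {
                /* the unlink is guaranteed visible; @wait, typically on
                 * the waiter's stack, is now safe to reuse */
        }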
259 static void six_lock_wakeup(struct six_lock *lock, u32 state, in six_lock_wakeup() argument
262 if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read)) in six_lock_wakeup()
265 if (!(state & (SIX_LOCK_WAITING_read << lock_type))) in six_lock_wakeup()
278 __six_lock_wakeup(lock, -ret - 1); in do_six_trylock()
284 * six_trylock_ip - attempt to take a six lock without blocking
297 six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip); in six_trylock_ip()
303 * six_relock_ip - attempt to re-take a lock that was held previously
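A usage sketch for these two entry points, assuming the six_trylock_read()/six_relock_read()/six_unlock_read() wrappers that six.h generates and a hypothetical struct foo with an embedded six lock:

        u32 seq;

        if (six_trylock_read(&foo->lock)) {     /* non-blocking acquire */
                seq = foo->lock.seq;            /* capture while held */
                six_unlock_read(&foo->lock);
        }

        /* later: succeeds only if the sequence number still matches,
         * i.e. no write lock was taken in the interim */
        if (six_relock_read(&foo->lock, seq)) {
                /* lock reacquired, protected state unchanged */
                six_unlock_read(&foo->lock);
        }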
334 * that will live-lock because we won't let the owner complete. in six_owner_running()
337 struct task_struct *owner = READ_ONCE(lock->owner); in six_owner_running()
345 struct six_lock_waiter *wait, in six_optimistic_spin() argument
354 if (lock->wait_list.next != &wait->list) in six_optimistic_spin()
357 if (atomic_read(&lock->state) & SIX_LOCK_NOSPIN) in six_optimistic_spin()
366 * wait->lock_acquired: pairs with the smp_store_release in in six_optimistic_spin()
369 if (smp_load_acquire(&wait->lock_acquired)) { in six_optimistic_spin()
381 * everything in this loop to be re-loaded. We don't need in six_optimistic_spin()
395 struct six_lock_waiter *wait, in six_optimistic_spin() argument
405 struct six_lock_waiter *wait, in six_lock_slowpath() argument
412 EBUG_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_write); in six_lock_slowpath()
413 atomic_add(SIX_LOCK_HELD_write, &lock->state); in six_lock_slowpath()
418 lock_contended(&lock->dep_map, ip); in six_lock_slowpath()
420 wait->task = current; in six_lock_slowpath()
421 wait->lock_want = type; in six_lock_slowpath()
422 wait->lock_acquired = false; in six_lock_slowpath()
424 raw_spin_lock(&lock->wait_lock); in six_lock_slowpath()
432 wait->start_time = local_clock(); in six_lock_slowpath()
434 if (!list_empty(&lock->wait_list)) { in six_lock_slowpath()
436 list_last_entry(&lock->wait_list, in six_lock_slowpath()
439 if (time_before_eq64(wait->start_time, last->start_time)) in six_lock_slowpath()
440 wait->start_time = last->start_time + 1; in six_lock_slowpath()
443 list_add_tail(&wait->list, &lock->wait_list); in six_lock_slowpath()
445 raw_spin_unlock(&lock->wait_lock); in six_lock_slowpath()
453 __six_lock_wakeup(lock, -ret - 1); in six_lock_slowpath()
457 if (six_optimistic_spin(lock, wait, type)) in six_lock_slowpath()
465 * wait->lock_acquired: pairs with the smp_store_release in in six_lock_slowpath()
468 if (smp_load_acquire(&wait->lock_acquired)) in six_lock_slowpath()
478 * acquired the lock - should_sleep_fn() might have in six_lock_slowpath()
479 * modified external state (e.g. when the deadlock cycle in six_lock_slowpath()
482 raw_spin_lock(&lock->wait_lock); in six_lock_slowpath()
483 acquired = wait->lock_acquired; in six_lock_slowpath()
485 list_del(&wait->list); in six_lock_slowpath()
486 raw_spin_unlock(&lock->wait_lock); in six_lock_slowpath()
500 six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read); in six_lock_slowpath()
508 * six_lock_ip_waiter - take a lock, with full waitlist interface
511 * @wait: pointer to wait object, which will be added to lock's waitlist
521 * @wait object should be embedded into the struct that tracks held locks -
522 * which must also be accessible in a thread-safe way.
527 * When this function must block, @wait will be added to @lock's waitlist before
528 * calling trylock, and before calling @should_sleep_fn, and @wait will not be
532 * @wait.start_time will be monotonically increasing for any given waitlist, and
538 struct six_lock_waiter *wait, in six_lock_ip_waiter() argument
544 wait->start_time = 0; in six_lock_ip_waiter()
547 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip); in six_lock_ip_waiter()
550 : six_lock_slowpath(lock, type, wait, should_sleep_fn, p, ip); in six_lock_ip_waiter()
553 six_release(&lock->dep_map, ip); in six_lock_ip_waiter()
555 lock_acquired(&lock->dep_map, ip); in six_lock_ip_waiter()
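A hedged usage sketch of the waitlist interface documented above. should_sleep_fn returns 0 to keep blocking or a -errno to abort the lock attempt; my_cycle_check() and struct my_trans are hypothetical stand-ins for a caller's deadlock-avoidance state:

        static int my_should_sleep(struct six_lock *lock, void *p)
        {
                struct my_trans *trans = p;     /* hypothetical caller state */

                return my_cycle_check(trans) ? -EDEADLK : 0;
        }

        struct six_lock_waiter wait;
        int ret = six_lock_ip_waiter(&foo->lock, SIX_LOCK_intent, &wait,
                                     my_should_sleep, trans, _THIS_IP_);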
564 u32 state; in do_six_unlock_type() local
567 lock->owner = NULL; in do_six_unlock_type()
570 lock->readers) { in do_six_unlock_type()
572 this_cpu_dec(*lock->readers); in do_six_unlock_type()
574 state = atomic_read(&lock->state); in do_six_unlock_type()
579 v += atomic_read(&lock->state) & SIX_LOCK_NOSPIN; in do_six_unlock_type()
581 EBUG_ON(!(atomic_read(&lock->state) & l[type].held_mask)); in do_six_unlock_type()
582 state = atomic_sub_return_release(v, &lock->state); in do_six_unlock_type()
585 six_lock_wakeup(lock, state, l[type].unlock_wakeup); in do_six_unlock_type()
589 * six_unlock_ip - drop a six lock
598 * six_lock_read(&foo->lock); read count 1
599 * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
600 * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 1 in six_unlock_ip()
601 * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 0 in six_unlock_ip()
606 !(atomic_read(&lock->state) & SIX_LOCK_HELD_intent)); in six_unlock_ip()
609 lock->owner != current); in six_unlock_ip()
612 six_release(&lock->dep_map, ip); in six_unlock_ip()
614 lock->seq++; in six_unlock_ip()
617 lock->intent_lock_recurse) { in six_unlock_ip()
618 --lock->intent_lock_recurse; in six_unlock_ip()
627 * six_lock_downgrade - convert an intent lock to a read lock
640 * six_lock_tryupgrade - attempt to convert read lock to an intent lock
650 u32 old = atomic_read(&lock->state), new; in six_lock_tryupgrade()
658 if (!lock->readers) { in six_lock_tryupgrade()
660 new -= l[SIX_LOCK_read].lock_val; in six_lock_tryupgrade()
664 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, new)); in six_lock_tryupgrade()
666 if (lock->readers) in six_lock_tryupgrade()
667 this_cpu_dec(*lock->readers); in six_lock_tryupgrade()
676 * six_trylock_convert - attempt to convert a held lock from one type to another
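Sketch of the conversion helpers (foo is hypothetical): downgrading intent to read always succeeds, while upgrading read to intent fails if another thread already holds intent:

        six_lock_read(&foo->lock, NULL, NULL);

        if (six_lock_tryupgrade(&foo->lock)) {
                /* now holding intent instead of read */
                six_lock_downgrade(&foo->lock); /* back to read */
        }
        six_unlock_read(&foo->lock);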
705 * six_lock_increment - increase held lock count on a lock that is already held
717 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_); in six_lock_increment()
723 if (lock->readers) { in six_lock_increment()
724 this_cpu_inc(*lock->readers); in six_lock_increment()
726 EBUG_ON(!(atomic_read(&lock->state) & in six_lock_increment()
729 atomic_add(l[type].lock_val, &lock->state); in six_lock_increment()
733 EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent)); in six_lock_increment()
734 lock->intent_lock_recurse++; in six_lock_increment()
744 * six_lock_wakeup_all - wake up all waiters on @lock
747 * Waking up waiters will cause them to re-run should_sleep_fn, which may then in six_lock_wakeup_all()
750 * This function is never needed in a bug-free program; it's only useful in
755 u32 state = atomic_read(&lock->state); in six_lock_wakeup_all() local
758 six_lock_wakeup(lock, state, SIX_LOCK_read); in six_lock_wakeup_all()
759 six_lock_wakeup(lock, state, SIX_LOCK_intent); in six_lock_wakeup_all()
760 six_lock_wakeup(lock, state, SIX_LOCK_write); in six_lock_wakeup_all()
762 raw_spin_lock(&lock->wait_lock); in six_lock_wakeup_all()
763 list_for_each_entry(w, &lock->wait_list, list) in six_lock_wakeup_all()
764 wake_up_process(w->task); in six_lock_wakeup_all()
765 raw_spin_unlock(&lock->wait_lock); in six_lock_wakeup_all()
770 * six_lock_counts - return held lock counts, for each lock type
779 ret.n[SIX_LOCK_read] = !lock->readers in six_lock_counts()
780 ? atomic_read(&lock->state) & SIX_LOCK_HELD_read in six_lock_counts()
782 ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) + in six_lock_counts()
783 lock->intent_lock_recurse; in six_lock_counts()
784 ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write); in six_lock_counts()
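The returned counts can back debug assertions; a hypothetical check, relying on the six-lock convention that a write lock is only taken while intent is held:

        struct six_lock_count c = six_lock_counts(&foo->lock);

        WARN_ON(c.n[SIX_LOCK_write] && !c.n[SIX_LOCK_intent]);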
791 * six_lock_readers_add - directly manipulate reader count of a lock
798 * When we need to take a write lock, the read locks will cause self-deadlock,
800 * current thread and which are held by a different thread - it does no
801 * per-thread tracking of held locks.
805 * then re-add them.
812 if (lock->readers) { in six_lock_readers_add()
813 this_cpu_add(*lock->readers, nr); in six_lock_readers_add()
815 EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0); in six_lock_readers_add()
817 atomic_add(nr, &lock->state); in six_lock_readers_add()
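A usage sketch of the suppress/re-add pattern the comment describes, modeled on how bcachefs takes write locks; nr_read is the caller's own tally of read locks it holds on this lock, since six locks do no per-thread tracking:

        six_lock_readers_add(&foo->lock, -nr_read);     /* hide our readers */
        six_lock_write(&foo->lock, NULL, NULL);         /* intent already held */
        six_lock_readers_add(&foo->lock, nr_read);      /* restore the count */
        /* ... write-locked work ... */
        six_unlock_write(&foo->lock);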
823 * six_lock_exit - release resources held by a lock prior to freeing
831 WARN_ON(lock->readers && pcpu_read_count(lock)); in six_lock_exit()
832 WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read); in six_lock_exit()
834 free_percpu(lock->readers); in six_lock_exit()
835 lock->readers = NULL; in six_lock_exit()
842 atomic_set(&lock->state, 0); in __six_lock_init()
843 raw_spin_lock_init(&lock->wait_lock); in __six_lock_init()
844 INIT_LIST_HEAD(&lock->wait_list); in __six_lock_init()
847 lockdep_init_map(&lock->dep_map, name, key, 0); in __six_lock_init()
859 * same semantics in non-percpu mode: callers can check for in __six_lock_init()
860 * failure if they wish by checking lock->readers, but generally in __six_lock_init()
863 lock->readers = alloc_percpu(unsigned); in __six_lock_init()
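Init/teardown sketch, assuming the six_lock_init() wrapper macro and the SIX_LOCK_INIT_PCPU flag from six.h. With the flag set, read counts live in the percpu allocation seen above, and six_lock_exit() releases it:

        struct six_lock lock;

        six_lock_init(&lock, SIX_LOCK_INIT_PCPU);
        /* if the percpu allocation failed, lock.readers is NULL and the
         * lock transparently falls back to the atomic read count */
        six_lock_exit(&lock);   /* frees lock.readers, if any */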