1 // SPDX-License-Identifier: GPL-2.0-only
3 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
7 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
12 * See Documentation/locking/rt-mutex-design.rst for details.
26 * lock->owner state tracking:
28 * lock->owner holds the task_struct pointer of the owner. Bit 0
39 * possible when bit 0 of lock->owner is 0.
42 * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
60 WRITE_ONCE(lock->owner, (struct task_struct *)val); in rt_mutex_set_owner()
65 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
66 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
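
Taken together, the helpers above maintain the encoding described in the header comment: the owner task_struct pointer with the waiters flag in bit 0. A minimal sketch of the matching decode side, modelled on rt_mutex_owner() in rtmutex_common.h:

	#define RT_MUTEX_HAS_WAITERS	1UL

	static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
	{
		unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

		/* Mask off bit 0 (the waiters flag) to recover the pointer. */
		return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
	}
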
71 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
78 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
82 * l->owner=T1 in fixup_rt_mutex_waiters()
84 * lock(l->lock) in fixup_rt_mutex_waiters()
85 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
88 * unlock(l->lock) in fixup_rt_mutex_waiters()
92 * lock(l->lock) in fixup_rt_mutex_waiters()
93 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
96 * unlock(l->lock) in fixup_rt_mutex_waiters()
98 * signal(->T2) signal(->T3) in fixup_rt_mutex_waiters()
99 * lock(l->lock) in fixup_rt_mutex_waiters()
102 * unlock(l->lock) in fixup_rt_mutex_waiters()
103 * lock(l->lock) in fixup_rt_mutex_waiters()
107 * unlock(l->lock) in fixup_rt_mutex_waiters()
108 * lock(l->lock) in fixup_rt_mutex_waiters()
111 * l->owner = owner in fixup_rt_mutex_waiters()
112 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
113 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
115 * lock(l->lock) in fixup_rt_mutex_waiters()
118 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
119 * cmpxchg(l->owner, T1, NULL) in fixup_rt_mutex_waiters()
120 * ===> Success (l->owner = NULL) in fixup_rt_mutex_waiters()
122 * l->owner = owner in fixup_rt_mutex_waiters()
123 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
128 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
129 * bit. If the bit is set then nothing can change l->owner either in fixup_rt_mutex_waiters()
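
With that scenario in mind, the surviving fragments of fixup_rt_mutex_waiters() assemble into roughly the following body (a sketch; the authoritative version lives in kernel/locking/rtmutex.c):

	static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
	{
		unsigned long owner, *p = (unsigned long *) &lock->owner;

		/* Waiters still enqueued? Then the bit is legitimately set. */
		if (rt_mutex_has_waiters(lock))
			return;

		/*
		 * No waiters left: clear a stale HAS_WAITERS bit so the
		 * fast-path cmpxchg can succeed again. Serialized by
		 * lock->wait_lock as argued in the scenario above.
		 */
		owner = READ_ONCE(*p);
		if (owner & RT_MUTEX_HAS_WAITERS)
			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
	}
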
144 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
145 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
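
These two macros back the lock/unlock fast paths: an uncontended acquisition is a single cmpxchg of lock->owner from NULL to current, and anything else falls through to the slow path, which serializes on lock->wait_lock. Sketched after rt_mutex_fastlock():

	/* Fast path: we become owner iff lock->owner was NULL. */
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
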
148 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
154 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
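
On this cmpxchg-capable side, the declaration above continues into a relaxed retry loop that ORs in the waiters bit without disturbing a concurrent owner; roughly:

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
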
165 * 2) Drop lock->wait_lock
170 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
175 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
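
Assembled, the cmpxchg variant of unlock_rt_mutex_safe() clears the waiters bit, drops wait_lock, and only then attempts the final NULL store, reporting failure so the caller can retry; a sketch:

	static bool __sched unlock_rt_mutex_safe(struct rt_mutex *lock,
						 unsigned long flags)
		__releases(lock->wait_lock)
	{
		struct task_struct *owner = rt_mutex_owner(lock);

		clear_rt_mutex_waiters(lock);
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

		/*
		 * If the cmpxchg loses, a new waiter slipped in after we
		 * dropped wait_lock; tell the caller to retry the slow path.
		 */
		return rt_mutex_cmpxchg_release(lock, owner, NULL);
	}
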
209 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
210 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
214 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
218 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
220 lock->owner = NULL; in unlock_rt_mutex_safe()
221 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
230 &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
236 if (left->prio < right->prio) in rt_mutex_waiter_less()
245 if (dl_prio(left->prio)) in rt_mutex_waiter_less()
246 return dl_time_before(left->deadline, right->deadline); in rt_mutex_waiter_less()
255 if (left->prio != right->prio) in rt_mutex_waiter_equal()
264 if (dl_prio(left->prio)) in rt_mutex_waiter_equal()
265 return left->deadline == right->deadline; in rt_mutex_waiter_equal()
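
The two comparators assemble as follows: strict ordering by priority first, with SCHED_DEADLINE tasks (which all share one prio value) falling back to earliest-deadline-first. A sketch:

	static int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
					struct rt_mutex_waiter *right)
	{
		if (left->prio < right->prio)
			return 1;

		/* Deadline tasks share prio; order by absolute deadline. */
		if (dl_prio(left->prio))
			return dl_time_before(left->deadline, right->deadline);

		return 0;
	}

	static int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
					 struct rt_mutex_waiter *right)
	{
		if (left->prio != right->prio)
			return 0;

		if (dl_prio(left->prio))
			return left->deadline == right->deadline;

		return 1;
	}
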
273 struct rb_node **link = &lock->waiters.rb_root.rb_node; in rt_mutex_enqueue()
282 link = &parent->rb_left; in rt_mutex_enqueue()
284 link = &parent->rb_right; in rt_mutex_enqueue()
289 rb_link_node(&waiter->tree_entry, parent, link); in rt_mutex_enqueue()
290 rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost); in rt_mutex_enqueue()
296 if (RB_EMPTY_NODE(&waiter->tree_entry)) in rt_mutex_dequeue()
299 rb_erase_cached(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
300 RB_CLEAR_NODE(&waiter->tree_entry); in rt_mutex_dequeue()
306 struct rb_node **link = &task->pi_waiters.rb_root.rb_node; in rt_mutex_enqueue_pi()
315 link = &parent->rb_left; in rt_mutex_enqueue_pi()
317 link = &parent->rb_right; in rt_mutex_enqueue_pi()
322 rb_link_node(&waiter->pi_tree_entry, parent, link); in rt_mutex_enqueue_pi()
323 rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost); in rt_mutex_enqueue_pi()
329 if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) in rt_mutex_dequeue_pi()
332 rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
333 RB_CLEAR_NODE(&waiter->pi_tree_entry); in rt_mutex_dequeue_pi()
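
All four tree helpers share the same shape; what the fragments elide is the standard rb-tree descent between the root lookup and rb_link_node(). For the lock waiter tree it is roughly:

	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}
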
340 lockdep_assert_held(&p->pi_lock); in rt_mutex_adjust_prio()
343 pi_task = task_top_pi_waiter(p)->task; in rt_mutex_adjust_prio()
381 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
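
rt_mutex_adjust_prio() itself is short: with p->pi_lock held it picks the top pi waiter (if any) as the priority donor and hands it to the scheduler. A sketch:

	static void rt_mutex_adjust_prio(struct task_struct *p)
	{
		struct task_struct *pi_task = NULL;

		lockdep_assert_held(&p->pi_lock);

		if (task_has_pi_waiters(p))
			pi_task = task_top_pi_waiter(p)->task;

		rt_mutex_setprio(p, pi_task);
	}
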
386 * Decreases task's usage by one - may thus free the task.
403 * Returns 0 or -EDEADLK.
408 * [P] task->pi_lock held
409 * [L] rtmutex->wait_lock held
425 * [1] lock(task->pi_lock); [R] acquire [P]
426 * [2] waiter = task->pi_blocked_on; [P]
428 * [4] lock = waiter->lock; [P]
429 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
430 * unlock(task->pi_lock); release [P]
435 * [8] unlock(task->pi_lock); release [P]
440 * lock(task->pi_lock); [L] acquire [P]
443 * [13] unlock(task->pi_lock); release [P]
444 * unlock(lock->wait_lock); release [L]
484 top_task->comm, task_pid_nr(top_task)); in rt_mutex_adjust_prio_chain()
488 return -EDEADLK; in rt_mutex_adjust_prio_chain()
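
This -EDEADLK is the chain-walk depth limiter at the top of rt_mutex_adjust_prio_chain(); assembled, the check reads roughly (printk wording approximated):

	if (++depth > max_lock_depth) {
		static int prev_max;

		/* Print the warning only once per new maximum. */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached task: %s (%d)\n",
			       max_lock_depth, top_task->comm,
			       task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}
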
501 raw_spin_lock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
506 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
509 * [3] check_exit_conditions_1() protected by task->pi_lock. in rt_mutex_adjust_prio_chain()
536 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
578 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
580 * [5] We need to trylock here as we are holding task->pi_lock, in rt_mutex_adjust_prio_chain()
584 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
585 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
591 * [6] check_exit_conditions_2() protected by task->pi_lock and in rt_mutex_adjust_prio_chain()
592 * lock->wait_lock. in rt_mutex_adjust_prio_chain()
601 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
602 ret = -EDEADLK; in rt_mutex_adjust_prio_chain()
616 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
620 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
624 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
630 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
645 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
646 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
680 waiter->prio = task->prio; in rt_mutex_adjust_prio_chain()
681 waiter->deadline = task->dl.deadline; in rt_mutex_adjust_prio_chain()
686 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
690 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
703 wake_up_process(rt_mutex_top_waiter(lock)->task); in rt_mutex_adjust_prio_chain()
704 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
710 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
747 * [12] check_exit_conditions_4() protected by task->pi_lock in rt_mutex_adjust_prio_chain()
748 * and lock->wait_lock. The actual decisions are made after we in rt_mutex_adjust_prio_chain()
754 * task->pi_lock next_lock cannot be dereferenced anymore. in rt_mutex_adjust_prio_chain()
764 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
765 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
788 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
796 * Try to take an rt-mutex
798 * Must be called with lock->wait_lock held and interrupts disabled
808 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
812 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all in try_to_take_rt_mutex()
814 * and they serialize on @lock->wait_lock. in try_to_take_rt_mutex()
819 * - There is a lock owner. The caller must fixup the in try_to_take_rt_mutex()
823 * - @task acquires the lock and there are no other in try_to_take_rt_mutex()
860 * the lock. @task->pi_blocked_on is NULL, so it does in try_to_take_rt_mutex()
865 * If @task->prio is greater than or equal to in try_to_take_rt_mutex()
881 * pi_lock dance. @task->pi_blocked_on is NULL in try_to_take_rt_mutex()
890 * Clear @task->pi_blocked_on. Requires protection by in try_to_take_rt_mutex()
891 * @task->pi_lock. Redundant operation for the @waiter == NULL in try_to_take_rt_mutex()
895 raw_spin_lock(&task->pi_lock); in try_to_take_rt_mutex()
896 task->pi_blocked_on = NULL; in try_to_take_rt_mutex()
900 * waiter into @task->pi_waiters tree. in try_to_take_rt_mutex()
904 raw_spin_unlock(&task->pi_lock); in try_to_take_rt_mutex()
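
These fragments are the tail of try_to_take_rt_mutex(); assembled, the take sequence clears pi_blocked_on, republishes the remaining top waiter, and claims ownership. Roughly:

	takeit:
		raw_spin_lock(&task->pi_lock);
		task->pi_blocked_on = NULL;
		/*
		 * If waiters remain, insert the new top waiter into
		 * @task->pi_waiters tree.
		 */
		if (rt_mutex_has_waiters(lock))
			rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
		raw_spin_unlock(&task->pi_lock);

		rt_mutex_set_owner(lock, task);

		return 1;
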
924 * This must be called with lock->wait_lock held and interrupts disabled
936 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
943 * the other will detect the deadlock and return -EDEADLOCK, in task_blocks_on_rt_mutex()
948 return -EDEADLK; in task_blocks_on_rt_mutex()
950 raw_spin_lock(&task->pi_lock); in task_blocks_on_rt_mutex()
951 waiter->task = task; in task_blocks_on_rt_mutex()
952 waiter->lock = lock; in task_blocks_on_rt_mutex()
953 waiter->prio = task->prio; in task_blocks_on_rt_mutex()
954 waiter->deadline = task->dl.deadline; in task_blocks_on_rt_mutex()
961 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
963 raw_spin_unlock(&task->pi_lock); in task_blocks_on_rt_mutex()
968 raw_spin_lock(&owner->pi_lock); in task_blocks_on_rt_mutex()
974 if (owner->pi_blocked_on) in task_blocks_on_rt_mutex()
983 raw_spin_unlock(&owner->pi_lock); in task_blocks_on_rt_mutex()
999 raw_spin_unlock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1004 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
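
Between the two wait_lock operations above sits the hand-off into the chain walk: the owner is pinned with a reference (dropped inside the walk), wait_lock is released so the walk can take locks in its own order, and then reacquired. Roughly:

	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);
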
1011 * queue it up.
1013 * Called with lock->wait_lock held and interrupts disabled.
1020 raw_spin_lock(&current->pi_lock); in mark_wakeup_next_waiter()
1025 * Remove it from current->pi_waiters and deboost. in mark_wakeup_next_waiter()
1028 * rt_mutex_setprio() to update p->pi_top_task before the in mark_wakeup_next_waiter()
1042 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1047 * p->pi_top_task pointer points to a blocked task). This however can in mark_wakeup_next_waiter()
1055 wake_q_add(wake_q, waiter->task); in mark_wakeup_next_waiter()
1056 raw_spin_unlock(&current->pi_lock); in mark_wakeup_next_waiter()
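
Assembled, mark_wakeup_next_waiter() dequeues the top waiter from current's pi tree, deboosts, parks the lock in a HAS_WAITERS-only state, and queues the wakeup, keeping preemption disabled until rt_mutex_postunlock(). A sketch:

	static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
					    struct rt_mutex *lock)
	{
		struct rt_mutex_waiter *waiter;

		raw_spin_lock(&current->pi_lock);

		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_dequeue_pi(current, waiter);
		rt_mutex_adjust_prio(current);

		/* Force the woken waiter into the slow path. */
		lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

		preempt_disable();
		wake_q_add(wake_q, waiter->task);
		raw_spin_unlock(&current->pi_lock);
	}
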
1062 * Must be called with lock->wait_lock held and interrupts disabled. It must
1072 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1074 raw_spin_lock(&current->pi_lock); in remove_waiter()
1076 current->pi_blocked_on = NULL; in remove_waiter()
1077 raw_spin_unlock(&current->pi_lock); in remove_waiter()
1086 raw_spin_lock(&owner->pi_lock); in remove_waiter()
1098 raw_spin_unlock(&owner->pi_lock); in remove_waiter()
1110 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1115 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1129 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1131 waiter = task->pi_blocked_on; in rt_mutex_adjust_pi()
1133 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1136 next_lock = waiter->lock; in rt_mutex_adjust_pi()
1137 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
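
The tail of rt_mutex_adjust_pi() between these fragments: bail out if the task is not blocked or its waiter entry already matches its current scheduling parameters; otherwise pin the task and start a minimal chain walk. Roughly:

	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* The chain walk drops the reference taken here. */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
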
1149 RB_CLEAR_NODE(&waiter->pi_tree_entry); in rt_mutex_init_waiter()
1150 RB_CLEAR_NODE(&waiter->tree_entry); in rt_mutex_init_waiter()
1151 waiter->task = NULL; in rt_mutex_init_waiter()
1155 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
1159 * @timeout: the pre-initialized and started timer, or NULL for none
1160 * @waiter: the pre-initialized rt_mutex_waiter
1162 * Must be called with lock->wait_lock held and interrupts disabled
1183 ret = -EINTR; in __rt_mutex_slowlock()
1184 if (timeout && !timeout->task) in __rt_mutex_slowlock()
1185 ret = -ETIMEDOUT; in __rt_mutex_slowlock()
1190 raw_spin_unlock_irq(&lock->wait_lock); in __rt_mutex_slowlock()
1196 raw_spin_lock_irq(&lock->wait_lock); in __rt_mutex_slowlock()
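
The wait-wake-try-to-take loop assembles to roughly the following, dropping wait_lock around schedule() and re-checking on every wakeup:

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/* Only TASK_INTERRUPTIBLE checks signals and timeout. */
		if (likely(state == TASK_INTERRUPTIBLE)) {
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);

		schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}
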
1208 * If the result is not -EDEADLOCK or the caller requested in rt_mutex_handle_deadlock()
1211 if (res != -EDEADLOCK || detect_deadlock) in rt_mutex_handle_deadlock()
1242 * rtmutex with lock->wait_lock held. But we cannot unconditionally in rt_mutex_slowlock()
1246 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1250 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1258 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); in rt_mutex_slowlock()
1278 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1282 hrtimer_cancel(&timeout->timer); in rt_mutex_slowlock()
1303 * Slow path try-lock function:
1312 * This can be done without taking the @lock->wait_lock as in rt_mutex_slowtrylock()
1322 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1326 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1332 * Slow path to release a rt-mutex.
1342 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1351 * foo->lock->owner = NULL; in rt_mutex_slowunlock()
1352 * rtmutex_lock(foo->lock); <- fast path in rt_mutex_slowunlock()
1353 * free = atomic_dec_and_test(foo->refcnt); in rt_mutex_slowunlock()
1354 * rtmutex_unlock(foo->lock); <- fast path in rt_mutex_slowunlock()
1356 * kfree(foo); in rt_mutex_slowunlock()
1357 * raw_spin_unlock(foo->lock->wait_lock); in rt_mutex_slowunlock()
1362 * lock->wait_lock. So we do the following sequence: in rt_mutex_slowunlock()
1366 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1367 * if (cmpxchg(&lock->owner, owner, 0) == owner) in rt_mutex_slowunlock()
1372 * lock->owner is serialized by lock->wait_lock: in rt_mutex_slowunlock()
1374 * lock->owner = NULL; in rt_mutex_slowunlock()
1375 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1378 /* Drops lock->wait_lock ! */ in rt_mutex_slowunlock()
1382 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1389 * Queue the next waiter for wakeup once we release the wait_lock. in rt_mutex_slowunlock()
1392 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1441 * Performs the wakeup of the top-waiter and re-enables preemption.
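
That helper, rt_mutex_postunlock(), is the second half of the deboost protocol started in mark_wakeup_next_waiter(); a sketch:

	void rt_mutex_postunlock(struct wake_q_head *wake_q)
	{
		wake_up_q(wake_q);

		/* Pairs with preempt_disable() in mark_wakeup_next_waiter(). */
		preempt_enable();
	}
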
1469 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); in __rt_mutex_lock()
1475 * rt_mutex_lock_nested - lock a rt_mutex
1489 * rt_mutex_lock - lock a rt_mutex
1501 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
1507 * -EINTR when interrupted by a signal
1515 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); in rt_mutex_lock_interruptible()
1518 mutex_release(&lock->dep_map, _RET_IP_); in rt_mutex_lock_interruptible()
1538 * rt_mutex_timed_lock - lock a rt_mutex interruptible
1547 * -EINTR when interrupted by a signal
1548 * -ETIMEDOUT when the timeout expired
1557 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); in rt_mutex_timed_lock()
1562 mutex_release(&lock->dep_map, _RET_IP_); in rt_mutex_timed_lock()
1569 * rt_mutex_trylock - try to lock a rt_mutex
1588 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); in rt_mutex_trylock()
1595 * rt_mutex_unlock - unlock a rt_mutex
1601 mutex_release(&lock->dep_map, _RET_IP_); in rt_mutex_unlock()
1607 * Futex variant: since the futex paths do not use the fast-path, this can be
1613 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_futex_unlock()
1618 lock->owner = NULL; in __rt_mutex_futex_unlock()
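
Assembled, the futex unlock path with wait_lock already held is roughly:

	bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
					     struct wake_q_head *wake_q)
	{
		lockdep_assert_held(&lock->wait_lock);

		if (!rt_mutex_has_waiters(lock)) {
			lock->owner = NULL;
			return false; /* done */
		}

		/*
		 * Deboosts and queues the wakeup; the caller must invoke
		 * rt_mutex_postunlock() after dropping wait_lock.
		 */
		mark_wakeup_next_waiter(wake_q, lock);

		return true; /* call rt_mutex_postunlock() */
	}
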
1639 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_futex_unlock()
1641 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_futex_unlock()
1648 * rt_mutex_destroy - mark a mutex unusable
1659 lock->magic = NULL; in rt_mutex_destroy()
1665 * __rt_mutex_init - initialize the rt lock
1676 lock->owner = NULL; in __rt_mutex_init()
1677 raw_spin_lock_init(&lock->wait_lock); in __rt_mutex_init()
1678 lock->waiters = RB_ROOT_CACHED; in __rt_mutex_init()
1686 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1694 * Special API call for PI-futex support. This initializes the rtmutex and
1708 * rt_mutex_proxy_unlock - release a lock on behalf of owner
1714 * Special API call for PI-futex support. This merrily cleans up the rtmutex
1727 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1729 * @waiter: the pre-initialized rt_mutex_waiter
1739 * 0 - task blocked on lock
1740 * 1 - acquired the lock for task, caller should wake it up
1741 * <0 - error
1743 * Special API call for PI-futex support.
1751 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_start_proxy_lock()
1763 * returned with -EDEADLK and the owner in __rt_mutex_start_proxy_lock()
1776 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1778 * @waiter: the pre-initialized rt_mutex_waiter
1788 * 0 - task blocked on lock
1789 * 1 - acquired the lock for task, caller should wake it up
1790 * <0 - error
1792 * Special API call for PI-futex support.
1800 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_start_proxy_lock()
1804 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_start_proxy_lock()
1810 * rt_mutex_next_owner - return the next owner of the lock
1819 * Special API call for PI-futex support
1826 return rt_mutex_top_waiter(lock)->task; in rt_mutex_next_owner()
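
The full helper is just a guarded top-waiter lookup; roughly:

	struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
	{
		if (!rt_mutex_has_waiters(lock))
			return NULL;

		return rt_mutex_top_waiter(lock)->task;
	}
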
1830 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
1834 * @waiter: the pre-initialized rt_mutex_waiter
1841 * 0 - success
1842 * <0 - error, one of -EINTR, -ETIMEDOUT
1844 * Special API call for PI-futex support
1852 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_wait_proxy_lock()
1861 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_wait_proxy_lock()
1867 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
1869 * @waiter: the pre-initialized rt_mutex_waiter
1874 * Unless we acquired the lock, we're still enqueued on the wait-list and can
1880 * true - did the cleanup, we are done.
1881 * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
1884 * Special API call for PI-futex support
1891 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_cleanup_proxy_lock()
1893 * Do an unconditional try-lock, this deals with the lock stealing in rt_mutex_cleanup_proxy_lock()
1894 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() in rt_mutex_cleanup_proxy_lock()
1918 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_cleanup_proxy_lock()
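
Assembled, the cleanup path is an unconditional try-lock followed by waiter removal only if we still failed to become owner; roughly:

	bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
					 struct rt_mutex_waiter *waiter)
	{
		bool cleanup = false;

		raw_spin_lock_irq(&lock->wait_lock);
		/*
		 * Unconditional try-lock: handles the lock-stealing state
		 * where the unlocker already designated us next owner.
		 */
		try_to_take_rt_mutex(lock, current, waiter);
		/*
		 * Unless we acquired the lock, we're still enqueued on the
		 * wait list and must remove ourselves.
		 */
		if (rt_mutex_owner(lock) != current) {
			remove_waiter(lock, waiter);
			cleanup = true;
		}
		/*
		 * try_to_take_rt_mutex() sets the waiter bit
		 * unconditionally; clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		return cleanup;
	}
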