Lines Matching full:lock
26 * lock->owner state tracking:
28 * lock->owner holds the task_struct pointer of the owner. Bit 0
29 * is used to keep track of the "lock has waiters" state.
32 * NULL 0 lock is free (fast acquire possible)
33 * NULL 1 lock is free and has waiters and the top waiter
34 * is going to take the lock*
35 * taskpointer 0 lock is held (fast release possible)
36 * taskpointer 1 lock is held and has waiters**
39 * possible when bit 0 of lock->owner is 0.
41 * (*) It also can be a transitional state when grabbing the lock
42 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
43 * we need to set bit 0 before looking at the lock, and the owner may be
47 * waiters. This can happen when grabbing the lock in the slow path.
48 * To prevent a cmpxchg of the owner releasing the lock, we need to
49 * set this bit before looking at the lock.
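
The state table at lines 32-36 works because a task_struct pointer is always at least word aligned, so bit 0 of lock->owner is free to carry the "has waiters" flag. Below is a minimal userspace C model of that encoding; the HAS_WAITERS constant and the trivial task type are stand-ins for RT_MUTEX_HAS_WAITERS and struct task_struct, and the sketch only illustrates the convention, it is not the kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL                 /* models RT_MUTEX_HAS_WAITERS (bit 0) */

struct task { int dummy; };             /* stand-in for struct task_struct */

/* Pack the owner pointer and the "has waiters" flag into a single word. */
static uintptr_t encode_owner(struct task *owner, int has_waiters)
{
	return (uintptr_t)owner | (has_waiters ? HAS_WAITERS : 0);
}

static struct task *decode_owner(uintptr_t word)
{
	return (struct task *)(word & ~HAS_WAITERS);    /* strip bit 0 */
}

static int decode_has_waiters(uintptr_t word)
{
	return (int)(word & HAS_WAITERS);
}

int main(void)
{
	struct task t;
	uintptr_t w;

	w = encode_owner(NULL, 0);      /* NULL / 0: free, fast acquire possible */
	assert(decode_owner(w) == NULL && !decode_has_waiters(w));

	w = encode_owner(&t, 1);        /* taskpointer / 1: held, has waiters */
	assert(decode_owner(w) == &t && decode_has_waiters(w));

	puts("owner word encoding matches the state table");
	return 0;
}
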
53 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
57 if (rt_mutex_has_waiters(lock)) in rt_mutex_set_owner()
60 WRITE_ONCE(lock->owner, (struct task_struct *)val); in rt_mutex_set_owner()
63 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) in clear_rt_mutex_waiters() argument
65 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
66 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
69 static void fixup_rt_mutex_waiters(struct rt_mutex *lock) in fixup_rt_mutex_waiters() argument
71 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
73 if (rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
78 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
84 * lock(l->lock) in fixup_rt_mutex_waiters()
88 * unlock(l->lock) in fixup_rt_mutex_waiters()
92 * lock(l->lock) in fixup_rt_mutex_waiters()
96 * unlock(l->lock) in fixup_rt_mutex_waiters()
99 * lock(l->lock) in fixup_rt_mutex_waiters()
102 * unlock(l->lock) in fixup_rt_mutex_waiters()
103 * lock(l->lock) in fixup_rt_mutex_waiters()
107 * unlock(l->lock) in fixup_rt_mutex_waiters()
108 * lock(l->lock) in fixup_rt_mutex_waiters()
115 * lock(l->lock) in fixup_rt_mutex_waiters()
128 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
149 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
152 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) in mark_rt_mutex_waiters() argument
154 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
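
Only the declaration of mark_rt_mutex_waiters() is visible above (lines 152-154). Per the comment at line 149, the goal is to force every future read-modify-write of lock->owner into the slowpath by atomically setting the waiters bit. A userspace C11 sketch of such a compare-and-swap loop, reusing the hypothetical HAS_WAITERS bit from the earlier example, could look like this; it models the idea rather than reproducing the kernel function.

#include <stdatomic.h>
#include <stdint.h>

#define HAS_WAITERS 1UL		/* models RT_MUTEX_HAS_WAITERS (bit 0) */

/*
 * Atomically OR the waiters bit into the owner word.  Any concurrent
 * fast-path cmpxchg that expects bit 0 to be clear will now fail and fall
 * back to the slowpath.  Relaxed ordering is enough in this model because,
 * as line 149 notes, callers hold ->wait_lock, which serializes them.
 */
static void model_mark_waiters(_Atomic uintptr_t *owner_word)
{
	uintptr_t old = atomic_load_explicit(owner_word, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(owner_word, &old,
						      old | HAS_WAITERS,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* a failed CAS reloads 'old'; retry with the new value */
}
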
165 * 2) Drop lock->wait_lock
166 * 3) Try to unlock the lock with cmpxchg
168 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, in unlock_rt_mutex_safe() argument
170 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
172 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe()
174 clear_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
175 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
181 * lock(wait_lock); in unlock_rt_mutex_safe()
183 * mark_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
184 * acquire(lock); in unlock_rt_mutex_safe()
188 * lock(wait_lock); in unlock_rt_mutex_safe()
189 * mark_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
194 * lock(wait_lock); in unlock_rt_mutex_safe()
197 * lock(wait_lock); in unlock_rt_mutex_safe()
198 * acquire(lock); in unlock_rt_mutex_safe()
200 return rt_mutex_cmpxchg_release(lock, owner, NULL); in unlock_rt_mutex_safe()
207 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) in mark_rt_mutex_waiters() argument
209 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
210 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
214 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
216 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, in unlock_rt_mutex_safe() argument
218 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
220 lock->owner = NULL; in unlock_rt_mutex_safe()
221 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
271 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
273 struct rb_node **link = &lock->waiters.rb_root.rb_node; in rt_mutex_enqueue()
290 rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost); in rt_mutex_enqueue()
294 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
299 rb_erase_cached(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
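
rt_mutex_enqueue() at lines 271-290 shows only its first and last statements here; the omitted middle is the usual cached-rbtree descent that keeps waiters ordered by priority, with a 'leftmost' flag tracking whether the new waiter becomes the top waiter. A hedged sketch of that descent is below; it assumes a priority comparator along the lines of rt_mutex_waiter_less() and the standard <linux/rbtree.h> helpers, and is not a verbatim copy of the function.

static void
sketch_rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	bool leftmost = true;

	/* Descend, keeping waiters ordered by priority (highest leftmost). */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* a higher-priority waiter stays on top */
		}
	}

	rb_link_node(&waiter->tree_entry, parent, link);
	/* The cached leftmost node is the top waiter, found in O(1) later. */
	rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
}
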
381 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
396 * comparison to detect lock chain changes.
425 * [1] lock(task->pi_lock); [R] acquire [P]
428 * [4] lock = waiter->lock; [P]
429 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
434 * [7] requeue_lock_waiter(lock, waiter); [P] + [L]
438 * [10] task = owner(lock); [L]
440 * lock(task->pi_lock); [L] acquire [P]
441 * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
444 * unlock(lock->wait_lock); release [L]
457 struct rt_mutex *lock; in rt_mutex_adjust_prio_chain() local
471 * We limit the lock chain length for each invocation. in rt_mutex_adjust_prio_chain()
482 printk(KERN_WARNING "Maximum lock depth %d reached " in rt_mutex_adjust_prio_chain()
522 * the previous owner of the lock might have released the lock. in rt_mutex_adjust_prio_chain()
529 * the task might have moved on in the lock chain or even left in rt_mutex_adjust_prio_chain()
530 * the chain completely and blocks now on an unrelated lock or in rt_mutex_adjust_prio_chain()
533 * We stored the lock on which @task was blocked in @next_lock, in rt_mutex_adjust_prio_chain()
536 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
576 * [4] Get the next lock in rt_mutex_adjust_prio_chain()
578 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
581 * which is the reverse lock order versus the other rtmutex in rt_mutex_adjust_prio_chain()
584 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
592 * lock->wait_lock. in rt_mutex_adjust_prio_chain()
594 * Deadlock detection. If the lock is the same as the original in rt_mutex_adjust_prio_chain()
595 * lock which caused us to walk the lock chain or if the in rt_mutex_adjust_prio_chain()
596 * current lock is owned by the task which initiated the chain in rt_mutex_adjust_prio_chain()
599 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
600 debug_rt_mutex_deadlock(chwalk, orig_waiter, lock); in rt_mutex_adjust_prio_chain()
601 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
607 * If we just follow the lock chain for deadlock detection, no in rt_mutex_adjust_prio_chain()
620 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
621 * If there is no owner of the lock, end of chain. in rt_mutex_adjust_prio_chain()
623 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
624 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
628 /* [10] Grab the next task, i.e. owner of @lock */ in rt_mutex_adjust_prio_chain()
629 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
642 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
646 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
656 * operation on @lock. We need it for the boost/deboost in rt_mutex_adjust_prio_chain()
659 prerequeue_top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
661 /* [7] Requeue the waiter in the lock waiter tree. */ in rt_mutex_adjust_prio_chain()
662 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
683 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
690 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
692 * We must abort the chain walk if there is no lock owner even in rt_mutex_adjust_prio_chain()
693 * in the deadlock detection case, as we have nothing to in rt_mutex_adjust_prio_chain()
696 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
700 * to get the lock. in rt_mutex_adjust_prio_chain()
702 if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) in rt_mutex_adjust_prio_chain()
703 wake_up_process(rt_mutex_top_waiter(lock)->task); in rt_mutex_adjust_prio_chain()
704 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
708 /* [10] Grab the next task, i.e. the owner of @lock */ in rt_mutex_adjust_prio_chain()
709 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
713 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
716 * waiter on the lock. Replace the previous top waiter in rt_mutex_adjust_prio_chain()
726 * The waiter was the top waiter on the lock, but is in rt_mutex_adjust_prio_chain()
736 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
748 * and lock->wait_lock. The actual decisions are made after we in rt_mutex_adjust_prio_chain()
751 * Check whether the task which owns the current lock is pi in rt_mutex_adjust_prio_chain()
752 * blocked itself. If yes we store a pointer to the lock for in rt_mutex_adjust_prio_chain()
753 * the lock chain change detection above. After we dropped in rt_mutex_adjust_prio_chain()
758 * Store the top waiter of @lock for the end of chain walk in rt_mutex_adjust_prio_chain()
761 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
765 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
771 * We reached the end of the lock chain. Stop right here. No in rt_mutex_adjust_prio_chain()
778 * If the current waiter is not the top waiter on the lock, in rt_mutex_adjust_prio_chain()
798 * Must be called with lock->wait_lock held and interrupts disabled
800 * @lock: The lock to be acquired.
801 * @task: The task which wants to acquire the lock
802 * @waiter: The waiter that is queued to the lock's wait tree if the
805 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
808 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
811 * Before testing whether we can acquire @lock, we set the in try_to_take_rt_mutex()
812 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all in try_to_take_rt_mutex()
813 * other tasks which try to modify @lock into the slow path in try_to_take_rt_mutex()
814 * and they serialize on @lock->wait_lock. in try_to_take_rt_mutex()
819 * - There is a lock owner. The caller must fixup the in try_to_take_rt_mutex()
820 * transient state if it does a trylock or leaves the lock in try_to_take_rt_mutex()
823 * - @task acquires the lock and there are no other in try_to_take_rt_mutex()
827 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
830 * If @lock has an owner, give up. in try_to_take_rt_mutex()
832 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
837 * into @lock waiter tree. If @waiter == NULL then this is a in try_to_take_rt_mutex()
843 * @lock, give up. in try_to_take_rt_mutex()
845 if (waiter != rt_mutex_top_waiter(lock)) in try_to_take_rt_mutex()
849 * We can acquire the lock. Remove the waiter from the in try_to_take_rt_mutex()
850 * lock waiters tree. in try_to_take_rt_mutex()
852 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
856 * If the lock has waiters already we check whether @task is in try_to_take_rt_mutex()
857 * eligible to take over the lock. in try_to_take_rt_mutex()
860 * the lock. @task->pi_blocked_on is NULL, so it does in try_to_take_rt_mutex()
863 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
870 rt_mutex_top_waiter(lock))) in try_to_take_rt_mutex()
875 * don't have to change anything in the lock in try_to_take_rt_mutex()
880 * No waiters. Take the lock without the in try_to_take_rt_mutex()
898 * Finish the lock acquisition. @task is the new owner. If in try_to_take_rt_mutex()
902 if (rt_mutex_has_waiters(lock)) in try_to_take_rt_mutex()
903 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
907 /* We got the lock. */ in try_to_take_rt_mutex()
908 debug_rt_mutex_lock(lock); in try_to_take_rt_mutex()
914 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
920 * Task blocks on lock.
924 * This must be called with lock->wait_lock held and interrupts disabled
926 static int task_blocks_on_rt_mutex(struct rt_mutex *lock, in task_blocks_on_rt_mutex() argument
931 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
936 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
952 waiter->lock = lock; in task_blocks_on_rt_mutex()
956 /* Get the top priority waiter on the lock */ in task_blocks_on_rt_mutex()
957 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
958 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
959 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
969 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
980 /* Store the lock on which owner is blocked or NULL */ in task_blocks_on_rt_mutex()
993 * The owner can't disappear while holding a lock, in task_blocks_on_rt_mutex()
999 raw_spin_unlock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1001 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
1004 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1013 * Called with lock->wait_lock held and interrupts disabled.
1016 struct rt_mutex *lock) in mark_wakeup_next_waiter() argument
1022 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1036 * queued on the lock until it gets the lock, this lock in mark_wakeup_next_waiter()
1040 * the top waiter can steal this lock. in mark_wakeup_next_waiter()
1042 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1060 * Remove a waiter from a lock and give up
1062 * Must be called with lock->wait_lock held and interrupts disabled. It must
1065 static void remove_waiter(struct rt_mutex *lock, in remove_waiter() argument
1068 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1069 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
1072 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1075 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1081 * waiter of the lock and there is an owner to update. in remove_waiter()
1090 if (rt_mutex_has_waiters(lock)) in remove_waiter()
1091 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1095 /* Store the lock on which owner is blocked or NULL */ in remove_waiter()
1110 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1112 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1115 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1136 next_lock = waiter->lock; in rt_mutex_adjust_pi()
1156 * @lock: the rt_mutex to take
1162 * Must be called with lock->wait_lock held and interrupts disabled
1165 __rt_mutex_slowlock(struct rt_mutex *lock, int state, in __rt_mutex_slowlock() argument
1172 /* Try to acquire the lock: */ in __rt_mutex_slowlock()
1173 if (try_to_take_rt_mutex(lock, current, waiter)) in __rt_mutex_slowlock()
1190 raw_spin_unlock_irq(&lock->wait_lock); in __rt_mutex_slowlock()
1196 raw_spin_lock_irq(&lock->wait_lock); in __rt_mutex_slowlock()
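
Between the unlock at line 1190 and the re-lock at line 1196, __rt_mutex_slowlock() runs the part of its wait loop that never mentions "lock" and therefore is not listed here: the timeout and signal checks and the schedule() call. The sketch below is a hedged reconstruction of that loop's shape, assuming the usual signal_pending_state() and hrtimer_sleeper conventions; the exact checks vary across kernel versions.

	for (;;) {
		/* Try to acquire the lock: (lines 1172-1173) */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/* Give up if the optional hrtimer_sleeper already fired. */
		if (timeout && !timeout->task) {
			ret = -ETIMEDOUT;
			break;
		}

		/* Interruptible sleepers bail out on pending signals. */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);	/* line 1190 */
		schedule();
		raw_spin_lock_irq(&lock->wait_lock);	/* line 1196 */
		set_current_state(state);
	}
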
1225 * Slow path lock function:
1228 rt_mutex_slowlock(struct rt_mutex *lock, int state, in rt_mutex_slowlock() argument
1242 * rtmutex with lock->wait_lock held. But we cannot unconditionally in rt_mutex_slowlock()
1246 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1248 /* Try to acquire the lock again: */ in rt_mutex_slowlock()
1249 if (try_to_take_rt_mutex(lock, current, NULL)) { in rt_mutex_slowlock()
1250 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1260 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); in rt_mutex_slowlock()
1264 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); in rt_mutex_slowlock()
1268 remove_waiter(lock, &waiter); in rt_mutex_slowlock()
1276 fixup_rt_mutex_waiters(lock); in rt_mutex_slowlock()
1278 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1289 static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) in __rt_mutex_slowtrylock() argument
1291 int ret = try_to_take_rt_mutex(lock, current, NULL); in __rt_mutex_slowtrylock()
1294 * try_to_take_rt_mutex() sets the lock waiters bit in __rt_mutex_slowtrylock()
1297 fixup_rt_mutex_waiters(lock); in __rt_mutex_slowtrylock()
1303 * Slow path try-lock function:
1305 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) in rt_mutex_slowtrylock() argument
1311 * If the lock already has an owner we fail to get the lock. in rt_mutex_slowtrylock()
1312 * This can be done without taking the @lock->wait_lock as in rt_mutex_slowtrylock()
1315 if (rt_mutex_owner(lock)) in rt_mutex_slowtrylock()
1319 * The mutex currently has no owner. Lock the wait lock and try to in rt_mutex_slowtrylock()
1320 * acquire the lock. We use irqsave here to support early boot calls. in rt_mutex_slowtrylock()
1322 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1324 ret = __rt_mutex_slowtrylock(lock); in rt_mutex_slowtrylock()
1326 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1336 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, in rt_mutex_slowunlock() argument
1342 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1344 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
1351 * foo->lock->owner = NULL; in rt_mutex_slowunlock()
1352 * rtmutex_lock(foo->lock); <- fast path in rt_mutex_slowunlock()
1354 * rtmutex_unlock(foo->lock); <- fast path in rt_mutex_slowunlock()
1357 * raw_spin_unlock(foo->lock->wait_lock); in rt_mutex_slowunlock()
1362 * lock->wait_lock. So we do the following sequence: in rt_mutex_slowunlock()
1364 * owner = rt_mutex_owner(lock); in rt_mutex_slowunlock()
1365 * clear_rt_mutex_waiters(lock); in rt_mutex_slowunlock()
1366 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1367 * if (cmpxchg(&lock->owner, owner, 0) == owner) in rt_mutex_slowunlock()
1372 * lock->owner is serialized by lock->wait_lock: in rt_mutex_slowunlock()
1374 * lock->owner = NULL; in rt_mutex_slowunlock()
1375 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1377 while (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
1378 /* Drops lock->wait_lock ! */ in rt_mutex_slowunlock()
1379 if (unlock_rt_mutex_safe(lock, flags) == true) in rt_mutex_slowunlock()
1382 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1391 mark_wakeup_next_waiter(wake_q, lock); in rt_mutex_slowunlock()
1392 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1398 * debug aware fast / slowpath lock, trylock, unlock
1404 rt_mutex_fastlock(struct rt_mutex *lock, int state, in rt_mutex_fastlock() argument
1405 int (*slowfn)(struct rt_mutex *lock, int state, in rt_mutex_fastlock() argument
1409 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in rt_mutex_fastlock()
1412 return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); in rt_mutex_fastlock()
1416 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, in rt_mutex_timed_fastlock() argument
1419 int (*slowfn)(struct rt_mutex *lock, int state, in rt_mutex_timed_fastlock() argument
1424 likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in rt_mutex_timed_fastlock()
1427 return slowfn(lock, state, timeout, chwalk); in rt_mutex_timed_fastlock()
1431 rt_mutex_fasttrylock(struct rt_mutex *lock, in rt_mutex_fasttrylock() argument
1432 int (*slowfn)(struct rt_mutex *lock)) in rt_mutex_fasttrylock() argument
1434 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in rt_mutex_fasttrylock()
1437 return slowfn(lock); in rt_mutex_fasttrylock()
1452 rt_mutex_fastunlock(struct rt_mutex *lock, in rt_mutex_fastunlock() argument
1453 bool (*slowfn)(struct rt_mutex *lock, in rt_mutex_fastunlock() argument
1458 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) in rt_mutex_fastunlock()
1461 if (slowfn(lock, &wake_q)) in rt_mutex_fastunlock()
1465 static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) in __rt_mutex_lock() argument
1469 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); in __rt_mutex_lock()
1470 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); in __rt_mutex_lock()
1475 * rt_mutex_lock_nested - lock a rt_mutex
1477 * @lock: the rt_mutex to be locked
1480 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) in rt_mutex_lock_nested() argument
1482 __rt_mutex_lock(lock, subclass); in rt_mutex_lock_nested()
1489 * rt_mutex_lock - lock a rt_mutex
1491 * @lock: the rt_mutex to be locked
1493 void __sched rt_mutex_lock(struct rt_mutex *lock) in rt_mutex_lock() argument
1495 __rt_mutex_lock(lock, 0); in rt_mutex_lock()
1501 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
1503 * @lock: the rt_mutex to be locked
1509 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) in rt_mutex_lock_interruptible() argument
1515 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); in rt_mutex_lock_interruptible()
1516 ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); in rt_mutex_lock_interruptible()
1518 mutex_release(&lock->dep_map, _RET_IP_); in rt_mutex_lock_interruptible()
1527 int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) in rt_mutex_futex_trylock() argument
1529 return rt_mutex_slowtrylock(lock); in rt_mutex_futex_trylock()
1532 int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) in __rt_mutex_futex_trylock() argument
1534 return __rt_mutex_slowtrylock(lock); in __rt_mutex_futex_trylock()
1538 * rt_mutex_timed_lock - lock a rt_mutex interruptible
1542 * @lock: the rt_mutex to be locked
1551 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) in rt_mutex_timed_lock() argument
1557 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); in rt_mutex_timed_lock()
1558 ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, in rt_mutex_timed_lock()
1562 mutex_release(&lock->dep_map, _RET_IP_); in rt_mutex_timed_lock()
1569 * rt_mutex_trylock - try to lock a rt_mutex
1571 * @lock: the rt_mutex to be locked
1579 int __sched rt_mutex_trylock(struct rt_mutex *lock) in rt_mutex_trylock() argument
1586 ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); in rt_mutex_trylock()
1588 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); in rt_mutex_trylock()
1597 * @lock: the rt_mutex to be unlocked
1599 void __sched rt_mutex_unlock(struct rt_mutex *lock) in rt_mutex_unlock() argument
1601 mutex_release(&lock->dep_map, _RET_IP_); in rt_mutex_unlock()
1602 rt_mutex_fastunlock(lock, rt_mutex_slowunlock); in rt_mutex_unlock()
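
Taken together, the kernel-doc entries from line 1475 onward describe the normal in-kernel API: rt_mutex_lock()/rt_mutex_lock_interruptible() to acquire, rt_mutex_trylock() for opportunistic acquisition, and rt_mutex_unlock() to release. The hypothetical usage sketch below assumes the DEFINE_RT_MUTEX() static initializer from the rtmutex header; the function names and data are invented for illustration.

#include <linux/errno.h>
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(example_lock);	/* statically initialized, unlocked */

static void example_touch_data(void)
{
	rt_mutex_lock(&example_lock);	/* may sleep; PI-boosts a lower-prio owner */
	/* ... access data protected by example_lock ... */
	rt_mutex_unlock(&example_lock);
}

static int example_touch_data_opportunistic(void)
{
	if (!rt_mutex_trylock(&example_lock))	/* non-zero on success */
		return -EBUSY;
	/* ... */
	rt_mutex_unlock(&example_lock);
	return 0;
}
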
1610 bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, in __rt_mutex_futex_unlock() argument
1613 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_futex_unlock()
1615 debug_rt_mutex_unlock(lock); in __rt_mutex_futex_unlock()
1617 if (!rt_mutex_has_waiters(lock)) { in __rt_mutex_futex_unlock()
1618 lock->owner = NULL; in __rt_mutex_futex_unlock()
1628 mark_wakeup_next_waiter(wake_q, lock); in __rt_mutex_futex_unlock()
1633 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) in rt_mutex_futex_unlock() argument
1639 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_futex_unlock()
1640 postunlock = __rt_mutex_futex_unlock(lock, &wake_q); in rt_mutex_futex_unlock()
1641 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_futex_unlock()
1649 * @lock: the mutex to be destroyed
1655 void rt_mutex_destroy(struct rt_mutex *lock) in rt_mutex_destroy() argument
1657 WARN_ON(rt_mutex_is_locked(lock)); in rt_mutex_destroy()
1659 lock->magic = NULL; in rt_mutex_destroy()
1665 * __rt_mutex_init - initialize the rt lock
1667 * @lock: the rt lock to be initialized
1669 * Initialize the rt lock to unlocked state.
1671 * Initializing a locked rt lock is not allowed
1673 void __rt_mutex_init(struct rt_mutex *lock, const char *name, in __rt_mutex_init() argument
1676 lock->owner = NULL; in __rt_mutex_init()
1677 raw_spin_lock_init(&lock->wait_lock); in __rt_mutex_init()
1678 lock->waiters = RB_ROOT_CACHED; in __rt_mutex_init()
1681 debug_rt_mutex_init(lock, name, key); in __rt_mutex_init()
1686 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1689 * @lock: the rt_mutex to be locked
1699 void rt_mutex_init_proxy_locked(struct rt_mutex *lock, in rt_mutex_init_proxy_locked() argument
1702 __rt_mutex_init(lock, NULL, NULL); in rt_mutex_init_proxy_locked()
1703 debug_rt_mutex_proxy_lock(lock, proxy_owner); in rt_mutex_init_proxy_locked()
1704 rt_mutex_set_owner(lock, proxy_owner); in rt_mutex_init_proxy_locked()
1708 * rt_mutex_proxy_unlock - release a lock on behalf of owner
1710 * @lock: the rt_mutex to be unlocked
1719 void rt_mutex_proxy_unlock(struct rt_mutex *lock, in rt_mutex_proxy_unlock() argument
1722 debug_rt_mutex_proxy_unlock(lock); in rt_mutex_proxy_unlock()
1723 rt_mutex_set_owner(lock, NULL); in rt_mutex_proxy_unlock()
1727 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1728 * @lock: the rt_mutex to take
1739 * 0 - task blocked on lock
1740 * 1 - acquired the lock for task, caller should wake it up
1745 int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, in __rt_mutex_start_proxy_lock() argument
1751 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_start_proxy_lock()
1753 if (try_to_take_rt_mutex(lock, task, NULL)) in __rt_mutex_start_proxy_lock()
1757 ret = task_blocks_on_rt_mutex(lock, waiter, task, in __rt_mutex_start_proxy_lock()
1760 if (ret && !rt_mutex_owner(lock)) { in __rt_mutex_start_proxy_lock()
1764 * released the lock while we were walking the in __rt_mutex_start_proxy_lock()
1776 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1777 * @lock: the rt_mutex to take
1788 * 0 - task blocked on lock
1789 * 1 - acquired the lock for task, caller should wake it up
1794 int rt_mutex_start_proxy_lock(struct rt_mutex *lock, in rt_mutex_start_proxy_lock() argument
1800 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_start_proxy_lock()
1801 ret = __rt_mutex_start_proxy_lock(lock, waiter, task); in rt_mutex_start_proxy_lock()
1803 remove_waiter(lock, waiter); in rt_mutex_start_proxy_lock()
1804 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_start_proxy_lock()
1810 * rt_mutex_next_owner - return the next owner of the lock
1812 * @lock: the rt lock to query
1814 * Returns the next owner of the lock or NULL
1816 * Caller has to serialize against other accessors to the lock
1821 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) in rt_mutex_next_owner() argument
1823 if (!rt_mutex_has_waiters(lock)) in rt_mutex_next_owner()
1826 return rt_mutex_top_waiter(lock)->task; in rt_mutex_next_owner()
1830 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
1831 * @lock: the rt_mutex we were woken on
1836 * Wait for the lock acquisition started on our behalf by
1846 int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, in rt_mutex_wait_proxy_lock() argument
1852 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_wait_proxy_lock()
1855 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); in rt_mutex_wait_proxy_lock()
1860 fixup_rt_mutex_waiters(lock); in rt_mutex_wait_proxy_lock()
1861 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_wait_proxy_lock()
1867 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
1868 * @lock: the rt_mutex we were woken on
1874 * Unless we acquired the lock, we're still enqueued on the wait-list and can
1881 * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
1886 bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, in rt_mutex_cleanup_proxy_lock() argument
1891 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_cleanup_proxy_lock()
1893 * Do an unconditional try-lock; this deals with the lock stealing in rt_mutex_cleanup_proxy_lock()
1899 * we will own the lock and it will have removed the waiter. If we in rt_mutex_cleanup_proxy_lock()
1903 try_to_take_rt_mutex(lock, current, waiter); in rt_mutex_cleanup_proxy_lock()
1908 if (rt_mutex_owner(lock) != current) { in rt_mutex_cleanup_proxy_lock()
1909 remove_waiter(lock, waiter); in rt_mutex_cleanup_proxy_lock()
1916 fixup_rt_mutex_waiters(lock); in rt_mutex_cleanup_proxy_lock()
1918 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_cleanup_proxy_lock()
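
The three proxy-lock entry points above (lines 1727-1918) are used in two phases: the requeueing context starts the acquisition on behalf of another task, and that task later waits for it and, on failure, cleans it up in its own context. The caller sketch below is illustrative only; rt_mutex_init_waiter() and the exact error handling are assumptions from common usage, not taken from the lines above.

/* Phase 1: runs in the requeueing task's context, on behalf of @task. */
static int proxy_start_sketch(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	rt_mutex_init_waiter(waiter);

	/* 0: @task is now blocked on @lock, 1: acquired for @task, <0: error. */
	ret = rt_mutex_start_proxy_lock(lock, waiter, task);
	if (ret == 1)
		return 0;	/* lock taken for @task; caller should wake it */
	return ret;
}

/* Phase 2: runs later in @task's own context, after it has been woken. */
static int proxy_finish_sketch(struct rt_mutex *lock,
			       struct rt_mutex_waiter *waiter)
{
	int ret;

	/* NULL timeout: wait indefinitely for the proxy acquisition. */
	ret = rt_mutex_wait_proxy_lock(lock, NULL, waiter);

	/* On failure, detach -- unless the lock was acquired after all. */
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, waiter))
		ret = 0;

	return ret;
}
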