Lines matching full:lock (a full-text search for the token "lock" over the rt_mutex core, rtmutex.c; source lines that do not match are elided)
21 * lock->owner state tracking:
23 * lock->owner holds the task_struct pointer of the owner. Bit 0
24 * is used to keep track of the "lock has waiters" state.
26 * owner        bit0    state
27 * NULL         0       lock is free (fast acquire possible)
28 * NULL         1       lock is free and has waiters and the top waiter
29 *                      is going to take the lock*
30 * taskpointer  0       lock is held (fast release possible)
31 * taskpointer  1       lock is held and has waiters**
33 * The fast atomic compare-exchange-based acquire and release is only
34 * possible when bit 0 of lock->owner is 0.
36 * (*) It also can be a transitional state when grabbing the lock
37 * with ->wait_lock held. To prevent any fast-path cmpxchg to the lock,
38 * we need to set bit 0 before looking at the lock, and the owner may be
39 * NULL during this small window, hence this can be a transitional state.
41 * (**) There is a small window where bit 0 is set but there are no
42 * waiters. This can happen when grabbing the lock in the slow path.
43 * To prevent a cmpxchg of the owner releasing the lock, we need to
44 * set this bit before looking at the lock.
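A minimal standalone sketch of the encoding described above, assuming RT_MUTEX_HAS_WAITERS is 1UL as in rtmutex_common.h. The helper names owner_of() and waiters_set() are illustrative, not the kernel's (the real accessors are rt_mutex_owner() and rt_mutex_has_waiters()); the scheme works because task_struct pointers are word-aligned, leaving bit 0 free:

	#include <stdint.h>
	#include <stdbool.h>

	#define RT_MUTEX_HAS_WAITERS	1UL

	struct task_struct;

	/* Decode the owner: mask off the "has waiters" bit (bit 0). */
	static inline struct task_struct *owner_of(uintptr_t owner_word)
	{
		return (struct task_struct *)(owner_word & ~RT_MUTEX_HAS_WAITERS);
	}

	/* Test the "has waiters" bit without disturbing the pointer bits. */
	static inline bool waiters_set(uintptr_t owner_word)
	{
		return (owner_word & RT_MUTEX_HAS_WAITERS) != 0;
	}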
48 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
50 unsigned long val = (unsigned long)owner; in rt_mutex_set_owner() local
52 if (rt_mutex_has_waiters(lock)) in rt_mutex_set_owner()
53 val |= RT_MUTEX_HAS_WAITERS; in rt_mutex_set_owner()
55 lock->owner = (struct task_struct *)val; in rt_mutex_set_owner()
58 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) in clear_rt_mutex_waiters() argument
60 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
61 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
64 static void fixup_rt_mutex_waiters(struct rt_mutex *lock) in fixup_rt_mutex_waiters() argument
66 if (!rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
67 clear_rt_mutex_waiters(lock); in fixup_rt_mutex_waiters()
76 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) in mark_rt_mutex_waiters() argument
78 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
86 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) in mark_rt_mutex_waiters() argument
88 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
89 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
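The search keeps only the declaration line of the SMP (cmpxchg-based) variant above; its elided body is a retry loop of roughly this shape, a sketch assuming the kernel's cmpxchg() primitive:

	static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
	{
		unsigned long owner, *p = (unsigned long *) &lock->owner;

		do {
			owner = *p;	/* snapshot the current owner word */
		} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
	}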
125 * (Note: We do this outside of the protection of lock->wait_lock to
126 * allow the lock to be taken while or before we readjust the priority
127 * of the task.)
155 struct rt_mutex *lock; in rt_mutex_adjust_prio_chain() local
179 printk(KERN_WARNING "Maximum lock depth %d reached " in rt_mutex_adjust_prio_chain()
204 * the previous owner of the lock might have released the lock. in rt_mutex_adjust_prio_chain()
225 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
226 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
233 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
234 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); in rt_mutex_adjust_prio_chain()
235 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
240 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
243 plist_del(&waiter->list_entry, &lock->wait_list); in rt_mutex_adjust_prio_chain()
245 plist_add(&waiter->list_entry, &lock->wait_list); in rt_mutex_adjust_prio_chain()
249 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
251 * If the requeue above changed the top waiter, then we need in rt_mutex_adjust_prio_chain()
252 * to wake the new top waiter up to try to get the lock. in rt_mutex_adjust_prio_chain()
255 if (top_waiter != rt_mutex_top_waiter(lock)) in rt_mutex_adjust_prio_chain()
256 wake_up_process(rt_mutex_top_waiter(lock)->task); in rt_mutex_adjust_prio_chain()
257 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
263 task = rt_mutex_owner(lock); in rt_mutex_adjust_prio_chain()
267 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
277 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
285 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
286 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
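The chain walk above is heavily elided by the search. As a conceptual aid, here is a tiny userspace model of the priority propagation it performs: task A blocks on a lock owned by B, which blocks on a lock owned by C, and the walk boosts B and C to A's priority. Purely illustrative, not kernel code; it is single-threaded and omits wait_lock/pi_lock locking, waiter requeueing, deadlock detection and the depth limit:

	#include <stdio.h>

	struct lock;

	struct task {
		const char *name;
		int prio;			/* lower value = higher priority */
		struct lock *blocked_on;	/* the lock this task waits for */
	};

	struct lock {
		struct task *owner;
	};

	/* Walk the blocking chain from 'waiter', boosting each lock owner
	 * to the waiter's priority; stop once an owner already runs as high. */
	static void adjust_prio_chain(struct task *waiter)
	{
		struct task *t = waiter;

		while (t->blocked_on && t->blocked_on->owner) {
			struct task *owner = t->blocked_on->owner;

			if (owner->prio <= t->prio)
				break;		/* no boost needed; walk ends */
			owner->prio = t->prio;	/* inherit the waiter's priority */
			t = owner;		/* the owner may itself be blocked */
		}
	}

	int main(void)
	{
		struct task c = { "C", 90, NULL };	/* low prio, owns L2 */
		struct task b = { "B", 50, NULL };	/* medium prio, owns L1 */
		struct task a = { "A", 10, NULL };	/* high prio */
		struct lock l2 = { &c };
		struct lock l1 = { &b };

		b.blocked_on = &l2;	/* B waits on L2, owned by C */
		a.blocked_on = &l1;	/* A waits on L1, owned by B */

		adjust_prio_chain(&a);
		printf("B=%d C=%d\n", b.prio, c.prio);	/* prints "B=10 C=10" */
		return 0;
	}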
304 * Must be called with lock->wait_lock held.
306 * @lock: the lock to be acquired.
307 * @task: the task which wants to acquire the lock
308 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
310 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
314 * We have to be careful here if the atomic speedups are in try_to_take_rt_mutex()
315 * enabled, such that, when in try_to_take_rt_mutex()
316 * - no other waiter is on the lock in try_to_take_rt_mutex()
317 * - the lock has been released since we did the cmpxchg in try_to_take_rt_mutex()
318 * the lock can be released or taken while we are doing the in try_to_take_rt_mutex()
319 * checks and marking the lock with RT_MUTEX_HAS_WAITERS. in try_to_take_rt_mutex()
321 * The atomic acquire/release aware variant of in try_to_take_rt_mutex()
322 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting in try_to_take_rt_mutex()
323 * the WAITERS bit, the atomic release/acquire can not in try_to_take_rt_mutex()
324 * happen anymore and lock->wait_lock protects us from the in try_to_take_rt_mutex()
325 * non-atomic case. in try_to_take_rt_mutex()
327 * Note that this might set lock->owner = in try_to_take_rt_mutex()
328 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended in try_to_take_rt_mutex()
332 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
334 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
338 * It will get the lock because of one of these conditions: in try_to_take_rt_mutex()
343 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
344 if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { in try_to_take_rt_mutex()
345 if (!waiter || waiter != rt_mutex_top_waiter(lock)) in try_to_take_rt_mutex()
350 if (waiter || rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
358 plist_del(&waiter->list_entry, &lock->wait_list); in try_to_take_rt_mutex()
366 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
367 top = rt_mutex_top_waiter(lock); in try_to_take_rt_mutex()
374 /* We got the lock. */ in try_to_take_rt_mutex()
375 debug_rt_mutex_lock(lock); in try_to_take_rt_mutex()
377 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
379 rt_mutex_deadlock_account_lock(lock, task); in try_to_take_rt_mutex()
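The two conditions named in the comment above ("it is the top waiter" or "it has higher priority than the current top waiter") can be restated as a tiny predicate; illustrative only, with plist semantics where a lower prio value means higher priority:

	#include <stdbool.h>

	/* May 'task' take the lock ahead of the waiters queued on it? */
	static bool can_take(bool is_top_waiter, int task_prio, int top_prio)
	{
		return is_top_waiter || task_prio < top_prio;
	}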
385 * Task blocks on lock.
389 * This must be called with lock->wait_lock held.
391 static int task_blocks_on_rt_mutex(struct rt_mutex *lock, in task_blocks_on_rt_mutex() argument
396 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
404 waiter->lock = lock; in task_blocks_on_rt_mutex()
408 /* Get the top priority waiter on the lock */ in task_blocks_on_rt_mutex()
409 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
410 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
411 plist_add(&waiter->list_entry, &lock->wait_list); in task_blocks_on_rt_mutex()
420 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
437 * The owner can't disappear while holding a lock, in task_blocks_on_rt_mutex()
438 * so the owner struct is protected by wait_lock. in task_blocks_on_rt_mutex()
439 * Gets dropped in rt_mutex_adjust_prio_chain()! in task_blocks_on_rt_mutex()
443 raw_spin_unlock(&lock->wait_lock); in task_blocks_on_rt_mutex()
445 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, in task_blocks_on_rt_mutex()
448 raw_spin_lock(&lock->wait_lock); in task_blocks_on_rt_mutex()
454 * Wake up the next waiter on the lock.
458 * Called with lock->wait_lock held.
460 static void wakeup_next_waiter(struct rt_mutex *lock) in wakeup_next_waiter() argument
467 waiter = rt_mutex_top_waiter(lock); in wakeup_next_waiter()
470 * Remove it from current->pi_waiters. We do not adjust a in wakeup_next_waiter()
471 * possible priority boost right now. We execute wakeup in the in wakeup_next_waiter()
472 * boosted mode and go back to normal after releasing in wakeup_next_waiter()
473 * lock->wait_lock. in wakeup_next_waiter()
477 rt_mutex_set_owner(lock, NULL); in wakeup_next_waiter()
485 * Remove a waiter from a lock and give up on acquiring it
487 * Must be called with lock->wait_lock held and
488 * after try_to_take_rt_mutex() has just failed.
490 static void remove_waiter(struct rt_mutex *lock, in remove_waiter() argument
493 int first = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
494 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
499 plist_del(&waiter->list_entry, &lock->wait_list); in remove_waiter()
512 if (rt_mutex_has_waiters(lock)) { in remove_waiter()
515 next = rt_mutex_top_waiter(lock); in remove_waiter()
534 raw_spin_unlock(&lock->wait_lock); in remove_waiter()
536 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); in remove_waiter()
538 raw_spin_lock(&lock->wait_lock); in remove_waiter()
568 * @lock: the rt_mutex to take
574 * lock->wait_lock must be held by the caller.
577 __rt_mutex_slowlock(struct rt_mutex *lock, int state, in __rt_mutex_slowlock() argument
584 /* Try to acquire the lock: */ in __rt_mutex_slowlock()
585 if (try_to_take_rt_mutex(lock, current, waiter)) in __rt_mutex_slowlock()
602 raw_spin_unlock(&lock->wait_lock); in __rt_mutex_slowlock()
606 schedule_rt_mutex(lock); in __rt_mutex_slowlock()
608 raw_spin_lock(&lock->wait_lock); in __rt_mutex_slowlock()
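Only the lines containing "lock" survive from the wait loop of __rt_mutex_slowlock(); its overall shape in this version of the file is roughly the following sketch (the interruptible case also gives up on signals and on timeout expiry):

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/* Interruptible sleepers bail out on a pending signal or
		 * when the caller-armed hrtimer fired (timeout->task == NULL). */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/* Drop wait_lock across the sleep so other tasks can
		 * operate on the lock, then retake it and retry. */
		raw_spin_unlock(&lock->wait_lock);
		schedule_rt_mutex(lock);
		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}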
616 * Slow path lock function:
619 rt_mutex_slowlock(struct rt_mutex *lock, int state, in rt_mutex_slowlock() argument
628 raw_spin_lock(&lock->wait_lock); in rt_mutex_slowlock()
630 /* Try to acquire the lock again: */ in rt_mutex_slowlock()
631 if (try_to_take_rt_mutex(lock, current, NULL)) { in rt_mutex_slowlock()
632 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowlock()
645 ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock); in rt_mutex_slowlock()
648 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); in rt_mutex_slowlock()
653 remove_waiter(lock, &waiter); in rt_mutex_slowlock()
659 fixup_rt_mutex_waiters(lock); in rt_mutex_slowlock()
661 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowlock()
673 * Slow path try-lock function:
676 rt_mutex_slowtrylock(struct rt_mutex *lock) in rt_mutex_slowtrylock() argument
680 raw_spin_lock(&lock->wait_lock); in rt_mutex_slowtrylock()
682 if (likely(rt_mutex_owner(lock) != current)) { in rt_mutex_slowtrylock()
684 ret = try_to_take_rt_mutex(lock, current, NULL); in rt_mutex_slowtrylock()
686 * try_to_take_rt_mutex() sets the lock waiters in rt_mutex_slowtrylock()
687 * bit unconditionally. Clean this up. in rt_mutex_slowtrylock()
689 fixup_rt_mutex_waiters(lock); in rt_mutex_slowtrylock()
692 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowtrylock()
701 rt_mutex_slowunlock(struct rt_mutex *lock) in rt_mutex_slowunlock() argument
703 raw_spin_lock(&lock->wait_lock); in rt_mutex_slowunlock()
705 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
709 if (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
710 lock->owner = NULL; in rt_mutex_slowunlock()
711 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
715 wakeup_next_waiter(lock); in rt_mutex_slowunlock()
717 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
724 * debug-aware fast/slow-path lock, trylock and unlock functions
730 rt_mutex_fastlock(struct rt_mutex *lock, int state, in rt_mutex_fastlock() argument
732 int (*slowfn)(struct rt_mutex *lock, int state, in rt_mutex_fastlock() argument
736 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { in rt_mutex_fastlock()
737 rt_mutex_deadlock_account_lock(lock, current); in rt_mutex_fastlock()
740 return slowfn(lock, state, NULL, detect_deadlock); in rt_mutex_fastlock()
744 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, in rt_mutex_timed_fastlock() argument
746 int (*slowfn)(struct rt_mutex *lock, int state, in rt_mutex_timed_fastlock() argument
750 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { in rt_mutex_timed_fastlock()
751 rt_mutex_deadlock_account_lock(lock, current); in rt_mutex_timed_fastlock()
754 return slowfn(lock, state, timeout, detect_deadlock); in rt_mutex_timed_fastlock()
758 rt_mutex_fasttrylock(struct rt_mutex *lock, in rt_mutex_fasttrylock() argument
759 int (*slowfn)(struct rt_mutex *lock)) in rt_mutex_fasttrylock() argument
761 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { in rt_mutex_fasttrylock()
762 rt_mutex_deadlock_account_lock(lock, current); in rt_mutex_fasttrylock()
765 return slowfn(lock); in rt_mutex_fasttrylock()
769 rt_mutex_fastunlock(struct rt_mutex *lock, in rt_mutex_fastunlock() argument
770 void (*slowfn)(struct rt_mutex *lock)) in rt_mutex_fastunlock() argument
772 if (likely(rt_mutex_cmpxchg(lock, current, NULL))) in rt_mutex_fastunlock()
775 slowfn(lock); in rt_mutex_fastunlock()
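All four wrappers above funnel through rt_mutex_cmpxchg(). In this era of the file it is defined roughly as follows (a sketch, not the verbatim header): a real atomic compare-and-swap on the owner word when the architecture provides cmpxchg and debugging is off, and a constant 0 otherwise, which forces every operation through the slow path:

	#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
	# define rt_mutex_cmpxchg(l, c, n)	(cmpxchg(&(l)->owner, c, n) == c)
	#else
	/* Always fail, so every lock/trylock/unlock takes the slow path. */
	# define rt_mutex_cmpxchg(l, c, n)	(0)
	#endif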
779 * rt_mutex_lock - lock an rt_mutex
781 * @lock: the rt_mutex to be locked
783 void __sched rt_mutex_lock(struct rt_mutex *lock) in rt_mutex_lock() argument
787 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock); in rt_mutex_lock()
792 * rt_mutex_lock_interruptible - lock an rt_mutex, interruptible by signals
794 * @lock: the rt_mutex to be locked
800 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
802 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, in rt_mutex_lock_interruptible() argument
807 return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, in rt_mutex_lock_interruptible()
813 * rt_mutex_timed_lock - lock an rt_mutex, interruptible, with a timeout supplied by the caller
817 * @lock: the rt_mutex to be locked
825 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
828 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, in rt_mutex_timed_lock() argument
833 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, in rt_mutex_timed_lock()
839 * rt_mutex_trylock - try to lock an rt_mutex
841 * @lock: the rt_mutex to be locked
845 int __sched rt_mutex_trylock(struct rt_mutex *lock) in rt_mutex_trylock() argument
847 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); in rt_mutex_trylock()
852 * rt_mutex_unlock - unlock an rt_mutex
854 * @lock: the rt_mutex to be unlocked
856 void __sched rt_mutex_unlock(struct rt_mutex *lock) in rt_mutex_unlock() argument
858 rt_mutex_fastunlock(lock, rt_mutex_slowunlock); in rt_mutex_unlock()
864 * @lock: the mutex to be destroyed
870 void rt_mutex_destroy(struct rt_mutex *lock) in rt_mutex_destroy() argument
872 WARN_ON(rt_mutex_is_locked(lock)); in rt_mutex_destroy()
874 lock->magic = NULL; in rt_mutex_destroy()
881 * __rt_mutex_init - initialize the rt_mutex
883 * @lock: the rt_mutex to be initialized
885 * Initialize the rt_mutex to the unlocked state.
887 * Initializing a locked rt_mutex is not allowed.
889 void __rt_mutex_init(struct rt_mutex *lock, const char *name) in __rt_mutex_init() argument
891 lock->owner = NULL; in __rt_mutex_init()
892 raw_spin_lock_init(&lock->wait_lock); in __rt_mutex_init()
893 plist_head_init(&lock->wait_list); in __rt_mutex_init()
895 debug_rt_mutex_init(lock, name); in __rt_mutex_init()
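A short usage sketch of the public API shown above, in kernel context; my_lock, my_init and my_worker are hypothetical names for illustration:

	static struct rt_mutex my_lock;

	static int __init my_init(void)
	{
		__rt_mutex_init(&my_lock, "my_lock");
		return 0;
	}

	static void my_worker(void)
	{
		rt_mutex_lock(&my_lock);	/* may priority-boost a lower-prio owner */
		/* ... critical section ... */
		rt_mutex_unlock(&my_lock);

		/* Interruptible variant: returns -EINTR if a signal arrives first.
		 * The second argument enables deadlock detection. */
		if (rt_mutex_lock_interruptible(&my_lock, 0) == 0) {
			/* ... critical section ... */
			rt_mutex_unlock(&my_lock);
		}

		if (rt_mutex_trylock(&my_lock)) {	/* 1 on success, 0 if contended */
			/* ... critical section ... */
			rt_mutex_unlock(&my_lock);
		}
	}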
900 * rt_mutex_init_proxy_locked - initialize and lock an rt_mutex on behalf of a
901 * proxy owner
903 * @lock: the rt_mutex to be locked
909 void rt_mutex_init_proxy_locked(struct rt_mutex *lock, in rt_mutex_init_proxy_locked() argument
912 __rt_mutex_init(lock, NULL); in rt_mutex_init_proxy_locked()
913 debug_rt_mutex_proxy_lock(lock, proxy_owner); in rt_mutex_init_proxy_locked()
914 rt_mutex_set_owner(lock, proxy_owner); in rt_mutex_init_proxy_locked()
915 rt_mutex_deadlock_account_lock(lock, proxy_owner); in rt_mutex_init_proxy_locked()
919 * rt_mutex_proxy_unlock - release a lock on behalf of owner
921 * @lock: the rt_mutex to be unlocked
926 void rt_mutex_proxy_unlock(struct rt_mutex *lock, in rt_mutex_proxy_unlock() argument
929 debug_rt_mutex_proxy_unlock(lock); in rt_mutex_proxy_unlock()
930 rt_mutex_set_owner(lock, NULL); in rt_mutex_proxy_unlock()
935 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
936 * @lock: the rt_mutex to take
942 * 0 - task blocked on lock
943 * 1 - acquired the lock for task, caller should wake it up
948 int rt_mutex_start_proxy_lock(struct rt_mutex *lock, in rt_mutex_start_proxy_lock() argument
954 raw_spin_lock(&lock->wait_lock); in rt_mutex_start_proxy_lock()
956 if (try_to_take_rt_mutex(lock, task, NULL)) { in rt_mutex_start_proxy_lock()
957 raw_spin_unlock(&lock->wait_lock); in rt_mutex_start_proxy_lock()
961 ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); in rt_mutex_start_proxy_lock()
963 if (ret && !rt_mutex_owner(lock)) { in rt_mutex_start_proxy_lock()
965 * Reset the return value. We might have in rt_mutex_start_proxy_lock()
966 * returned with -EDEADLK and the owner in rt_mutex_start_proxy_lock()
967 * released the lock while we were walking the in rt_mutex_start_proxy_lock()
968 * pi chain. Let the waiter sort it out. in rt_mutex_start_proxy_lock()
974 remove_waiter(lock, waiter); in rt_mutex_start_proxy_lock()
976 raw_spin_unlock(&lock->wait_lock); in rt_mutex_start_proxy_lock()
984 * rt_mutex_next_owner - return the next owner of the lock
986 * @lock: the rt_mutex to query
988 * Returns the next owner of the lock or NULL
990 * Caller has to serialize against other accessors to the lock
995 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) in rt_mutex_next_owner() argument
997 if (!rt_mutex_has_waiters(lock)) in rt_mutex_next_owner()
998 return NULL; in rt_mutex_next_owner()
1000 return rt_mutex_top_waiter(lock)->task; in rt_mutex_next_owner()
1004 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
1005 * @lock: the rt_mutex we were woken on
1011 * Complete the lock acquisition started on our behalf by another thread.
1019 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, in rt_mutex_finish_proxy_lock() argument
1026 raw_spin_lock(&lock->wait_lock); in rt_mutex_finish_proxy_lock()
1030 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); in rt_mutex_finish_proxy_lock()
1035 remove_waiter(lock, waiter); in rt_mutex_finish_proxy_lock()
1041 fixup_rt_mutex_waiters(lock); in rt_mutex_finish_proxy_lock()
1043 raw_spin_unlock(&lock->wait_lock); in rt_mutex_finish_proxy_lock()
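The two proxy-lock halves are designed to be split across tasks; this is how the requeue-PI futex code uses them. A schematic of the calling convention documented above: 'task', 'waiter' and 'to' stand for the caller's own objects, and the snippet is illustrative rather than lifted from futex.c:

	/* Requeueing context, acquiring the lock on behalf of 'task': */
	ret = rt_mutex_start_proxy_lock(lock, &waiter, task, 0);
	if (ret == 1)
		wake_up_process(task);	/* lock already taken for it */
	/* ret == 0: 'task' is now blocked on the lock's wait list */

	/* Later, in 'task' itself, once it has been woken: */
	ret = rt_mutex_finish_proxy_lock(lock, to, &waiter, 0);
	/* 0: lock acquired; -EINTR or -ETIMEDOUT: gave up, waiter removed */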