Lines Matching full:lock
40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) in __mutex_init() argument
42 atomic_set(&lock->count, 1); in __mutex_init()
43 spin_lock_init(&lock->wait_lock); in __mutex_init()
44 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
45 mutex_clear_owner(lock); in __mutex_init()
47 debug_mutex_init(lock, name, key); in __mutex_init()
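The fragment above is the body of __mutex_init(): the count goes to 1 (unlocked), the internal wait_lock spinlock and the FIFO wait_list are set up, the owner field is cleared, and the debug/lockdep state is initialized. Callers never use __mutex_init() directly; they declare static mutexes with DEFINE_MUTEX() or call the mutex_init() wrapper on embedded ones. A minimal sketch, assuming a hypothetical struct my_device:

    #include <linux/mutex.h>
    #include <linux/slab.h>

    /* Statically allocated mutex, initialized at compile time. */
    static DEFINE_MUTEX(global_lock);

    /* Hypothetical object embedding a mutex. */
    struct my_device {
            struct mutex lock;
            int value;
    };

    static struct my_device *my_device_alloc(void)
    {
            struct my_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return NULL;
            /* mutex_init() expands to __mutex_init() with a static lockdep key. */
            mutex_init(&dev->lock);
            return dev;
    }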
54 * We split the mutex lock/unlock logic into separate fastpath and
64 * @lock: the mutex to be acquired
66 * Lock the mutex exclusively for this task. If the mutex is not
83 void __sched mutex_lock(struct mutex *lock) in mutex_lock() argument
90 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); in mutex_lock()
91 mutex_set_owner(lock); in mutex_lock()
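mutex_lock() only enters the slowpath on contention; the common case is the architecture-specific __mutex_fastpath_lock(). On architectures using the generic decrement-based fastpath it is roughly the following sketch (taken from asm-generic/mutex-dec.h, not from this file):

    /* Sketch of the generic decrement-based lock fastpath. */
    static inline void
    __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
    {
            /* 1 -> 0 is an uncontended acquire; a negative result means contention. */
            if (unlikely(atomic_dec_return(count) < 0))
                    fail_fn(count);
    }

After the fastpath succeeds, mutex_set_owner() records the owning task, which is what the optimistic-spinning code in the slowpath later reads.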
101 * @lock: the mutex to be released
110 void __sched mutex_unlock(struct mutex *lock) in mutex_unlock() argument
122 mutex_clear_owner(lock); in mutex_unlock()
124 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); in mutex_unlock()
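mutex_unlock() mirrors the lock fastpath: clear the owner, then do the fast release and call the slowpath only if waiters may exist. Together the pair gives the usual critical-section pattern; the helper below is hypothetical and reuses the struct my_device sketched earlier:

    static void my_device_set_value(struct my_device *dev, int value)
    {
            mutex_lock(&dev->lock);         /* may sleep: process context only */
            dev->value = value;             /* at most one task in here per device */
            mutex_unlock(&dev->lock);       /* must be done by the locking task */
    }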
130 * Lock a mutex (possibly interruptible), slowpath:
133 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, in __mutex_lock_common() argument
141 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); in __mutex_lock_common()
148 * pending waiters and the lock owner is currently running on a in __mutex_lock_common()
151 * The rationale is that if the lock owner is running, it is likely to in __mutex_lock_common()
152 * release the lock soon. in __mutex_lock_common()
154 * Since this needs the lock owner, and this mutex implementation in __mutex_lock_common()
155 * doesn't track the owner atomically in the lock field, we need to in __mutex_lock_common()
167 * release the lock or go to sleep. in __mutex_lock_common()
169 owner = ACCESS_ONCE(lock->owner); in __mutex_lock_common()
170 if (owner && !mutex_spin_on_owner(lock, owner)) in __mutex_lock_common()
173 if (atomic_cmpxchg(&lock->count, 1, 0) == 1) { in __mutex_lock_common()
174 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
175 mutex_set_owner(lock); in __mutex_lock_common()
182 * owner acquiring the lock and setting the owner field. If in __mutex_lock_common()
183 * we're an RT task that will live-lock because we won't let in __mutex_lock_common()
198 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
200 debug_mutex_lock_common(lock, &waiter); in __mutex_lock_common()
201 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); in __mutex_lock_common()
204 list_add_tail(&waiter.list, &lock->wait_list); in __mutex_lock_common()
207 if (atomic_xchg(&lock->count, -1) == 1) in __mutex_lock_common()
210 lock_contended(&lock->dep_map, ip); in __mutex_lock_common()
214 * Let's try to take the lock again - this is needed even if in __mutex_lock_common()
216 * acquire the lock), to make sure that we get a wakeup once in __mutex_lock_common()
218 * operation that gives us the lock. We xchg it to -1, so in __mutex_lock_common()
219 * that when we release the lock, we properly wake up the in __mutex_lock_common()
222 if (atomic_xchg(&lock->count, -1) == 1) in __mutex_lock_common()
230 mutex_remove_waiter(lock, &waiter, in __mutex_lock_common()
232 mutex_release(&lock->dep_map, 1, ip); in __mutex_lock_common()
233 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
241 /* didn't get the lock, go to sleep: */ in __mutex_lock_common()
242 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
246 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
250 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
251 /* got the lock - rejoice! */ in __mutex_lock_common()
252 mutex_remove_waiter(lock, &waiter, current_thread_info()); in __mutex_lock_common()
253 mutex_set_owner(lock); in __mutex_lock_common()
256 if (likely(list_empty(&lock->wait_list))) in __mutex_lock_common()
257 atomic_set(&lock->count, 0); in __mutex_lock_common()
259 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
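Pieced together, __mutex_lock_common() first spins optimistically while the current owner is running on another CPU (reading lock->owner with ACCESS_ONCE() and trying an atomic_cmpxchg() of the count from 1 to 0), and only then queues a mutex_waiter and sleeps. A condensed sketch of the queueing part of the function body, with the lockdep, debug and preemption details omitted:

            struct task_struct *task = current;
            struct mutex_waiter waiter;
            unsigned long flags;

            spin_lock_mutex(&lock->wait_lock, flags);

            /* Add ourselves to the end of the wait queue (FIFO). */
            list_add_tail(&waiter.list, &lock->wait_list);
            waiter.task = task;

            for (;;) {
                    /*
                     * xchg the count to -1: if it was 1 the mutex was free and
                     * is now ours; leaving -1 also forces the unlocker into its
                     * slowpath so that a wakeup is guaranteed.
                     */
                    if (atomic_xchg(&lock->count, -1) == 1)
                            break;

                    /* Interruptible/killable callers give up on a signal. */
                    if (unlikely(signal_pending_state(state, task))) {
                            mutex_remove_waiter(lock, &waiter,
                                                task_thread_info(task));
                            spin_unlock_mutex(&lock->wait_lock, flags);
                            return -EINTR;
                    }

                    __set_task_state(task, state);

                    /* Didn't get the lock: sleep with wait_lock dropped. */
                    spin_unlock_mutex(&lock->wait_lock, flags);
                    schedule();
                    spin_lock_mutex(&lock->wait_lock, flags);
            }

            /* Got it: dequeue, record ownership, normalize the count. */
            mutex_remove_waiter(lock, &waiter, current_thread_info());
            mutex_set_owner(lock);
            if (likely(list_empty(&lock->wait_list)))
                    atomic_set(&lock->count, 0);    /* no waiters: plain "locked" */
            spin_unlock_mutex(&lock->wait_lock, flags);

The -1/0/1 convention is what both fastpaths rely on: 1 means unlocked, 0 means locked with no waiters, and a negative count means locked with possible waiters.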
269 mutex_lock_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_nested() argument
272 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); in mutex_lock_nested()
278 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) in _mutex_lock_nest_lock() argument
281 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); in _mutex_lock_nest_lock()
287 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_killable_nested() argument
290 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); in mutex_lock_killable_nested()
295 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_interruptible_nested() argument
298 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, in mutex_lock_interruptible_nested()
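The *_nested() and _mutex_lock_nest_lock() variants exist purely for lockdep: when two mutexes of the same lock class are legitimately held at once (say a parent and a child of the same object type), the inner acquisition must carry a subclass annotation or lockdep reports a false self-deadlock. The killable and interruptible flavours additionally propagate __mutex_lock_common()'s return value, i.e. 0 on success or -EINTR if a (fatal) signal ended the wait. A hypothetical nesting example, reusing struct my_device:

    static void my_reparent(struct my_device *parent, struct my_device *child)
    {
            mutex_lock(&parent->lock);
            /* Same lock class: tell lockdep the second level is intentional. */
            mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

            /* ... move state between child and parent ... */

            mutex_unlock(&child->lock);
            mutex_unlock(&parent->lock);
    }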
306 * Release the lock, slowpath:
311 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_unlock_common_slowpath() local
314 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_unlock_common_slowpath()
315 mutex_release(&lock->dep_map, nested, _RET_IP_); in __mutex_unlock_common_slowpath()
316 debug_mutex_unlock(lock); in __mutex_unlock_common_slowpath()
319 * some architectures leave the lock unlocked in the fastpath failure in __mutex_unlock_common_slowpath()
324 atomic_set(&lock->count, 1); in __mutex_unlock_common_slowpath()
326 if (!list_empty(&lock->wait_list)) { in __mutex_unlock_common_slowpath()
329 list_entry(lock->wait_list.next, in __mutex_unlock_common_slowpath()
332 debug_mutex_wake_waiter(lock, waiter); in __mutex_unlock_common_slowpath()
337 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_unlock_common_slowpath()
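The common unlock slowpath runs under wait_lock: it puts the count back to 1 (unless the architecture's failed fastpath already left the mutex unlocked) and wakes the first queued waiter. A condensed sketch with the debug and lockdep calls left out:

            spin_lock_mutex(&lock->wait_lock, flags);

            /* Some architectures leave the lock unlocked on fastpath failure. */
            if (__mutex_slowpath_needs_to_unlock())
                    atomic_set(&lock->count, 1);

            if (!list_empty(&lock->wait_list)) {
                    /* Wake the first waiter; it re-takes the count itself. */
                    struct mutex_waiter *waiter =
                            list_entry(lock->wait_list.next,
                                       struct mutex_waiter, list);
                    wake_up_process(waiter->task);
            }

            spin_unlock_mutex(&lock->wait_lock, flags);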
341 * Release the lock, slowpath:
362 * @lock: the mutex to be acquired
364 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
366 * signal arrives while waiting for the lock then this function
371 int __sched mutex_lock_interruptible(struct mutex *lock) in mutex_lock_interruptible() argument
377 (&lock->count, __mutex_lock_interruptible_slowpath); in mutex_lock_interruptible()
379 mutex_set_owner(lock); in mutex_lock_interruptible()
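mutex_lock_interruptible() is the variant for paths where a sleeping task should react to signals: it returns 0 with the mutex held, or -EINTR without it. Typical use in a hypothetical ioctl-style handler:

    static long my_device_set_value_user(struct my_device *dev, int value)
    {
            if (mutex_lock_interruptible(&dev->lock))
                    return -ERESTARTSYS;    /* interrupted: the mutex is NOT held */

            dev->value = value;

            mutex_unlock(&dev->lock);
            return 0;
    }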
386 int __sched mutex_lock_killable(struct mutex *lock) in mutex_lock_killable() argument
392 (&lock->count, __mutex_lock_killable_slowpath); in mutex_lock_killable()
394 mutex_set_owner(lock); in mutex_lock_killable()
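mutex_lock_killable() sleeps in TASK_KILLABLE instead of TASK_INTERRUPTIBLE, so only a fatal signal interrupts the wait and ordinary signals no longer cause spurious -EINTR returns. A brief sketch:

            if (mutex_lock_killable(&dev->lock))
                    return -EINTR;          /* the task is being killed */
            /* ... */
            mutex_unlock(&dev->lock);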
403 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_lock_slowpath() local
405 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); in __mutex_lock_slowpath()
411 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_lock_killable_slowpath() local
413 return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); in __mutex_lock_killable_slowpath()
419 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_lock_interruptible_slowpath() local
421 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); in __mutex_lock_interruptible_slowpath()
427 * can get the lock:
431 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_trylock_slowpath() local
435 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_trylock_slowpath()
437 prev = atomic_xchg(&lock->count, -1); in __mutex_trylock_slowpath()
439 mutex_set_owner(lock); in __mutex_trylock_slowpath()
440 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); in __mutex_trylock_slowpath()
444 if (likely(list_empty(&lock->wait_list))) in __mutex_trylock_slowpath()
445 atomic_set(&lock->count, 0); in __mutex_trylock_slowpath()
447 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_trylock_slowpath()
454 * @lock: the mutex to be acquired
466 int __sched mutex_trylock(struct mutex *lock) in mutex_trylock() argument
470 ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); in mutex_trylock()
472 mutex_set_owner(lock); in mutex_trylock()
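mutex_trylock() never sleeps. The fastpath tries to take the count atomically and falls back to __mutex_trylock_slowpath() otherwise; the slowpath above grabs wait_lock, xchg()s the count to -1 and, if the previous value was 1, takes ownership, restoring the count to 0 when nobody else is queued. The return convention is 1 if the mutex was acquired and 0 if it is busy, which suits opportunistic work that must not block:

    static void my_device_try_flush(struct my_device *dev)
    {
            /* 1 == acquired, 0 == someone else holds it. */
            if (!mutex_trylock(&dev->lock))
                    return;                 /* busy: skip the optional work */

            /* ... flush per-device state ... */

            mutex_unlock(&dev->lock);
    }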
481 * @lock: the mutex to return holding if we dec to 0
483 * return true and hold lock if we dec to 0, return false otherwise
485 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) in atomic_dec_and_mutex_lock() argument
490 /* we might hit 0, so take the lock */ in atomic_dec_and_mutex_lock()
491 mutex_lock(lock); in atomic_dec_and_mutex_lock()
494 mutex_unlock(lock); in atomic_dec_and_mutex_lock()
497 /* we hit 0, and we hold the lock */ in atomic_dec_and_mutex_lock()
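atomic_dec_and_mutex_lock() decrements a reference count and, only when it hits zero, returns true with the mutex held; otherwise it returns false without taking the lock. That makes teardown of a refcounted object race-free against lookups protected by the same mutex. A hypothetical put() path:

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct my_object {
            atomic_t refcnt;
            struct list_head node;          /* linked on a list under obj_list_lock */
    };

    static DEFINE_MUTEX(obj_list_lock);

    static void my_object_put(struct my_object *obj)
    {
            if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock))
                    return;                 /* still referenced: lock not taken */

            /* Refcount hit zero and obj_list_lock is held: safe to unlink. */
            list_del(&obj->node);
            mutex_unlock(&obj_list_lock);
            kfree(obj);
    }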