1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/locking/mutex.c
4  *
5  * Mutexes: blocking mutual exclusion locks
6  *
7  * Started by Ingo Molnar:
8  *
9  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10  *
11  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12  * David Howells for suggestions and improvements.
13  *
14  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15  *    from the -rt tree, where it was originally implemented for rtmutexes
16  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17  *    and Sven Dietrich.
18  *
19  * Also see Documentation/locking/mutex-design.rst.
20  */
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
32 
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/lock.h>
35 
36 #ifndef CONFIG_PREEMPT_RT
37 #include "mutex.h"
38 
39 #ifdef CONFIG_DEBUG_MUTEXES
40 # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
41 #else
42 # define MUTEX_WARN_ON(cond)
43 #endif
44 
45 void
46 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
47 {
48 	atomic_long_set(&lock->owner, 0);
49 	raw_spin_lock_init(&lock->wait_lock);
50 	INIT_LIST_HEAD(&lock->wait_list);
51 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
52 	osq_lock_init(&lock->osq);
53 #endif
54 
55 	debug_mutex_init(lock, name, key);
56 }
57 EXPORT_SYMBOL(__mutex_init);
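
/*
 * Illustrative only (the 'global_lock' and 'foo' names below are hypothetical,
 * not part of this file): the two usual ways a mutex is initialized before the
 * first mutex_lock():
 *
 *	static DEFINE_MUTEX(global_lock);	// static initialization
 *
 *	struct foo {
 *		struct mutex lock;
 *	};
 *
 *	mutex_init(&foo->lock);			// runtime initialization; the
 *						// macro supplies the lockdep
 *						// class key to __mutex_init()
 */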
58 
59 static inline struct task_struct *__owner_task(unsigned long owner)
60 {
61 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
62 }
63 
64 bool mutex_is_locked(struct mutex *lock)
65 {
66 	return __mutex_owner(lock) != NULL;
67 }
68 EXPORT_SYMBOL(mutex_is_locked);
69 
70 static inline unsigned long __owner_flags(unsigned long owner)
71 {
72 	return owner & MUTEX_FLAGS;
73 }
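
/*
 * Illustrative decomposition of the owner word (the numeric value is made up;
 * flag values assume the definitions in mutex.h: WAITERS=0x01, HANDOFF=0x02,
 * PICKUP=0x04):
 *
 *	owner = 0xffff888012345603
 *	__owner_task(owner)  == (struct task_struct *)0xffff888012345600
 *	__owner_flags(owner) == MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 */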
74 
75 /* Do not use the return value as a pointer directly. */
76 unsigned long mutex_get_owner(struct mutex *lock)
77 {
78 	unsigned long owner = atomic_long_read(&lock->owner);
79 
80 	return (unsigned long)__owner_task(owner);
81 }
82 
83 /*
84  * Returns: __mutex_owner(lock) on failure or NULL on success.
85  */
86 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
87 {
88 	unsigned long owner, curr = (unsigned long)current;
89 
90 	owner = atomic_long_read(&lock->owner);
91 	for (;;) { /* must loop, can race against a flag */
92 		unsigned long flags = __owner_flags(owner);
93 		unsigned long task = owner & ~MUTEX_FLAGS;
94 
95 		if (task) {
96 			if (flags & MUTEX_FLAG_PICKUP) {
97 				if (task != curr)
98 					break;
99 				flags &= ~MUTEX_FLAG_PICKUP;
100 			} else if (handoff) {
101 				if (flags & MUTEX_FLAG_HANDOFF)
102 					break;
103 				flags |= MUTEX_FLAG_HANDOFF;
104 			} else {
105 				break;
106 			}
107 		} else {
108 			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
109 			task = curr;
110 		}
111 
112 		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
113 			if (task == curr)
114 				return NULL;
115 			break;
116 		}
117 	}
118 
119 	return __owner_task(owner);
120 }
121 
122 /*
123  * Trylock or set HANDOFF
124  */
125 static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
126 {
127 	return !__mutex_trylock_common(lock, handoff);
128 }
129 
130 /*
131  * Actual trylock that will work on any unlocked state.
132  */
133 static inline bool __mutex_trylock(struct mutex *lock)
134 {
135 	return !__mutex_trylock_common(lock, false);
136 }
137 
138 #ifndef CONFIG_DEBUG_LOCK_ALLOC
139 /*
140  * Lockdep annotations are contained to the slow paths for simplicity.
141  * There is nothing that would stop spreading the lockdep annotations outwards
142  * except more code.
143  */
144 
145 /*
146  * Optimistic trylock that only works in the uncontended case. Make sure to
147  * follow with a __mutex_trylock() before failing.
148  */
149 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
150 {
151 	unsigned long curr = (unsigned long)current;
152 	unsigned long zero = 0UL;
153 
154 	MUTEX_WARN_ON(lock->magic != lock);
155 
156 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
157 		return true;
158 
159 	return false;
160 }
161 
162 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
163 {
164 	unsigned long curr = (unsigned long)current;
165 
166 	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
167 }
168 #endif
169 
170 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
171 {
172 	atomic_long_or(flag, &lock->owner);
173 }
174 
175 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
176 {
177 	atomic_long_andnot(flag, &lock->owner);
178 }
179 
180 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
181 {
182 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
183 }
184 
185 /*
186  * Add @waiter to a given location in the lock wait_list and set the
187  * FLAG_WAITERS flag if it's the first waiter.
188  */
189 static void
190 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
191 		   struct list_head *list)
192 {
193 #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
194 	WRITE_ONCE(current->blocker_mutex, lock);
195 #endif
196 	debug_mutex_add_waiter(lock, waiter, current);
197 
198 	list_add_tail(&waiter->list, list);
199 	if (__mutex_waiter_is_first(lock, waiter))
200 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
201 }
202 
203 static void
204 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
205 {
206 	list_del(&waiter->list);
207 	if (likely(list_empty(&lock->wait_list)))
208 		__mutex_clear_flag(lock, MUTEX_FLAGS);
209 
210 	debug_mutex_remove_waiter(lock, waiter, current);
211 #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
212 	WRITE_ONCE(current->blocker_mutex, NULL);
213 #endif
214 }
215 
216 /*
217  * Give up ownership to a specific task; when @task = NULL, this is equivalent
218  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, and preserves
219  * WAITERS. Provides RELEASE semantics like a regular unlock; the receiving
220  * side's __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
221  */
222 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
223 {
224 	unsigned long owner = atomic_long_read(&lock->owner);
225 
226 	for (;;) {
227 		unsigned long new;
228 
229 		MUTEX_WARN_ON(__owner_task(owner) != current);
230 		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
231 
232 		new = (owner & MUTEX_FLAG_WAITERS);
233 		new |= (unsigned long)task;
234 		if (task)
235 			new |= MUTEX_FLAG_PICKUP;
236 
237 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
238 			break;
239 	}
240 }
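
/*
 * Illustrative handoff sequence (flag names only, assuming the mutex.h
 * definitions): the first waiter, on waking without getting the lock, requests
 * a handoff via __mutex_trylock_or_handoff(). The owner's unlock then observes
 *
 *	owner_task | WAITERS | HANDOFF
 *
 * and, rather than clearing the owner field, rewrites it via __mutex_handoff()
 * to
 *
 *	top_waiter | WAITERS | PICKUP	(WAITERS preserved if still set)
 *
 * after which __mutex_trylock_common() can only succeed for 'top_waiter'.
 */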
241 
242 #ifndef CONFIG_DEBUG_LOCK_ALLOC
243 /*
244  * We split the mutex lock/unlock logic into separate fastpath and
245  * slowpath functions, to reduce the register pressure on the fastpath.
246  * We also put the fastpath first in the kernel image, to make sure the
247  * branch is predicted by the CPU as default-untaken.
248  */
249 static void __sched __mutex_lock_slowpath(struct mutex *lock);
250 
251 /**
252  * mutex_lock - acquire the mutex
253  * @lock: the mutex to be acquired
254  *
255  * Lock the mutex exclusively for this task. If the mutex is not
256  * available right now, it will sleep until it can get it.
257  *
258  * The mutex must later on be released by the same task that
259  * acquired it. Recursive locking is not allowed. The task
260  * may not exit without first unlocking the mutex. Also, kernel
261  * memory where the mutex resides must not be freed with
262  * the mutex still locked. The mutex must first be initialized
263  * (or statically defined) before it can be locked. memset()-ing
264  * the mutex to 0 is not allowed.
265  *
266  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
267  * checks that will enforce the restrictions and will also do
268  * deadlock debugging)
269  *
270  * This function is similar to (but not equivalent to) down().
271  */
272 void __sched mutex_lock(struct mutex *lock)
273 {
274 	might_sleep();
275 
276 	if (!__mutex_trylock_fast(lock))
277 		__mutex_lock_slowpath(lock);
278 }
279 EXPORT_SYMBOL(mutex_lock);
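
/*
 * Illustrative usage (the 'foo' structure is hypothetical, not part of this
 * file); mutex_lock()/mutex_unlock() bracket the critical section and may
 * sleep, so the pattern is valid in process context only:
 *
 *	mutex_lock(&foo->lock);
 *	foo->count++;			// exclusive access to shared state
 *	mutex_unlock(&foo->lock);
 */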
280 #endif
281 
282 #include "ww_mutex.h"
283 
284 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
285 
286 /*
287  * Trylock variant that returns the owning task on failure.
288  */
289 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
290 {
291 	return __mutex_trylock_common(lock, false);
292 }
293 
294 static inline
295 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
296 			    struct mutex_waiter *waiter)
297 {
298 	struct ww_mutex *ww;
299 
300 	ww = container_of(lock, struct ww_mutex, base);
301 
302 	/*
303 	 * If ww->ctx is set the contents are undefined; only
304 	 * by acquiring wait_lock is there a guarantee that
305 	 * they are not invalid when read.
306 	 *
307 	 * As such, when deadlock detection needs to be
308 	 * performed the optimistic spinning cannot be done.
309 	 *
310 	 * Check this in every inner iteration because we may
311 	 * be racing against another thread's ww_mutex_lock.
312 	 */
313 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
314 		return false;
315 
316 	/*
317 	 * If we aren't on the wait list yet, cancel the spin
318 	 * if there are waiters. We want to avoid stealing the
319 	 * lock from a waiter with an earlier stamp, since the
320 	 * other thread may already own a lock that we also
321 	 * need.
322 	 */
323 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
324 		return false;
325 
326 	/*
327 	 * Similarly, stop spinning if we are no longer the
328 	 * first waiter.
329 	 */
330 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
331 		return false;
332 
333 	return true;
334 }
335 
336 /*
337  * Look out! "owner" is an entirely speculative pointer access and not
338  * reliable.
339  *
340  * "noinline" so that this function shows up on perf profiles.
341  */
342 static noinline
343 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
344 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
345 {
346 	bool ret = true;
347 
348 	lockdep_assert_preemption_disabled();
349 
350 	while (__mutex_owner(lock) == owner) {
351 		/*
352 		 * Ensure we emit the owner->on_cpu dereference _after_
353 		 * checking that lock->owner still matches owner. We have
354 		 * already disabled preemption, which is equivalent to an RCU
355 		 * read-side critical section in the optimistic spinning code,
356 		 * so the task_struct structure won't go away during the
357 		 * spinning period.
358 		 */
359 		barrier();
360 
361 		/*
362 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
363 		 */
364 		if (!owner_on_cpu(owner) || need_resched()) {
365 			ret = false;
366 			break;
367 		}
368 
369 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
370 			ret = false;
371 			break;
372 		}
373 
374 		cpu_relax();
375 	}
376 
377 	return ret;
378 }
379 
380 /*
381  * Initial check for entering the mutex spinning loop
382  */
383 static inline int mutex_can_spin_on_owner(struct mutex *lock)
384 {
385 	struct task_struct *owner;
386 	int retval = 1;
387 
388 	lockdep_assert_preemption_disabled();
389 
390 	if (need_resched())
391 		return 0;
392 
393 	/*
394 	 * We already disabled preemption, which is equivalent to an RCU read-side
395 	 * critical section in the optimistic spinning code. Thus the task_struct
396 	 * structure won't go away during the spinning period.
397 	 */
398 	owner = __mutex_owner(lock);
399 	if (owner)
400 		retval = owner_on_cpu(owner);
401 
402 	/*
403 	 * If lock->owner is not set, the mutex has been released. Return true
404 	 * such that we'll trylock in the spin path, which is a faster option
405 	 * than the blocking slow path.
406 	 */
407 	return retval;
408 }
409 
410 /*
411  * Optimistic spinning.
412  *
413  * We try to spin for acquisition when we find that the lock owner
414  * is currently running on a (different) CPU and while we don't
415  * need to reschedule. The rationale is that if the lock owner is
416  * running, it is likely to release the lock soon.
417  *
418  * The mutex spinners are queued up using MCS lock so that only one
419  * spinner can compete for the mutex. However, if mutex spinning isn't
420  * going to happen, there is no point in going through the lock/unlock
421  * overhead.
422  *
423  * Returns true when the lock was taken, otherwise false, indicating
424  * that we need to jump to the slowpath and sleep.
425  *
426  * The @waiter argument is non-NULL if the spinner is already a waiter on the
427  * wait queue. Such a waiter-spinner will spin on the lock directly and
428  * concurrently with the spinner at the head of the OSQ, if present, until
429  * the owner is changed to itself.
430  */
431 static __always_inline bool
432 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
433 		      struct mutex_waiter *waiter)
434 {
435 	if (!waiter) {
436 		/*
437 		 * The purpose of the mutex_can_spin_on_owner() function is
438 		 * to eliminate the overhead of osq_lock() and osq_unlock()
439 		 * in case spinning isn't possible. As a waiter-spinner
440 		 * is not going to take OSQ lock anyway, there is no need
441 		 * to call mutex_can_spin_on_owner().
442 		 */
443 		if (!mutex_can_spin_on_owner(lock))
444 			goto fail;
445 
446 		/*
447 		 * In order to avoid a stampede of mutex spinners trying to
448 		 * acquire the mutex all at once, the spinners need to take a
449 		 * MCS (queued) lock first before spinning on the owner field.
450 		 */
451 		if (!osq_lock(&lock->osq))
452 			goto fail;
453 	}
454 
455 	for (;;) {
456 		struct task_struct *owner;
457 
458 		/* Try to acquire the mutex... */
459 		owner = __mutex_trylock_or_owner(lock);
460 		if (!owner)
461 			break;
462 
463 		/*
464 		 * There's an owner, wait for it to either
465 		 * release the lock or go to sleep.
466 		 */
467 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
468 			goto fail_unlock;
469 
470 		/*
471 		 * The cpu_relax() call is a compiler barrier which forces
472 		 * everything in this loop to be re-loaded. We don't need
473 		 * memory barriers as we'll eventually observe the right
474 		 * values at the cost of a few extra spins.
475 		 */
476 		cpu_relax();
477 	}
478 
479 	if (!waiter)
480 		osq_unlock(&lock->osq);
481 
482 	return true;
483 
484 
485 fail_unlock:
486 	if (!waiter)
487 		osq_unlock(&lock->osq);
488 
489 fail:
490 	/*
491 	 * If we fell out of the spin path because of need_resched(),
492 	 * reschedule now, before we try-lock the mutex. This avoids getting
493 	 * scheduled out right after we obtained the mutex.
494 	 */
495 	if (need_resched()) {
496 		/*
497 		 * We _should_ have TASK_RUNNING here, but just in case
498 		 * we do not, make it so, otherwise we might get stuck.
499 		 */
500 		__set_current_state(TASK_RUNNING);
501 		schedule_preempt_disabled();
502 	}
503 
504 	return false;
505 }
506 #else
507 static __always_inline bool
508 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
509 		      struct mutex_waiter *waiter)
510 {
511 	return false;
512 }
513 #endif
514 
515 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
516 
517 /**
518  * mutex_unlock - release the mutex
519  * @lock: the mutex to be released
520  *
521  * Unlock a mutex that has been locked by this task previously.
522  *
523  * This function must not be used in interrupt context. Unlocking
524  * of a not locked mutex is not allowed.
525  *
526  * The caller must ensure that the mutex stays alive until this function has
527  * returned - mutex_unlock() can NOT directly be used to release an object such
528  * that another concurrent task can free it.
529  * Mutexes are different from spinlocks & refcounts in this aspect.
530  *
531  * This function is similar to (but not equivalent to) up().
532  */
533 void __sched mutex_unlock(struct mutex *lock)
534 {
535 #ifndef CONFIG_DEBUG_LOCK_ALLOC
536 	if (__mutex_unlock_fast(lock))
537 		return;
538 #endif
539 	__mutex_unlock_slowpath(lock, _RET_IP_);
540 }
541 EXPORT_SYMBOL(mutex_unlock);
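
/*
 * Illustrative caveat (hypothetical 'obj', not part of this file): the
 * unlocking task may still be touching the mutex word in the unlock slowpath
 * after a waiter has already acquired the lock, so freeing the containing
 * object from the waiter's side is a use-after-free:
 *
 *	// task A			// task B
 *	mutex_lock(&obj->lock);
 *	...
 *	mutex_unlock(&obj->lock);	mutex_lock(&obj->lock);
 *	// A may still be inside	mutex_unlock(&obj->lock);
 *	// __mutex_unlock_slowpath()	kfree(obj);		// BAD
 *
 * Pin the object with a reference count, RCU or a completion instead.
 */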
542 
543 /**
544  * ww_mutex_unlock - release the w/w mutex
545  * @lock: the mutex to be released
546  *
547  * Unlock a mutex that has been locked by this task previously with any of the
548  * ww_mutex_lock* functions (with or without an acquire context). It is
549  * forbidden to release the locks after releasing the acquire context.
550  *
551  * This function must not be used in interrupt context. Unlocking
552  * of an unlocked mutex is not allowed.
553  */
554 void __sched ww_mutex_unlock(struct ww_mutex *lock)
555 {
556 	__ww_mutex_unlock(lock);
557 	mutex_unlock(&lock->base);
558 }
559 EXPORT_SYMBOL(ww_mutex_unlock);
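
/*
 * Illustrative wait/wound usage, simplified (the 'my_ww_class', 'a' and 'b'
 * names are hypothetical; real callers loop until no -EDEADLK is returned,
 * see Documentation/locking/ww-mutex-design.rst):
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		// an older context needs one of our locks: back off,
 *		// then block on the contended lock before retrying
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *	}
 *	...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */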
560 
561 /*
562  * Lock a mutex (possibly interruptible), slowpath:
563  */
564 static __always_inline int __sched
565 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
566 		    struct lockdep_map *nest_lock, unsigned long ip,
567 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
568 {
569 	DEFINE_WAKE_Q(wake_q);
570 	struct mutex_waiter waiter;
571 	struct ww_mutex *ww;
572 	unsigned long flags;
573 	int ret;
574 
575 	if (!use_ww_ctx)
576 		ww_ctx = NULL;
577 
578 	might_sleep();
579 
580 	MUTEX_WARN_ON(lock->magic != lock);
581 
582 	ww = container_of(lock, struct ww_mutex, base);
583 	if (ww_ctx) {
584 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
585 			return -EALREADY;
586 
587 		/*
588 		 * Reset the wounded flag after a kill. No other process can
589 		 * race and wound us here since they can't have a valid owner
590 		 * pointer if we don't have any locks held.
591 		 */
592 		if (ww_ctx->acquired == 0)
593 			ww_ctx->wounded = 0;
594 
595 #ifdef CONFIG_DEBUG_LOCK_ALLOC
596 		nest_lock = &ww_ctx->dep_map;
597 #endif
598 	}
599 
600 	preempt_disable();
601 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
602 
603 	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
604 	if (__mutex_trylock(lock) ||
605 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
606 		/* got the lock, yay! */
607 		lock_acquired(&lock->dep_map, ip);
608 		if (ww_ctx)
609 			ww_mutex_set_context_fastpath(ww, ww_ctx);
610 		trace_contention_end(lock, 0);
611 		preempt_enable();
612 		return 0;
613 	}
614 
615 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
616 	/*
617 	 * After waiting to acquire the wait_lock, try again.
618 	 */
619 	if (__mutex_trylock(lock)) {
620 		if (ww_ctx)
621 			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
622 
623 		goto skip_wait;
624 	}
625 
626 	debug_mutex_lock_common(lock, &waiter);
627 	waiter.task = current;
628 	if (use_ww_ctx)
629 		waiter.ww_ctx = ww_ctx;
630 
631 	lock_contended(&lock->dep_map, ip);
632 
633 	if (!use_ww_ctx) {
634 		/* add waiting tasks to the end of the waitqueue (FIFO): */
635 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
636 	} else {
637 		/*
638 		 * Add in stamp order, waking up waiters that must kill
639 		 * themselves.
640 		 */
641 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
642 		if (ret)
643 			goto err_early_kill;
644 	}
645 
646 	set_current_state(state);
647 	trace_contention_begin(lock, LCB_F_MUTEX);
648 	for (;;) {
649 		bool first;
650 
651 		/*
652 		 * Once we hold wait_lock, we're serialized against
653 		 * mutex_unlock() handing the lock off to us, do a trylock
654 		 * before testing the error conditions to make sure we pick up
655 		 * the handoff.
656 		 */
657 		if (__mutex_trylock(lock))
658 			goto acquired;
659 
660 		/*
661 		 * Check for signals and kill conditions while holding
662 		 * wait_lock. This ensures the lock cancellation is ordered
663 		 * against mutex_unlock() and wake-ups do not go missing.
664 		 */
665 		if (signal_pending_state(state, current)) {
666 			ret = -EINTR;
667 			goto err;
668 		}
669 
670 		if (ww_ctx) {
671 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
672 			if (ret)
673 				goto err;
674 		}
675 
676 		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
677 
678 		schedule_preempt_disabled();
679 
680 		first = __mutex_waiter_is_first(lock, &waiter);
681 
682 		set_current_state(state);
683 		/*
684 		 * Here we order against unlock; we must either see it change
685 		 * state back to RUNNING and fall through the next schedule(),
686 		 * or we must see its unlock and acquire.
687 		 */
688 		if (__mutex_trylock_or_handoff(lock, first))
689 			break;
690 
691 		if (first) {
692 			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
693 			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
694 				break;
695 			trace_contention_begin(lock, LCB_F_MUTEX);
696 		}
697 
698 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
699 	}
700 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
701 acquired:
702 	__set_current_state(TASK_RUNNING);
703 
704 	if (ww_ctx) {
705 		/*
706 		 * Wound-Wait; we stole the lock (!first_waiter), check the
707 		 * waiters as anyone might want to wound us.
708 		 */
709 		if (!ww_ctx->is_wait_die &&
710 		    !__mutex_waiter_is_first(lock, &waiter))
711 			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
712 	}
713 
714 	__mutex_remove_waiter(lock, &waiter);
715 
716 	debug_mutex_free_waiter(&waiter);
717 
718 skip_wait:
719 	/* got the lock - cleanup and rejoice! */
720 	lock_acquired(&lock->dep_map, ip);
721 	trace_contention_end(lock, 0);
722 
723 	if (ww_ctx)
724 		ww_mutex_lock_acquired(ww, ww_ctx);
725 
726 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
727 	preempt_enable();
728 	return 0;
729 
730 err:
731 	__set_current_state(TASK_RUNNING);
732 	__mutex_remove_waiter(lock, &waiter);
733 err_early_kill:
734 	trace_contention_end(lock, ret);
735 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
736 	debug_mutex_free_waiter(&waiter);
737 	mutex_release(&lock->dep_map, ip);
738 	preempt_enable();
739 	return ret;
740 }
741 
742 static int __sched
743 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
744 	     struct lockdep_map *nest_lock, unsigned long ip)
745 {
746 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
747 }
748 
749 static int __sched
750 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
751 		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
752 {
753 	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
754 }
755 
756 /**
757  * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
758  * @ww: mutex to lock
759  * @ww_ctx: optional w/w acquire context
760  *
761  * Trylocks a mutex with the optional acquire context; no deadlock detection is
762  * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
763  *
764  * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx
765  * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
766  *
767  * A mutex acquired with this function must be released with ww_mutex_unlock.
768  */
769 int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
770 {
771 	if (!ww_ctx)
772 		return mutex_trylock(&ww->base);
773 
774 	MUTEX_WARN_ON(ww->base.magic != &ww->base);
775 
776 	/*
777 	 * Reset the wounded flag after a kill. No other process can
778 	 * race and wound us here, since they can't have a valid owner
779 	 * pointer if we don't have any locks held.
780 	 */
781 	if (ww_ctx->acquired == 0)
782 		ww_ctx->wounded = 0;
783 
784 	if (__mutex_trylock(&ww->base)) {
785 		ww_mutex_set_context_fastpath(ww, ww_ctx);
786 		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
787 		return 1;
788 	}
789 
790 	return 0;
791 }
792 EXPORT_SYMBOL(ww_mutex_trylock);
793 
794 #ifdef CONFIG_DEBUG_LOCK_ALLOC
795 void __sched
796 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
797 {
798 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
799 }
800 
801 EXPORT_SYMBOL_GPL(mutex_lock_nested);
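
/*
 * Illustrative annotation (hypothetical parent/child objects): when two locks
 * of the same lockdep class legitimately nest, the inner acquisition gets a
 * distinct subclass so lockdep does not flag a false self-deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */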
802 
803 void __sched
804 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
805 {
806 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
807 }
808 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
809 
810 int __sched
811 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
812 {
813 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
814 }
815 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
816 
817 int __sched
818 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
819 {
820 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
821 }
822 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
823 
824 void __sched
825 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
826 {
827 	int token;
828 
829 	might_sleep();
830 
831 	token = io_schedule_prepare();
832 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
833 			    subclass, NULL, _RET_IP_, NULL, 0);
834 	io_schedule_finish(token);
835 }
836 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
837 
838 static inline int
839 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
840 {
841 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
842 	unsigned tmp;
843 
844 	if (ctx->deadlock_inject_countdown-- == 0) {
845 		tmp = ctx->deadlock_inject_interval;
846 		if (tmp > UINT_MAX/4)
847 			tmp = UINT_MAX;
848 		else
849 			tmp = tmp*2 + tmp + tmp/2;
850 
851 		ctx->deadlock_inject_interval = tmp;
852 		ctx->deadlock_inject_countdown = tmp;
853 		ctx->contending_lock = lock;
854 
855 		ww_mutex_unlock(lock);
856 
857 		return -EDEADLK;
858 	}
859 #endif
860 
861 	return 0;
862 }
863 
864 int __sched
865 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
866 {
867 	int ret;
868 
869 	might_sleep();
870 	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
871 			       0, _RET_IP_, ctx);
872 	if (!ret && ctx && ctx->acquired > 1)
873 		return ww_mutex_deadlock_injection(lock, ctx);
874 
875 	return ret;
876 }
877 EXPORT_SYMBOL_GPL(ww_mutex_lock);
878 
879 int __sched
880 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
881 {
882 	int ret;
883 
884 	might_sleep();
885 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
886 			      0, _RET_IP_, ctx);
887 
888 	if (!ret && ctx && ctx->acquired > 1)
889 		return ww_mutex_deadlock_injection(lock, ctx);
890 
891 	return ret;
892 }
893 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
894 
895 #endif
896 
897 /*
898  * Release the lock, slowpath:
899  */
900 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
901 {
902 	struct task_struct *next = NULL;
903 	DEFINE_WAKE_Q(wake_q);
904 	unsigned long owner;
905 	unsigned long flags;
906 
907 	mutex_release(&lock->dep_map, ip);
908 
909 	/*
910 	 * Release the lock before (potentially) taking the spinlock such that
911 	 * other contenders can get on with things ASAP.
912 	 *
913 	 * Except when HANDOFF, in that case we must not clear the owner field,
914 	 * but instead set it to the top waiter.
915 	 */
916 	owner = atomic_long_read(&lock->owner);
917 	for (;;) {
918 		MUTEX_WARN_ON(__owner_task(owner) != current);
919 		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
920 
921 		if (owner & MUTEX_FLAG_HANDOFF)
922 			break;
923 
924 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
925 			if (owner & MUTEX_FLAG_WAITERS)
926 				break;
927 
928 			return;
929 		}
930 	}
931 
932 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
933 	debug_mutex_unlock(lock);
934 	if (!list_empty(&lock->wait_list)) {
935 		/* get the first entry from the wait-list: */
936 		struct mutex_waiter *waiter =
937 			list_first_entry(&lock->wait_list,
938 					 struct mutex_waiter, list);
939 
940 		next = waiter->task;
941 
942 		debug_mutex_wake_waiter(lock, waiter);
943 		wake_q_add(&wake_q, next);
944 	}
945 
946 	if (owner & MUTEX_FLAG_HANDOFF)
947 		__mutex_handoff(lock, next);
948 
949 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
950 }
951 
952 #ifndef CONFIG_DEBUG_LOCK_ALLOC
953 /*
954  * Here come the less common (and hence less performance-critical) APIs:
955  * mutex_lock_interruptible() and mutex_trylock().
956  */
957 static noinline int __sched
958 __mutex_lock_killable_slowpath(struct mutex *lock);
959 
960 static noinline int __sched
961 __mutex_lock_interruptible_slowpath(struct mutex *lock);
962 
963 /**
964  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
965  * @lock: The mutex to be acquired.
966  *
967  * Lock the mutex like mutex_lock().  If a signal is delivered while the
968  * process is sleeping, this function will return without acquiring the
969  * mutex.
970  *
971  * Context: Process context.
972  * Return: 0 if the lock was successfully acquired or %-EINTR if a
973  * signal arrived.
974  */
975 int __sched mutex_lock_interruptible(struct mutex *lock)
976 {
977 	might_sleep();
978 
979 	if (__mutex_trylock_fast(lock))
980 		return 0;
981 
982 	return __mutex_lock_interruptible_slowpath(lock);
983 }
984 
985 EXPORT_SYMBOL(mutex_lock_interruptible);
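
/*
 * Illustrative use (hypothetical 'dev', not part of this file): a path
 * reachable from userspace that should stay responsive to signals rather than
 * block indefinitely:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;	// interrupted; -EINTR was returned
 *	...
 *	mutex_unlock(&dev->lock);
 */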
986 
987 /**
988  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
989  * @lock: The mutex to be acquired.
990  *
991  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
992  * the current process is delivered while the process is sleeping, this
993  * function will return without acquiring the mutex.
994  *
995  * Context: Process context.
996  * Return: 0 if the lock was successfully acquired or %-EINTR if a
997  * fatal signal arrived.
998  */
999 int __sched mutex_lock_killable(struct mutex *lock)
1000 {
1001 	might_sleep();
1002 
1003 	if (__mutex_trylock_fast(lock))
1004 		return 0;
1005 
1006 	return __mutex_lock_killable_slowpath(lock);
1007 }
1008 EXPORT_SYMBOL(mutex_lock_killable);
1009 
1010 /**
1011  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1012  * @lock: The mutex to be acquired.
1013  *
1014  * Lock the mutex like mutex_lock().  While the task is waiting for this
1015  * mutex, it will be accounted as being in the IO wait state by the
1016  * scheduler.
1017  *
1018  * Context: Process context.
1019  */
1020 void __sched mutex_lock_io(struct mutex *lock)
1021 {
1022 	int token;
1023 
1024 	token = io_schedule_prepare();
1025 	mutex_lock(lock);
1026 	io_schedule_finish(token);
1027 }
1028 EXPORT_SYMBOL_GPL(mutex_lock_io);
1029 
1030 static noinline void __sched
1031 __mutex_lock_slowpath(struct mutex *lock)
1032 {
1033 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1034 }
1035 
1036 static noinline int __sched
1037 __mutex_lock_killable_slowpath(struct mutex *lock)
1038 {
1039 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1040 }
1041 
1042 static noinline int __sched
1043 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1044 {
1045 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1046 }
1047 
1048 static noinline int __sched
1049 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1050 {
1051 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1052 			       _RET_IP_, ctx);
1053 }
1054 
1055 static noinline int __sched
1056 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1057 					    struct ww_acquire_ctx *ctx)
1058 {
1059 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1060 			       _RET_IP_, ctx);
1061 }
1062 
1063 #endif
1064 
1065 /**
1066  * mutex_trylock - try to acquire the mutex, without waiting
1067  * @lock: the mutex to be acquired
1068  *
1069  * Try to acquire the mutex atomically. Returns 1 if the mutex
1070  * has been acquired successfully, and 0 on contention.
1071  *
1072  * NOTE: this function follows the spin_trylock() convention, so
1073  * it is negated from the down_trylock() return values! Be careful
1074  * about this when converting semaphore users to mutexes.
1075  *
1076  * This function must not be used in interrupt context. The
1077  * mutex must be released by the same task that acquired it.
1078  */
1079 int __sched mutex_trylock(struct mutex *lock)
1080 {
1081 	bool locked;
1082 
1083 	MUTEX_WARN_ON(lock->magic != lock);
1084 
1085 	locked = __mutex_trylock(lock);
1086 	if (locked)
1087 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1088 
1089 	return locked;
1090 }
1091 EXPORT_SYMBOL(mutex_trylock);
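
/*
 * Illustrative use (hypothetical 'dev'), highlighting the spin_trylock()-style
 * return convention (1 == acquired, 0 == already locked):
 *
 *	if (!mutex_trylock(&dev->lock))
 *		return -EBUSY;		// contended: bail out instead of sleeping
 *	...
 *	mutex_unlock(&dev->lock);
 */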
1092 
1093 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1094 int __sched
1095 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1096 {
1097 	might_sleep();
1098 
1099 	if (__mutex_trylock_fast(&lock->base)) {
1100 		if (ctx)
1101 			ww_mutex_set_context_fastpath(lock, ctx);
1102 		return 0;
1103 	}
1104 
1105 	return __ww_mutex_lock_slowpath(lock, ctx);
1106 }
1107 EXPORT_SYMBOL(ww_mutex_lock);
1108 
1109 int __sched
1110 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1111 {
1112 	might_sleep();
1113 
1114 	if (__mutex_trylock_fast(&lock->base)) {
1115 		if (ctx)
1116 			ww_mutex_set_context_fastpath(lock, ctx);
1117 		return 0;
1118 	}
1119 
1120 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1121 }
1122 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1123 
1124 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
1125 #endif /* !CONFIG_PREEMPT_RT */
1126 
1127 EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
1128 EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);
1129 
1130 /**
1131  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1132  * @cnt: the atomic which we are to dec
1133  * @lock: the mutex to return holding if we dec to 0
1134  *
1135  * Return: true (holding @lock) if the count was decremented to 0, false otherwise
1136  */
1137 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1138 {
1139 	/* dec if we can't possibly hit 0 */
1140 	if (atomic_add_unless(cnt, -1, 1))
1141 		return 0;
1142 	/* we might hit 0, so take the lock */
1143 	mutex_lock(lock);
1144 	if (!atomic_dec_and_test(cnt)) {
1145 		/* when we actually did the dec, we didn't hit 0 */
1146 		mutex_unlock(lock);
1147 		return 0;
1148 	}
1149 	/* we hit 0, and we hold the lock */
1150 	return 1;
1151 }
1152 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
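
/*
 * Illustrative use (hypothetical refcounted 'obj' on a list protected by
 * 'obj_list_lock', neither part of this file): drop a reference and, only for
 * the final put, tear the object down under the mutex:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */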
1153