/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids expensive cache bounces that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>
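
/*
 * The per-CPU queue node that callers pass in. Each waiter spins on its
 * own node->locked, so the cache line being polled is always local. The
 * count field is not used by the code below; the kernel's qspinlock code
 * uses it to track nesting of per-CPU nodes.
 */
struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};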

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_cond_load_acquire() provides the acquire semantics
 * required so that subsequent operations happen after the
 * lock is acquired. Additionally, some architectures such as
 * ARM64 would like to do spin-waiting instead of purely
 * spinning, and smp_cond_load_acquire() provides that behavior.
 */
#define arch_mcs_spin_lock_contended(l)					\
	smp_cond_load_acquire(l, VAL)
#endif
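
/*
 * On architectures without a dedicated wait instruction (such as ARM64's
 * WFE-based waiting), the generic smp_cond_load_acquire() behaves roughly
 * like the sketch below: a READ_ONCE() polling loop with cpu_relax(),
 * followed by an acquire barrier once the condition holds.
 *
 *	for (;;) {
 *		VAL = READ_ONCE(*l);
 *		if (VAL)
 *			break;
 *		cpu_relax();
 *	}
 *	smp_acquire__after_ctrl_dep();
 */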

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across CPUs on many
 * architectures (x86 being a notable exception) for an
 * mcs_spin_unlock()/mcs_spin_lock() sequence. Callers that need a full
 * barrier across multiple CPUs from such an unlock/lock pair should use
 * smp_mb__after_unlock_lock() after mcs_spin_lock().
 */
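
/*
 * A minimal sketch of that pairing (hypothetical caller code; the lock
 * and node variables are illustrative):
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();
 *	... critical section, now fully ordered against the releasing
 *	    CPU's prior accesses on all architectures ...
 */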

/*
 * To acquire the lock, the caller should declare a local node and pass a
 * reference to that node, in addition to the lock itself. If the lock is
 * already held, this function spins on node->locked until the previous
 * lock holder sets it in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node. And to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked while waiting
		 * for the lock, and since this thread acquired the lock
		 * immediately, it never spins and the value is never read.
		 * If a debug mode is needed to audit lock status, set
		 * node->locked here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}
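
/*
 * Usage sketch (illustrative only; example_lock and example_work() are
 * hypothetical): each contender supplies its own node, typically on the
 * stack, and must pass the same node to the matching unlock.
 *
 *	static struct mcs_spinlock *example_lock;
 *
 *	void example_work(void)
 *	{
 *		struct mcs_spinlock node;
 *
 *		mcs_spin_lock(&example_lock, &node);
 *		... critical section: one CPU at a time ...
 *		mcs_spin_unlock(&example_lock, &node);
 *	}
 */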

#endif /* __LINUX_MCS_SPINLOCK_H */