xref: /linux/include/linux/bit_spinlock.h (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_BIT_SPINLOCK_H
3 #define __LINUX_BIT_SPINLOCK_H
4 
5 #include <linux/kernel.h>
6 #include <linux/preempt.h>
7 #include <linux/atomic.h>
8 #include <linux/bug.h>
9 
10 #include <asm/processor.h>  /* for cpu_relax() */
11 
12 /*
13  * For static context analysis, we need a unique token for each possible bit
14  * that can be used as a bit_spinlock. The easiest way to do that is to create a
15  * fake context that we can cast to with the __bitlock(bitnum, addr) macro
16  * below, which will give us unique instances for each (bit, addr) pair that the
17  * static analysis can use.
18  */
context_lock_struct(__context_bitlock) { };
/*
 * Map a (bit, word-address) pair to a unique fake lock-context pointer for
 * static analysis.  Both arguments are parenthesized and the whole expansion
 * is wrapped in parentheses so that operator-precedence surprises cannot
 * occur when the macro is used with compound expressions (e.g. a
 * conditional expression as @bitnum).
 */
#define __bitlock(bitnum, addr) ((struct __context_bitlock *)((bitnum) + (addr)))
21 
22 /*
23  *  bit-based spin_lock()
24  *
25  * Don't use this unless you really need to: spin_lock() and spin_unlock()
26  * are significantly faster.
27  */
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
	__acquires(__bitlock(bitnum, addr))
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/*
		 * Lost the race: re-enable preemption while we spin on a
		 * plain (non-atomic) read, then disable it again before
		 * the next acquire attempt so the lock is always taken
		 * with preemption off.
		 */
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	/*
	 * On UP without spinlock debugging the #if body compiles out:
	 * disabling preemption alone is enough to provide exclusion.
	 * Tell static analysis the per-(bit, addr) lock is now held.
	 */
	__acquire(__bitlock(bitnum, addr));
}
50 
51 /*
52  * Return true if it was acquired
53  */
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
	__cond_acquires(true, __bitlock(bitnum, addr))
{
	/* Preemption stays disabled only if we actually take the lock. */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/* Bit already set: balance the preempt count and fail. */
		preempt_enable();
		return 0;
	}
#endif
	/* Lock acquired (trivially so on UP without spinlock debugging). */
	__acquire(__bitlock(bitnum, addr));
	return 1;
}
67 
68 /*
69  *  bit-based spin_unlock()
70  */
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
	__releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Catch unlock of a lock that is not held. */
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/*
	 * Atomic clear with release semantics; must happen before
	 * preemption is re-enabled.
	 */
	clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(__bitlock(bitnum, addr));
}
83 
84 /*
85  *  bit-based spin_unlock()
86  *  non-atomic version, which can be used eg. if the bit lock itself is
87  *  protecting the rest of the flags in the word.
88  */
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
	__releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Catch unlock of a lock that is not held. */
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/*
	 * Non-atomic clear (see function comment above): only safe when
	 * no other accessor can be modifying other bits in the word
	 * concurrently, i.e. the lock bit protects the rest of the word.
	 */
	__clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(__bitlock(bitnum, addr));
}
101 
102 /*
103  * Return true if the lock is held.
104  */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* The lock state is literally the bit. */
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
	/*
	 * UP: the bit is never actually set; a non-zero preempt count is
	 * the best available proxy for "a bit spinlock is held".
	 */
	return preempt_count();
#else
	/* UP and no preempt counting: cannot tell, so claim held. */
	return 1;
#endif
}
115 
116 #endif /* __LINUX_BIT_SPINLOCK_H */
117 
118