1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_LOCAL_LOCK_H
3 # error "Do not include directly, include linux/local_lock.h"
4 #endif
5 
6 #include <linux/percpu-defs.h>
7 #include <linux/lockdep.h>
8 
9 #ifndef CONFIG_PREEMPT_RT
10 
/*
 * Per-CPU local lock. With CONFIG_DEBUG_LOCK_ALLOC it carries a lockdep
 * map and the owning task for debug checks; otherwise it is an empty
 * struct and all operations compile down to the preempt/irq fiddling in
 * the macros below.
 */
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;
17 
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
typedef struct {
	local_lock_t	llock;
	/* 0 = free, 1 = held; accessed with {READ,WRITE}_ONCE() below */
	u8		acquired;
} local_trylock_t;
23 
24 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Static lockdep initializer for a local_lock_t: the class is named after
 * the lock variable and marked as a per-CPU lock with LD_WAIT_CONFIG as
 * the inner wait type.
 */
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,

/* Same, applied to the local_lock_t embedded in a local_trylock_t. */
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)		\
	.llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },
35 
/*
 * Debug acquire hook: notify lockdep first, then record the owning task.
 * The DEBUG_LOCKS_WARN_ON() fires if the lock already has an owner, i.e.
 * on recursive acquisition or a missed release.
 */
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}
42 
/*
 * Debug acquire hook for the trylock path. Uses lock_map_acquire_try()
 * so lockdep treats this as a trylock (no deadlock-inducing wait);
 * ownership bookkeeping is the same as local_lock_acquire().
 */
static inline void local_trylock_acquire(local_lock_t *l)
{
	lock_map_acquire_try(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}
49 
/*
 * Debug release hook: warn if the releasing task is not the recorded
 * owner, clear ownership, then notify lockdep — the mirror order of
 * local_lock_acquire().
 */
static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}
56 
/* Reset the debug owner; lockdep map setup is done by the init macros. */
static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
61 #else /* CONFIG_DEBUG_LOCK_ALLOC */
/* Lock debugging disabled: initializers and hooks compile away to nothing. */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
68 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
69 
/* Static initializers, usable in per-CPU variable definitions. */
#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
#define INIT_LOCAL_TRYLOCK(lockname)	{ LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
72 
/*
 * Runtime initialization: register a lockdep class for this lock (the
 * static __key gives one class per init call site) and reset the debug
 * owner state.
 */
#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)

/*
 * NOTE(review): callers appear to pass a pointer expression (&var); since
 * '.' binds tighter than unary '&', "lock.llock" then textually expands
 * to &var.llock, a pointer to the embedded local_lock_t — confirm against
 * the local_trylock_init() callers.
 */
#define __local_trylock_init(lock) __local_lock_init(lock.llock)
85 
/*
 * Like __local_lock_init() but registers the class as LD_LOCK_NORMAL,
 * for the nested-BH locking helpers below.
 */
#define __spinlock_nested_bh_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_NORMAL);			\
	local_lock_debug_init(lock);				\
} while (0)
96 
/*
 * Common acquire path. Works on both lock types: _Generic dispatches on
 * the static type of @lock — for a local_trylock_t the 'acquired' flag is
 * additionally set (asserting it was clear), so a concurrent trylock from
 * interrupt context on the same CPU will fail; for a plain local_lock_t
 * that branch is a no-op. Casting the per-CPU pointer to local_lock_t *
 * is valid for both since llock is the first member.
 */
#define __local_lock_acquire(lock)					\
	do {								\
		local_trylock_t *tl;					\
		local_lock_t *l;					\
									\
		l = (local_lock_t *)this_cpu_ptr(lock);			\
		tl = (local_trylock_t *)l;				\
		_Generic((lock),					\
			__percpu local_trylock_t *: ({			\
				lockdep_assert(tl->acquired == 0);	\
				WRITE_ONCE(tl->acquired, 1);		\
			}),						\
			__percpu local_lock_t *: (void)0);		\
		local_lock_acquire(l);					\
	} while (0)
112 
/* Acquire with preemption disabled (sufficient for task-context users). */
#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		__local_lock_acquire(lock);			\
	} while (0)

/* Acquire with interrupts disabled. */
#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		__local_lock_acquire(lock);			\
	} while (0)

/* Acquire with interrupts disabled, saving the previous irq state. */
#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		__local_lock_acquire(lock);			\
	} while (0)
130 
/*
 * Try to acquire a local_trylock_t: disable preemption, then test the
 * per-CPU 'acquired' flag. If it is already set (e.g. taken by an
 * interrupted context on this CPU), back out and return 0; otherwise
 * claim it and return 1. The statement expression evaluates to !!tl.
 */
#define __local_trylock(lock)					\
	({							\
		local_trylock_t *tl;				\
								\
		preempt_disable();				\
		tl = this_cpu_ptr(lock);			\
		if (READ_ONCE(tl->acquired)) {			\
			preempt_enable();			\
			tl = NULL;				\
		} else {					\
			WRITE_ONCE(tl->acquired, 1);		\
			local_trylock_acquire(			\
				(local_lock_t *)tl);		\
		}						\
		!!tl;						\
	})
147 
/*
 * Like __local_trylock() but excludes interrupts via local_irq_save();
 * on failure the saved irq state is restored before returning 0.
 */
#define __local_trylock_irqsave(lock, flags)			\
	({							\
		local_trylock_t *tl;				\
								\
		local_irq_save(flags);				\
		tl = this_cpu_ptr(lock);			\
		if (READ_ONCE(tl->acquired)) {			\
			local_irq_restore(flags);		\
			tl = NULL;				\
		} else {					\
			WRITE_ONCE(tl->acquired, 1);		\
			local_trylock_acquire(			\
				(local_lock_t *)tl);		\
		}						\
		!!tl;						\
	})
164 
/*
 * Common release path, mirroring __local_lock_acquire(): lockdep release
 * first, then — for local_trylock_t only, via _Generic — clear the
 * 'acquired' flag (asserting it was set).
 */
#define __local_lock_release(lock)					\
	do {								\
		local_trylock_t *tl;					\
		local_lock_t *l;					\
									\
		l = (local_lock_t *)this_cpu_ptr(lock);			\
		tl = (local_trylock_t *)l;				\
		local_lock_release(l);					\
		_Generic((lock),					\
			__percpu local_trylock_t *: ({			\
				lockdep_assert(tl->acquired == 1);	\
				WRITE_ONCE(tl->acquired, 0);		\
			}),						\
			__percpu local_lock_t *: (void)0);		\
	} while (0)
180 
/* Release counterparts of the __local_lock*() variants above. */
#define __local_unlock(lock)					\
	do {							\
		__local_lock_release(lock);			\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		__local_lock_release(lock);			\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		__local_lock_release(lock);			\
		local_irq_restore(flags);			\
	} while (0)
198 
/*
 * Lock/unlock for use inside softirq (BH) context only — asserts that and
 * relies on BH disablement for exclusion, so no extra preempt/irq fiddling
 * is done; only the lockdep/debug hooks run.
 */
#define __local_lock_nested_bh(lock)				\
	do {							\
		lockdep_assert_in_softirq();			\
		local_lock_acquire(this_cpu_ptr(lock));	\
	} while (0)

#define __local_unlock_nested_bh(lock)				\
	local_lock_release(this_cpu_ptr(lock))
207 
208 #else /* !CONFIG_PREEMPT_RT */
209 
/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;
/* No separate 'acquired' state needed on RT; spin_trylock() covers it. */
typedef spinlock_t local_trylock_t;

/* Static initializers for per-CPU definitions. */
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
219 
/* Runtime init: both lock types are plain spinlocks on RT. */
#define __local_lock_init(l)					\
	do {							\
		local_spin_lock_init((l));			\
	} while (0)

#define __local_trylock_init(l)			__local_lock_init(l)
226 
/*
 * Acquire on RT: disable migration (not preemption) and take the per-CPU
 * spinlock, keeping the critical section preemptible.
 */
#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)

/* Interrupts are not disabled on RT; the spinlock provides exclusion. */
#define __local_lock_irq(lock)			__local_lock(lock)

/* flags is unused on RT; zero it so callers see defined state. */
#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)
241 
/* Release counterparts: drop the spinlock, then re-enable migration. */
#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)

#define __local_unlock_irq(lock)		__local_unlock(lock)

/* flags was never used on the RT lock side, so nothing to restore. */
#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)
251 
/*
 * Nested-BH variants on RT: assert softirq context, then take the real
 * spinlock — no migrate_disable() here, softirq context pins the task.
 */
#define __local_lock_nested_bh(lock)				\
do {								\
	lockdep_assert_in_softirq_func();			\
	spin_lock(this_cpu_ptr(lock));				\
} while (0)

#define __local_unlock_nested_bh(lock)				\
do {								\
	spin_unlock(this_cpu_ptr((lock)));			\
} while (0)
262 
/*
 * Trylock on RT: the lock is a sleeping spinlock, so never attempt it
 * from NMI or hard-IRQ context — fail instead. NOTE(review): the bitwise
 * '|' evaluates both predicates unconditionally; behavior matches '||'
 * here since each yields a zero/non-zero value.
 */
#define __local_trylock(lock)					\
	({							\
		int __locked;					\
								\
		if (in_nmi() | in_hardirq()) {			\
			__locked = 0;				\
		} else {					\
			migrate_disable();			\
			__locked = spin_trylock(this_cpu_ptr((lock)));	\
			if (!__locked)				\
				migrate_enable();		\
		}						\
		__locked;					\
	})
277 
/* As __local_trylock(); flags is unused on RT and zeroed for callers. */
#define __local_trylock_irqsave(lock, flags)			\
	({							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_trylock(lock);				\
	})
284 
285 #endif /* CONFIG_PREEMPT_RT */
286