Lines Matching full:lock

file header comment:
  31	 * trace is also emitted for the previous lock acquisition.

member of struct block_lock:
  44	spinlock_t lock;
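The spinlock_t member above belongs to the per-block lock that the bl_* helpers below operate on. As a reading aid, here is a minimal sketch of the structure those matches imply; the type of the saved traces, the waiter record, and the MAX_HOLDERS/MAX_STACK values are assumptions, only the names appear in the matches.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/types.h>

#define MAX_HOLDERS 4		/* assumed value; only the symbol appears in the matches */
#define MAX_STACK 10		/* assumed depth for the saved acquisition traces */

struct stack_store {		/* assumed name; holds one saved stack trace */
	unsigned int nr_entries;
	unsigned long entries[MAX_STACK];
};

struct block_lock {
	spinlock_t lock;			/* protects every field below */
	__s32 count;				/* < 0: write locked, 0: free, > 0: reader count */
	struct list_head waiters;		/* queued struct waiter entries */
	struct task_struct *holders[MAX_HOLDERS];
	struct stack_store traces[MAX_HOLDERS];	/* where each holder acquired the lock */
};

struct waiter {			/* one blocked task on block_lock::waiters */
	struct list_head list;
	struct task_struct *task;
	int wants_write;	/* assumed field name */
};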
in __find_holder():
  60	static unsigned __find_holder(struct block_lock *lock,
  66		if (lock->holders[i] == task)

in __add_holder():
  73	/* call this *after* you increment lock->count */
  74	static void __add_holder(struct block_lock *lock, struct task_struct *task)
  76		unsigned h = __find_holder(lock, NULL);
  82		lock->holders[h] = task;
  85		t = lock->traces + h;

in __del_holder():
  90	/* call this *before* you decrement lock->count */
  91	static void __del_holder(struct block_lock *lock, struct task_struct *task)
  93		unsigned h = __find_holder(lock, task);
  94		lock->holders[h] = NULL;

in __check_holder():
  98	static int __check_holder(struct block_lock *lock)
 103		if (lock->holders[i] == current) {
 104			DMERR("recursive lock detected in metadata");
 107			stack_trace_print(lock->traces[i].entries,
 108					  lock->traces[i].nr_entries, 4);
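__check_holder() prints lock->traces[i] when it finds the current task already in the holders array, so __add_holder() presumably fills that slot when it records a holder. A hedged sketch of that step using the kernel's stack_trace_save()/stack_trace_print() pair; the helper name, the skip count, and MAX_STACK are assumptions:

#include <linux/stacktrace.h>

/* Sketch only: record where the current task took the lock so that a later
 * recursive acquisition can print it via stack_trace_print(). */
static void __record_holder_trace(struct block_lock *lock, unsigned h)
{
	struct stack_store *t = lock->traces + h;	/* the "t = lock->traces + h" seen above */

	t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2 /* assumed skip */);
}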
in __wake_many():
 148	static void __wake_many(struct block_lock *lock)
 152		BUG_ON(lock->count < 0);
 153		list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
 154			if (lock->count >= MAX_HOLDERS)
 158			if (lock->count > 0)
 161			lock->count = -1;
 162			__add_holder(lock, w->task);
 167			lock->count++;
 168			__add_holder(lock, w->task);
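Only fragments of the wake-up loop appear above. Below is a hedged reconstruction of the policy they imply: readers are admitted until MAX_HOLDERS is reached, while a waiting writer is admitted only once no readers remain and then stops the scan. It builds on the struct sketch earlier; the wants_write field and the __wake_waiter() helper are assumed names.

/* Reconstruction sketch of the wake-up policy, not the verbatim function. */
static void __wake_many_sketch(struct block_lock *lock)
{
	struct waiter *w, *tmp;

	BUG_ON(lock->count < 0);	/* must not be write locked here */
	list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
		if (lock->count >= MAX_HOLDERS)
			return;				/* reader slots exhausted */

		if (w->wants_write) {			/* assumed field */
			if (lock->count > 0)
				return;			/* readers still hold the lock */

			lock->count = -1;		/* hand the lock to one writer */
			__add_holder(lock, w->task);
			__wake_waiter(w);		/* assumed helper */
			return;
		}

		lock->count++;				/* admit another reader */
		__add_holder(lock, w->task);
		__wake_waiter(w);			/* assumed helper */
	}
}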
in bl_init():
 173	static void bl_init(struct block_lock *lock)
 177		spin_lock_init(&lock->lock);
 178		lock->count = 0;
 179		INIT_LIST_HEAD(&lock->waiters);
 181			lock->holders[i] = NULL;

in __available_for_read():
 184	static int __available_for_read(struct block_lock *lock)
 186		return lock->count >= 0 &&
 187		       lock->count < MAX_HOLDERS &&
 188		       list_empty(&lock->waiters);
in bl_down_read():
 191	static int bl_down_read(struct block_lock *lock)
 196		spin_lock(&lock->lock);
 197		r = __check_holder(lock);
 199		spin_unlock(&lock->lock);
 203		if (__available_for_read(lock)) {
 204			lock->count++;
 205			__add_holder(lock, current);
 206			spin_unlock(&lock->lock);
 214		list_add_tail(&w.list, &lock->waiters);
 215		spin_unlock(&lock->lock);

in bl_down_read_nonblock():
 222	static int bl_down_read_nonblock(struct block_lock *lock)
 226		spin_lock(&lock->lock);
 227		r = __check_holder(lock);
 231		if (__available_for_read(lock)) {
 232			lock->count++;
 233			__add_holder(lock, current);
 239		spin_unlock(&lock->lock);

in bl_up_read():
 243	static void bl_up_read(struct block_lock *lock)
 245		spin_lock(&lock->lock);
 246		BUG_ON(lock->count <= 0);
 247		__del_holder(lock, current);
 248		--lock->count;
 249		if (!list_empty(&lock->waiters))
 250			__wake_many(lock);
 251		spin_unlock(&lock->lock);
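Taken together, bl_down_read()/bl_up_read() behave like a shared lock with recursion detection. A hypothetical caller, mirroring what dm_bm_read_lock()/dm_bm_unlock() do further down:

/* Hypothetical reader-side pattern; block_read_example() is not in the file. */
static int block_read_example(struct block_lock *bl)
{
	int r = bl_down_read(bl);	/* may sleep until __wake_many() admits us */
	if (r)
		return r;		/* non-zero, e.g. recursion reported by __check_holder() */

	/* ... read-only access to the data the lock protects ... */

	bl_up_read(bl);			/* drops the holder slot and wakes waiters if any */
	return 0;
}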
in bl_down_write():
 254	static int bl_down_write(struct block_lock *lock)
 259		spin_lock(&lock->lock);
 260		r = __check_holder(lock);
 262		spin_unlock(&lock->lock);
 266		if (lock->count == 0 && list_empty(&lock->waiters)) {
 267			lock->count = -1;
 268			__add_holder(lock, current);
 269			spin_unlock(&lock->lock);
 281		list_add(&w.list, &lock->waiters);
 282		spin_unlock(&lock->lock);

in bl_up_write():
 290	static void bl_up_write(struct block_lock *lock)
 292		spin_lock(&lock->lock);
 293		__del_holder(lock, current);
 294		lock->count = 0;
 295		if (!list_empty(&lock->waiters))
 296			__wake_many(lock);
 297		spin_unlock(&lock->lock);
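The write side is the exclusive counterpart: bl_down_write() proceeds immediately only when the count is zero and nobody is queued; otherwise it queues its waiter with list_add(), placing it at the front of the list (readers queue at the tail with list_add_tail()), which gives waiting writers preference when __wake_many() scans the queue. A hypothetical caller:

/* Hypothetical writer-side pattern; block_modify_example() is not in the file. */
static int block_modify_example(struct block_lock *bl)
{
	int r = bl_down_write(bl);	/* exclusive: count becomes -1 while held */
	if (r)
		return r;		/* non-zero, e.g. recursion detected */

	/* ... exclusive modification of the protected data ... */

	bl_up_write(bl);		/* count back to 0, then __wake_many() if anyone waits */
	return 0;
}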
member of the per-buffer aux struct:
 350	struct block_lock lock;

in dm_block_manager_alloc_callback():
 358	bl_init(&aux->lock);

in dm_bm_read_lock():
 468	r = bl_down_read(&aux->lock);
 479	bl_up_read(&aux->lock);

in dm_bm_write_lock():
 504	r = bl_down_write(&aux->lock);
 515	bl_up_write(&aux->lock);

in dm_bm_read_try_lock():
 539	r = bl_down_read_nonblock(&aux->lock);
 549	bl_up_read(&aux->lock);

in dm_bm_write_lock_zero():
 575	r = bl_down_write(&aux->lock);

in dm_bm_unlock():
 595	bl_up_write(&aux->lock);
 597	bl_up_read(&aux->lock);
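At the block-manager level the bl_* primitives stay hidden behind the dm_bm_* wrappers listed above, with one struct block_lock kept in each buffer's aux data. A hedged usage sketch follows; the signatures are paraphrased from the persistent-data block-manager API and should be checked against dm-block-manager.h.

/* Sketch of a caller going through the wrappers; inspect_block() and the
 * names bm, b and v are placeholders, not identifiers from the file. */
static int inspect_block(struct dm_block_manager *bm, dm_block_t b,
			 struct dm_block_validator *v)
{
	struct dm_block *blk;
	int r;

	r = dm_bm_read_lock(bm, b, v, &blk);	/* internally: bl_down_read(&aux->lock) */
	if (r)
		return r;

	/* ... read dm_block_data(blk) ... */

	dm_bm_unlock(blk);			/* internally: bl_up_read() for a read-locked block */
	return 0;
}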