Lines Matching full:lock

29  * - spinning lock semantics
30 * - blocking lock semantics
31 * - try-lock semantics for readers and writers
32 * - one level nesting, allowing read lock to be taken by the same thread that
33 * already has write lock
46 * denotes how many times the blocking lock was held;
49 * Write lock always allows only one thread to access the data.
60 * Lock recursion
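A minimal sketch of the one-level nesting rule described above, put together from the tree-locking calls that appear later in this listing (btrfs_tree_lock(), btrfs_tree_read_lock() and their unlock counterparts); the wrapper function is hypothetical and assumes the usual fs/btrfs headers.

#include "ctree.h"
#include "locking.h"

/*
 * Hypothetical caller: the thread that already holds the write lock on an
 * extent buffer may take one nested read lock on the same buffer.
 */
static void nesting_sketch(struct extent_buffer *eb)
{
        btrfs_tree_lock(eb);            /* write lock (spinning) */
        btrfs_tree_read_lock(eb);       /* nested read lock, same thread */
        /* ... read the buffer ... */
        btrfs_tree_read_unlock(eb);     /* drop the nested read lock first */
        btrfs_tree_unlock(eb);          /* then drop the write lock */
}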
188 * Mark already held read lock as blocking. Can be nested in write lock by the
192 * on the lock will not actively spin but sleep instead.
200 * No lock is required. The lock owner may change if we have a read
201 * lock, but it won't change to or away from us. If we have the write
202 * lock, we are the owner and it'll never change. in btrfs_set_lock_blocking_read()
209 read_unlock(&eb->lock); in btrfs_set_lock_blocking_read()
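A hedged sketch of the spin-then-block pattern this comment describes, using the same API shown in this listing; the wrapper function is hypothetical.

static void blocking_read_sketch(struct extent_buffer *eb)
{
        btrfs_tree_read_lock(eb);               /* spinning read lock */
        btrfs_set_lock_blocking_read(eb);       /* long operation ahead: waiters sleep */
        /* ... work that may sleep ... */
        btrfs_tree_read_unlock_blocking(eb);    /* pairs with the blocking read state */
}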
213 * Mark already held write lock as blocking.
216 * waiting on the lock will not actively spin but sleep instead.
224 * No lock is required. The lock owner may change if we have a read
225 * lock, but it won't change to or away from us. If we have the write
226 * lock, we are the owner and it'll never change. in btrfs_set_lock_blocking_write()
234 write_unlock(&eb->lock); in btrfs_set_lock_blocking_write()
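The write-side counterpart, again as a hypothetical sketch: take the spinning write lock, mark it blocking before any work that may sleep, and release it with btrfs_tree_unlock(), which handles both modes.

static void blocking_write_sketch(struct extent_buffer *eb)
{
        btrfs_tree_lock(eb);                    /* spinning write lock */
        btrfs_set_lock_blocking_write(eb);      /* waiters sleep instead of spinning */
        /* ... work that may sleep ... */
        btrfs_tree_unlock(eb);                  /* releases spinning or blocking write lock */
}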
239 * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
240 * Can be nested in write lock by the same thread.
255 read_lock(&eb->lock); in __btrfs_tree_read_lock()
262 * We allow an additional read lock to be added because in __btrfs_tree_read_lock()
270 read_unlock(&eb->lock); in __btrfs_tree_read_lock()
274 read_unlock(&eb->lock); in __btrfs_tree_read_lock()
290 * Lock extent buffer for read, optimistically expecting that there are no
300 read_lock(&eb->lock); in btrfs_tree_read_lock_atomic()
301 /* Refetch value after lock */ in btrfs_tree_read_lock_atomic()
303 read_unlock(&eb->lock); in btrfs_tree_read_lock_atomic()
313 * Try-lock for read. Don't block or wait for contending writers.
322 if (!read_trylock(&eb->lock)) in btrfs_try_tree_read_lock()
325 /* Refetch value after lock */ in btrfs_try_tree_read_lock()
327 read_unlock(&eb->lock); in btrfs_try_tree_read_lock()
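A sketch of how the read try-lock is typically used (hypothetical caller): bail out on contention and let the caller fall back to the blocking path.

static bool try_read_sketch(struct extent_buffer *eb)
{
        if (!btrfs_try_tree_read_lock(eb))
                return false;           /* contended: caller takes the slow path */
        /* ... short read-only access ... */
        btrfs_tree_read_unlock(eb);
        return true;
}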
337 * Try-lock for write. May block until the lock is uncontended, but does not
347 write_lock(&eb->lock); in btrfs_try_tree_write_lock()
348 /* Refetch value after lock */ in btrfs_try_tree_write_lock()
350 write_unlock(&eb->lock); in btrfs_try_tree_write_lock()
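And the write-side try-then-fallback pattern, sketched with a hypothetical caller: attempt the cheap try-lock first, otherwise take the full blocking write lock.

static void try_write_or_block_sketch(struct extent_buffer *eb)
{
        if (!btrfs_try_tree_write_lock(eb)) {
                /* contended by blocking readers/writers: take the slow path */
                btrfs_tree_lock(eb);
                btrfs_set_lock_blocking_write(eb);
        }
        /* ... modify the buffer ... */
        btrfs_tree_unlock(eb);
}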
361 * Release read lock. Must be used only if the lock is in spinning mode. If
362 * the read lock is nested, the unlock must pair with the nested read lock and come before the write unlock.
370 * if we're nested, we have the write lock. No new locking
371 * is needed as long as we are the lock owner.
373 * field only matters to the lock owner. in btrfs_tree_read_unlock()
382 read_unlock(&eb->lock); in btrfs_tree_read_unlock()
386 * Release read lock, previously set to blocking by a pairing call to
387 * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
396 * if we're nested, we have the write lock. No new locking
397 * is needed as long as we are the lock owner.
399 * field only matters to the lock owner. in btrfs_tree_read_unlock_blocking()
414 * Lock for write. Wait for all blocking and spinning readers and writers. This
415 * starts context where reader lock could be nested by the same thread.
420 __acquires(&eb->lock) in __btrfs_tree_lock()
431 write_lock(&eb->lock); in __btrfs_tree_lock()
432 /* Refetch value after lock */ in __btrfs_tree_lock()
435 write_unlock(&eb->lock); in __btrfs_tree_lock()
450 * Release the write lock, either blocking or spinning (i.e. there's no need
452 * This also ends the context for nesting, the read lock must have been
461 * that already owns the lock so we don't need to use READ_ONCE in btrfs_tree_unlock()
484 write_unlock(&eb->lock); in btrfs_tree_unlock()
500 * If we currently have a spinning reader or writer lock this in btrfs_set_path_blocking()
518 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
542 * we end up with a lock on the root node.
544 * Return: root extent buffer with write lock held
563 * we end up with a lock on the root node.
565 * Return: root extent buffer with read lock held
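A hypothetical caller of the root-locking helpers referenced above; it assumes, as the surrounding btrfs code does, that btrfs_lock_root_node() returns the root extent buffer with the write lock held and an extra reference that the caller must drop.

static void root_lock_sketch(struct btrfs_root *root)
{
        struct extent_buffer *root_eb;

        root_eb = btrfs_lock_root_node(root);   /* write lock held on return */
        /* ... operate on the root node ... */
        btrfs_tree_unlock(root_eb);
        free_extent_buffer(root_eb);            /* drop the reference taken by the helper */
}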
587 * DREW stands for double-reader-writer-exclusion lock. It's used in situation
591 * writer both race to acquire their respective sides of the lock the writer
592 * would yield its lock as soon as it detects a concurrent reader. Additionally
594 * acquire the lock.
597 int btrfs_drew_lock_init(struct btrfs_drew_lock *lock) in btrfs_drew_lock_init() argument
601 ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL); in btrfs_drew_lock_init()
605 atomic_set(&lock->readers, 0); in btrfs_drew_lock_init()
606 init_waitqueue_head(&lock->pending_readers); in btrfs_drew_lock_init()
607 init_waitqueue_head(&lock->pending_writers); in btrfs_drew_lock_init()
612 void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock) in btrfs_drew_lock_destroy() argument
614 percpu_counter_destroy(&lock->writers); in btrfs_drew_lock_destroy()
618 bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock) in btrfs_drew_try_write_lock() argument
620 if (atomic_read(&lock->readers)) in btrfs_drew_try_write_lock()
623 percpu_counter_inc(&lock->writers); in btrfs_drew_try_write_lock()
627 if (atomic_read(&lock->readers)) { in btrfs_drew_try_write_lock()
628 btrfs_drew_write_unlock(lock); in btrfs_drew_try_write_lock()
635 void btrfs_drew_write_lock(struct btrfs_drew_lock *lock) in btrfs_drew_write_lock() argument
638 if (btrfs_drew_try_write_lock(lock)) in btrfs_drew_write_lock()
640 wait_event(lock->pending_writers, !atomic_read(&lock->readers)); in btrfs_drew_write_lock()
644 void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock) in btrfs_drew_write_unlock() argument
646 percpu_counter_dec(&lock->writers); in btrfs_drew_write_unlock()
647 cond_wake_up(&lock->pending_readers); in btrfs_drew_write_unlock()
650 void btrfs_drew_read_lock(struct btrfs_drew_lock *lock) in btrfs_drew_read_lock() argument
652 atomic_inc(&lock->readers); in btrfs_drew_read_lock()
662 wait_event(lock->pending_readers, in btrfs_drew_read_lock()
663 percpu_counter_sum(&lock->writers) == 0); in btrfs_drew_read_lock()
666 void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock) in btrfs_drew_read_unlock() argument
672 if (atomic_dec_and_test(&lock->readers)) in btrfs_drew_read_unlock()
673 wake_up(&lock->pending_writers); in btrfs_drew_read_unlock()
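To tie the DREW pieces above together, a hedged usage sketch: a writer backs off (or, with btrfs_drew_write_lock(), waits) while any reader is active, and a reader waits for in-flight writers to drain; concurrent writers, like concurrent readers, are allowed. Both caller functions are hypothetical and assume the usual kernel headers for -EBUSY.

static int drew_writer_sketch(struct btrfs_drew_lock *lock)
{
        if (!btrfs_drew_try_write_lock(lock))
                return -EBUSY;          /* a reader is active, back off */
        /* ... writer work, may run concurrently with other writers ... */
        btrfs_drew_write_unlock(lock);
        return 0;
}

static void drew_reader_sketch(struct btrfs_drew_lock *lock)
{
        btrfs_drew_read_lock(lock);     /* waits until in-flight writers drain */
        /* ... reader work, may run concurrently with other readers ... */
        btrfs_drew_read_unlock(lock);
}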