/linux-6.8/include/linux/

rwlock_api_smp.h:

    void __lockfunc _raw_read_lock(rwlock_t *lock)      __acquires(lock);
    void __lockfunc _raw_write_lock(rwlock_t *lock)     __acquires(lock);
    void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
    void __lockfunc _raw_read_lock_bh(rwlock_t *lock)   __acquires(lock);
    void __lockfunc _raw_write_lock_bh(rwlock_t *lock)  __acquires(lock);
    void __lockfunc _raw_read_lock_irq(rwlock_t *lock)  __acquires(lock);
    void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
    unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
                                                        __acquires(lock);
    unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
    [all …]
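
These are the SMP slowpath entry points behind the public rwlock wrappers
(read_lock(), write_lock_irqsave(), ...). A minimal caller-side sketch of the
wrapper API; the lock and data here are hypothetical:

    static DEFINE_RWLOCK(table_lock);   /* hypothetical lock */
    static int table_size;              /* hypothetical shared data */

    /* Readers may run concurrently with one another. */
    static int table_get_size(void)
    {
        int ret;

        read_lock(&table_lock);
        ret = table_size;
        read_unlock(&table_lock);
        return ret;
    }

    /* A writer excludes readers and other writers; the _irqsave variant
     * also masks local interrupts so the lock is usable from IRQ context. */
    static void table_set_size(int size)
    {
        unsigned long flags;

        write_lock_irqsave(&table_lock, flags);
        table_size = size;
        write_unlock_irqrestore(&table_lock, flags);
    }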

spinlock_api_up.h:

    #define assert_raw_spin_locked(lock)    do { (void)(lock); } while (0)

    /*
     * In the UP-nondebug case there's no real locking going on, so the
     * only thing we have to do is to keep the preempt counts and irq
     * flags straight, to suppress compiler warnings of unused lock
     * variables.
     */
    #define ___LOCK(lock) \
      do { __acquire(lock); (void)(lock); } while (0)

    #define __LOCK(lock) \
      do { preempt_disable(); ___LOCK(lock); } while (0)

    #define __LOCK_BH(lock) \
      do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)

    #define __LOCK_IRQ(lock) \
      do { local_irq_disable(); __LOCK(lock); } while (0)
    [all …]
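
On uniprocessor kernels a "lock" therefore compiles down to preemption/IRQ
state management plus a sparse annotation. A hand-expanded sketch of what
spin_lock_irq() reduces to under the macros above (illustrative, not the
literal preprocessor output):

    static void up_spin_lock_irq_sketch(spinlock_t *lock)
    {
        local_irq_disable();    /* from __LOCK_IRQ                       */
        preempt_disable();      /* from __LOCK                           */
        __acquire(lock);        /* from ___LOCK: sparse-only annotation  */
        (void)(lock);           /* suppress "unused variable" warning    */
    }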

rwlock.h:

    extern void __rwlock_init(rwlock_t *lock, const char *name,
                              struct lock_class_key *key);
    # define rwlock_init(lock)                                  \
    do {                                                        \
        static struct lock_class_key __key;                     \
                                                                \
        __rwlock_init((lock), #lock, &__key);                   \
    } while (0)
    #else
    # define rwlock_init(lock)                                  \
        do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    #endif

    extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_read_trylock(rwlock_t *lock);
    extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
    extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_write_trylock(rwlock_t *lock);
    [all …]

spinlock_api_smp.h:

    void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)       __acquires(lock);
    void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                               __acquires(lock);
    void __lockfunc
    _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                               __acquires(lock);
    void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)    __acquires(lock);
    void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                               __acquires(lock);

    unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                               __acquires(lock);
    [all …]

spinlock.h:

    #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
    …
    extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                     struct lock_class_key *key, short inner);

    # define raw_spin_lock_init(lock)                           \
    do {                                                        \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
    } while (0)
    #else
    # define raw_spin_lock_init(lock)                           \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
    #endif

    #define raw_spin_is_locked(lock)    arch_spin_is_locked(&(lock)->raw_lock)

    #ifdef arch_spin_is_contended
    #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
    #else
    #define raw_spin_is_contended(lock) (((void)(lock), 0))
    #endif
    …
     * between program-order earlier lock acquisitions and program-order later
    [all …]
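
A minimal caller-side sketch of the corresponding public API
(spin_lock_init() and friends layer on top of the raw primitives above; the
device structure is hypothetical):

    struct mydev {              /* hypothetical driver state */
        spinlock_t lock;
        unsigned int pending;
    };

    static void mydev_setup(struct mydev *dev)
    {
        spin_lock_init(&dev->lock);
    }

    /* Safe against the driver's own interrupt handler: _irqsave masks
     * local interrupts while the lock is held. */
    static void mydev_queue(struct mydev *dev)
    {
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        dev->pending++;
        spin_unlock_irqrestore(&dev->lock, flags);
    }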

spinlock_rt.h:

    extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                                    struct lock_class_key *key, bool percpu);
    #else
    static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                                           struct lock_class_key *key, bool percpu)
    {
    }
    #endif
    …
        rt_mutex_base_init(&(slock)->lock);                     \
    …
        rt_mutex_base_init(&(slock)->lock);                     \
    …
    extern void rt_spin_lock(spinlock_t *lock);
    extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
    extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
    extern void rt_spin_unlock(spinlock_t *lock);
    extern void rt_spin_lock_unlock(spinlock_t *lock);
    extern int rt_spin_trylock_bh(spinlock_t *lock);
    [all …]

local_lock.h:

    /**
     * local_lock_init - Runtime initialize a lock instance
     */
    #define local_lock_init(lock)       __local_lock_init(lock)

    /**
     * local_lock - Acquire a per CPU local lock
     * @lock:   The lock variable
     */
    #define local_lock(lock)            __local_lock(lock)

    /**
     * local_lock_irq - Acquire a per CPU local lock and disable interrupts
     * @lock:   The lock variable
     */
    #define local_lock_irq(lock)        __local_lock_irq(lock)

    /**
     * local_lock_irqsave - Acquire a per CPU local lock, save and disable
     *                      interrupts
     * @lock:   The lock variable
     */
    [all …]
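
A sketch of the canonical usage pattern for this API (as also documented in
Documentation/locking/locktypes.rst; the struct and field names here are
hypothetical):

    struct cpu_stats {              /* hypothetical per-CPU data */
        local_lock_t lock;
        unsigned long events;
    };

    static DEFINE_PER_CPU(struct cpu_stats, cpu_stats) = {
        .lock = INIT_LOCAL_LOCK(lock),
    };

    static void count_event(void)
    {
        /* Protects only this CPU's instance; on !PREEMPT_RT the irq
         * variant boils down to disabling local interrupts. */
        local_lock_irq(&cpu_stats.lock);
        this_cpu_inc(cpu_stats.events);
        local_unlock_irq(&cpu_stats.lock);
    }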

mutex.h:

    extern void mutex_destroy(struct mutex *lock);
    …
    static inline void mutex_destroy(struct mutex *lock) {}
    …
    extern void __mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);

    /**
     * mutex_is_locked - is the mutex locked
     * @lock: the mutex to be queried
     *
     * Returns true if the mutex is locked, false if unlocked.
     */
    extern bool mutex_is_locked(struct mutex *lock);
    …
    extern void __mutex_rt_init(struct mutex *lock, const char *name,
                                struct lock_class_key *key);
    extern int mutex_trylock(struct mutex *lock);

    static inline void mutex_destroy(struct mutex *lock) { }
    …
    extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
    extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
    [all …]
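
A minimal sleeping-lock usage sketch (mutex_lock() may block, so this is for
process context only; the lock and data names are hypothetical):

    static DEFINE_MUTEX(config_mutex);  /* hypothetical lock */
    static int config_value;

    static void config_write(int v)
    {
        mutex_lock(&config_mutex);      /* sleeps if contended */
        config_value = v;
        mutex_unlock(&config_mutex);
    }

    /* Non-blocking attempt, e.g. on an opportunistic fast path. */
    static bool config_try_write(int v)
    {
        if (!mutex_trylock(&config_mutex))
            return false;
        config_value = v;
        mutex_unlock(&config_mutex);
        return true;
    }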

/linux-6.8/kernel/locking/

spinlock_debug.c:

    void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                              struct lock_class_key *key, short inner)
    {
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
    #endif
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
    }
    …
    void __rwlock_init(rwlock_t *lock, const char *name,
                       struct lock_class_key *key)
    {
        /*
         * Make sure we are not reinitializing a held lock:
         */
    [all …]

rtmutex_api.c:

    /*
     * Debug aware fast / slowpath lock,trylock,unlock
     */
    static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
                                                      unsigned int state,
                                                      struct lockdep_map *nest_lock,
                                                      unsigned int subclass)
    {
        int ret;
        …
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
        ret = __rt_mutex_lock(&lock->rtmutex, state);
        if (ret)
            mutex_release(&lock->dep_map, _RET_IP_);
        return ret;
    }
    …
    /**
     * rt_mutex_lock_nested - lock a rt_mutex
     *
     * @lock: the rt_mutex to be locked
     * @subclass: the lockdep subclass
     */
    void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
    {
        __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
    }
    …
    void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
    [all …]
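
Caller-side sketch: an rt_mutex behaves like a mutex but adds priority
inheritance; rt_mutex_lock() is the non-nested wrapper over the common path
above (the lock name here is hypothetical):

    static DEFINE_RT_MUTEX(pi_lock);    /* hypothetical lock */

    static void touch_shared_state(void)
    {
        rt_mutex_lock(&pi_lock);        /* boosts a lower-priority owner */
        /* ... critical section ... */
        rt_mutex_unlock(&pi_lock);
    }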

mutex.c:

    #include <trace/events/lock.h>
    …
    void
    __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    {
        atomic_long_set(&lock->owner, 0);
        raw_spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
    #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
    #endif

        debug_mutex_init(lock, name, key);
    }
    …
    /*
     * @owner: contains: 'struct task_struct *' to the current lock owner,
     * NULL means not owned. Since task_struct pointers are aligned at
     * at least L1_CACHE_BYTES, we have low bits to store extra state.
     *
     * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
     * Bit1 indicates unlock needs to hand the lock to the top-waiter
     * Bit2 indicates handoff has been done and we're waiting for pickup.
     */
    static inline struct task_struct *__mutex_owner(struct mutex *lock)
    [all …]
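
The low-bit packing described in that comment can be made concrete; a sketch
assuming the flag values from kernel/locking/mutex.h (reproduced here for
illustration, with a hypothetical helper name):

    #define MUTEX_FLAG_WAITERS  0x01    /* Bit0: non-empty wait list     */
    #define MUTEX_FLAG_HANDOFF  0x02    /* Bit1: hand off to top waiter  */
    #define MUTEX_FLAG_PICKUP   0x04    /* Bit2: handoff awaiting pickup */
    #define MUTEX_FLAGS         0x07

    /* Mask the flag bits off to recover the owning task pointer. */
    static inline struct task_struct *owner_of(unsigned long owner_word)
    {
        return (struct task_struct *)(owner_word & ~MUTEX_FLAGS);
    }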

spinlock.c:

     * not re-enabled during lock-acquire (which the preempt-spin-ops do):
    …
    /*
     * Some architectures can relax in favour of the CPU owning the lock.
     */
    …
    /*
     * This could be a long-held lock. We both prepare to spin for a long
     * time (making _this_ CPU preemptible if possible), and we also signal
     * towards that other CPU that it should break the lock ASAP.
     */
    #define BUILD_LOCK_OPS(op, locktype)                            \
    void __lockfunc __raw_##op##_lock(locktype##_t *lock)           \
    {                                                               \
        for (;;) {                                                  \
            preempt_disable();                                      \
            if (likely(do_raw_##op##_trylock(lock)))                \
                break;                                              \
            preempt_enable();                                       \
                                                                    \
            arch_##op##_relax(&lock->raw_lock);                     \
        }                                                           \
    }                                                               \
                                                                    \
    unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
    {                                                               \
        unsigned long flags;                                        \
                                                                    \
        for (;;) {                                                  \
            preempt_disable();                                      \
            local_irq_save(flags);                                  \
            if (likely(do_raw_##op##_trylock(lock)))                \
                break;                                              \
            local_irq_restore(flags);                               \
            preempt_enable();                                       \
                                                                    \
            arch_##op##_relax(&lock->raw_lock);                     \
        }                                                           \
    [all …]
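
To make the token pasting concrete, a hand-expanded sketch of what
BUILD_LOCK_OPS(spin, raw_spinlock) generates for the plain-lock case
(illustrative, not the literal preprocessor output):

    void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
    {
        for (;;) {
            preempt_disable();
            if (likely(do_raw_spin_trylock(lock)))
                break;                  /* got it: stay preempt-off */
            preempt_enable();           /* allow preemption while spinning */

            arch_spin_relax(&lock->raw_lock);   /* e.g. cpu_relax() */
        }
    }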

rtmutex.c:

    #include <trace/events/lock.h>
    …
    static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
                                            struct rt_mutex *lock,
                                            struct ww_acquire_ctx *ww_ctx)
    …
    static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
                                                struct ww_acquire_ctx *ww_ctx)
    …
    static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
                                              struct ww_acquire_ctx *ww_ctx)
    …
    static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
                                            struct rt_mutex_waiter *waiter,
                                            struct ww_acquire_ctx *ww_ctx)
    …
    /*
     * lock->owner state tracking:
     *
     * lock->owner holds the task_struct pointer of the owner. Bit 0
     * is used to keep track of the "lock has waiters" state.
     *
     * owner        bit0
     * NULL         0       lock is free (fast acquire possible)
     * NULL         1       lock is free and has waiters and the top waiter
    [all …]
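
A sketch of how that owner encoding is read back (RT_MUTEX_HAS_WAITERS is the
bit-0 mask from rtmutex_common.h; the helper name here is illustrative):

    #define RT_MUTEX_HAS_WAITERS    1UL

    static inline struct task_struct *owner_task(struct rt_mutex_base *lock)
    {
        unsigned long val = (unsigned long)READ_ONCE(lock->owner);

        /* Clear bit0 ("has waiters") to recover the task pointer;
         * NULL with bit0 set means free-but-contended. */
        return (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);
    }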

ww_mutex.h:

    static inline struct mutex_waiter *
    __ww_waiter_first(struct mutex *lock)
    {
        struct mutex_waiter *w;

        w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
            return NULL;

        return w;
    }

    static inline struct mutex_waiter *
    __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
    {
        w = list_next_entry(w, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
            return NULL;
    …
    static inline struct mutex_waiter *
    __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
    {
        w = list_prev_entry(w, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
            return NULL;
    …
    static inline struct mutex_waiter *
    __ww_waiter_last(struct mutex *lock)
    {
        struct mutex_waiter *w;

        w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
    [all …]
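
These helpers return NULL at the list head, which yields a natural iteration
idiom. An illustrative (hypothetical) walker, to be called with the mutex's
wait_lock held:

    static void ww_count_waiters(struct mutex *lock)
    {
        struct mutex_waiter *w;
        int n = 0;

        for (w = __ww_waiter_first(lock); w; w = __ww_waiter_next(lock, w))
            n++;        /* each w is one queued mutex_waiter */

        pr_debug("%d waiters queued\n", n);
    }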

/linux-6.8/fs/bcachefs/

six.c:

    #include <trace/events/lock.h>
    …
    static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
    …
    struct six_lock_vals {
        /* Value we add to the lock in order to take the lock: */
        u32                     lock_val;

        /* If the lock has this value (used as a mask), taking the lock fails: */
        u32                     lock_fail;

        /* Mask that indicates lock is held for this type: */
        u32                     held_mask;

        /* Waitlist we wakeup when releasing the lock: */
        enum six_lock_type      unlock_wakeup;
    };
    …
    static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
    {
        if ((atomic_read(&lock->state) & mask) != mask)
            atomic_or(mask, &lock->state);
    }

    static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
    [all …]

six.h:

     * write lock without deadlocking, so an operation that updates multiple nodes
    …
     *   six_lock_read(&foo->lock);
     *   six_unlock_read(&foo->lock);
     *
     * An intent lock must be held before taking a write lock:
     *   six_lock_intent(&foo->lock);
     *   six_lock_write(&foo->lock);
     *   six_unlock_write(&foo->lock);
     *   six_unlock_intent(&foo->lock);
    …
     * There are also interfaces that take the lock type as an enum:
     *
     *   six_lock_type(&foo->lock, SIX_LOCK_read);
    [all …]
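
Putting the quoted rules together, a hedged sketch of a node update under a
six lock (the containing struct and its fields are hypothetical):

    struct btree_node_x {       /* hypothetical container */
        struct six_lock lock;
        u64 payload;
    };

    static void node_update(struct btree_node_x *n, u64 v)
    {
        /* Intent: excludes other intent/write holders, not readers. */
        six_lock_intent(&n->lock);

        /* Write: now exclude readers for the actual modification. */
        six_lock_write(&n->lock);
        n->payload = v;
        six_unlock_write(&n->lock);

        six_unlock_intent(&n->lock);
    }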

/linux-6.8/fs/ocfs2/dlm/

dlmast.c:

    static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                               struct dlm_lock *lock);
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

    /* …
     * lock level will obsolete a pending bast.
     * For example, if dlm_thread queued a bast for an EX lock that
     * …
     * lock owner downconverted to NL, the bast is now obsolete.
     * …
     * This is needed because the lock and convert paths can queue
     * …
     */
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
    {
        …
        assert_spin_locked(&lock->spinlock);

        if (lock->ml.highest_blocked == LKM_IVMODE)
            return 0;
        BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
    [all …]

dlmlock.c:

     * underlying calls for lock creation
    …
                               struct dlm_lock *lock, int flags);
    …
    static void dlm_lock_detach_lockres(struct dlm_lock *lock);
    …
    /* Tell us whether we can grant a new lock request.
     * …
     * returns: 1 if the lock can be granted, 0 otherwise.
     */
    static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
                                      struct dlm_lock *lock)
    {
        struct dlm_lock *tmplock;

        list_for_each_entry(tmplock, &res->granted, list) {
            if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                return 0;
        }

        list_for_each_entry(tmplock, &res->converting, list) {
            if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                return 0;
            if (!dlm_lock_compatible(tmplock->ml.convert_type,
                                     lock->ml.type))
                return 0;
        }

        return 1;
    }

    /* performs lock creation at the lockres master site
    [all …]
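
The grant test hinges on dlm_lock_compatible(); a hedged sketch of that
mode-compatibility predicate (the real helper lives elsewhere in the dlm
headers — treat this as an approximation, reduced to the modes ocfs2 uses
most):

    /* Classic DLM compatibility: NL is compatible with anything,
     * PR is compatible with PR and NL, EX only with NL. */
    static inline int lock_modes_compatible(int existing, int request)
    {
        if (existing == LKM_NLMODE || request == LKM_NLMODE)
            return 1;
        if (existing == LKM_EXMODE || request == LKM_EXMODE)
            return 0;
        return 1;   /* PR vs PR */
    }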

dlmconvert.c:

     * underlying calls for lock conversion
    …
     * only one that holds a lock on exit (res->spinlock).
    …
    static enum dlm_status __dlmconvert_master(…,
                               struct dlm_lock *lock, int flags,
                               int type, …);
    static enum dlm_status dlm_send_remote_convert_request(…,
                               struct dlm_lock *lock, int flags, int type);
    …
    enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                                      struct dlm_lock *lock, int flags, int type)
    {
        …
        status = __dlmconvert_master(dlm, res, lock, flags, type,
                                     &call_ast, &kick_thread);
        …
        if (call_ast)
            dlm_queue_ast(dlm, lock);
    …
    /* performs lock conversion at the lockres master site
     * …
     * taken: takes and drops lock->spinlock
     * …
     * call_ast: whether ast should be called for this lock
    [all …]

/linux-6.8/drivers/md/persistent-data/

dm-block-manager.c:

     * trace is also emitted for the previous lock acquisition.
    …
        spinlock_t lock;
    …
    static unsigned int __find_holder(struct block_lock *lock,
                                      struct task_struct *task)
    {
        unsigned int i;

        for (i = 0; i < MAX_HOLDERS; i++)
            if (lock->holders[i] == task)
                break;

        BUG_ON(i == MAX_HOLDERS);
        return i;
    }

    /* call this *after* you increment lock->count */
    static void __add_holder(struct block_lock *lock, struct task_struct *task)
    {
        unsigned int h = __find_holder(lock, NULL);
        …
        lock->holders[h] = task;
        …
        t = lock->traces + h;
    …
    /* call this *before* you decrement lock->count */
    [all …]

/linux-6.8/include/asm-generic/

qrwlock.h:

     * Queue read/write lock
    …
    #define _QW_LOCKED  0x0ff   /* A writer holds the lock */
    …
    extern void queued_read_lock_slowpath(struct qrwlock *lock);
    extern void queued_write_lock_slowpath(struct qrwlock *lock);

    /**
     * queued_read_trylock - try to acquire read lock of a queued rwlock
     * @lock : Pointer to queued rwlock structure
     * Return: 1 if lock acquired, 0 if failed
     */
    static inline int queued_read_trylock(struct qrwlock *lock)
    {
        int cnts;

        cnts = atomic_read(&lock->cnts);
        if (likely(!(cnts & _QW_WMASK))) {
            cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
    [all …]
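
The snippet cuts off mid-fast-path; a hedged completion of the logic (it
mirrors the real implementation as far as shown, with the back-out step
assumed and the function renamed to mark it as a sketch):

    static inline int read_trylock_sketch(struct qrwlock *lock)
    {
        int cnts;

        cnts = atomic_read(&lock->cnts);
        if (likely(!(cnts & _QW_WMASK))) {
            /* Optimistically add a reader reference ... */
            cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
            if (likely(!(cnts & _QW_WMASK)))
                return 1;   /* no writer raced us: held for read */
            /* A writer appeared: drop our reference and fail. */
            atomic_sub(_QR_BIAS, &lock->cnts);
        }
        return 0;
    }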

/linux-6.8/rust/kernel/sync/

lock.rs:

    //! Generic kernel lock and guard.
    //!
    //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
    //! spinlocks, raw spinlocks) to be provided with minimal effort.
    …
    /// The "backend" of a lock.
    ///
    /// It is the actual implementation of the lock, without the need to repeat patterns used in all
    /// locks.
    ///
    /// # Safety
    ///
    /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
    ///   is owned, that is, between calls to `lock` and `unlock`.
    /// - Implementers must also ensure that `relock` uses the same locking method as the original
    ///   lock operation.
    pub unsafe trait Backend {
        /// The state required by the lock.
        type State;

        /// The state required to be kept between lock and unlock.
        type GuardState;

        /// Initialises the lock.
    [all …]

/linux-6.8/Documentation/locking/

lockdep-design.rst:

    Lock-class
    ----------

    … even if the locks may have multiple (possibly
    tens of thousands of) instantiations. For example a lock in the inode
    struct is one class, while each inode has its own instantiation of that
    lock class.

    The validator tracks the 'usage state' of lock-classes, and it tracks
    the dependencies between different lock-classes. Lock usage indicates
    how a lock is used with regard to its IRQ contexts, while lock
    dependency can be understood as lock order, where L1 -> L2 suggests that
    a task is attempting to acquire L2 while holding L1. … there is a
    continuing effort to prove lock usages and dependencies are correct or
    the validator will shoot a splat if incorrect.

    A lock-class's behavior is constructed by its instances collectively:
    when the first instance of a lock-class is used after bootup the class
    gets registered …
    [all …]
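
The L1 -> L2 dependency in that paragraph is recorded whenever code nests the
two locks; a minimal illustration (the lock names are hypothetical):

    static DEFINE_SPINLOCK(L1);
    static DEFINE_SPINLOCK(L2);

    static void forward(void)
    {
        spin_lock(&L1);
        spin_lock(&L2);     /* lockdep records the L1 -> L2 dependency */
        spin_unlock(&L2);
        spin_unlock(&L1);
    }

    /* If any other code path ever nests them the other way around
     * (L2 then L1), lockdep flags the cycle as a potential deadlock. */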

robust-futex-ABI.rst:

    … pointer to a single linked list of 'lock entries', one per lock,
    … to itself, 'head'.  The last 'lock entry' points back to the 'head'.

    … address of the associated 'lock entry', plus or minus, of what will
    be called the 'lock word', from that 'lock entry'.  The 'lock word'
    is always a 32 bit word, unlike the other words above.  The 'lock
    word' holds 2 flag bits in the upper 2 bits, and the thread id (TID)
    of the thread holding the lock in the bottom 30 bits.  See further
    below for a description of the flag bits.

    … the address of the 'lock entry', during list insertion and removal, …

    Each 'lock entry' on the single linked list starting at 'head' consists
    of just a single word, pointing to the next 'lock entry', or back to
    'head' if there are no more entries.  In addition, nearby to each 'lock
    [all …]
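
A C sketch of the structures and bit layout described above (the real ABI
types and flag constants live in include/uapi/linux/futex.h; __user
annotations omitted for brevity):

    struct robust_list {
        struct robust_list *next;   /* next 'lock entry', or back to head */
    };

    struct robust_list_head {
        struct robust_list list;            /* the 'head' of the list      */
        long futex_offset;                  /* signed offset to 'lock word' */
        struct robust_list *list_op_pending; /* entry being inserted/removed */
    };

    /* Layout of the 32-bit 'lock word': */
    #define FUTEX_WAITERS       0x80000000  /* there are waiters to wake    */
    #define FUTEX_OWNER_DIED    0x40000000  /* owner exited holding the lock */
    #define FUTEX_TID_MASK      0x3fffffff  /* owner TID in bottom 30 bits  */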

/linux-6.8/arch/powerpc/include/asm/

simple_spinlock.h:

     * Simple spin lock operations.
    …
    static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
    {
        return lock.slock == 0;
    }

    static inline int arch_spin_is_locked(arch_spinlock_t *lock)
    {
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
    }

    /*
     * This returns the old value in the lock, so we succeeded
     * in getting the lock if the return value is 0.
     */
    static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
    {
        …
        : "r" (token), "r" (&lock->slock), [eh] "n" (eh)
        …
    }

    static inline int arch_spin_trylock(arch_spinlock_t *lock)
    [all …]
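
Per that comment, callers turn the returned old value into a boolean by
comparing against zero; a sketch of the wrapper this implies (renamed to mark
it as illustrative — the real arch_spin_trylock() is equivalent):

    static inline int arch_spin_trylock_sketch(arch_spinlock_t *lock)
    {
        /* An old value of 0 means the lock was free and is now ours. */
        return __arch_spin_trylock(lock) == 0;
    }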