/linux/fs/bcachefs/
six.c
  in __six_lock_wakeup():
    212  static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
    224  if (w->lock_want != lock_type)
    227  if (saw_one && lock_type != SIX_LOCK_read)
    231  ret = __do_six_trylock(lock, lock_type, w->task, false);
    254  six_clear_bitmask(lock, SIX_LOCK_WAITING_read << lock_type);
    259  lock_type = -ret - 1;
  in six_lock_wakeup():
    266  enum six_lock_type lock_type)
    268  if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read))
    271  if (!(state & (SIX_LOCK_WAITING_read << lock_type)))
    274  __six_lock_wakeup(lock, lock_type);
  [all...]
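The six.c hits above show __six_lock_wakeup() walking the wait list and using lock_type to pick which waiters to wake: waiters wanting a different type are skipped, and only read waiters are granted in batches. The following is a minimal userspace sketch of that selection pattern, not the bcachefs code itself; the enum mirrors the hits (SIX_LOCK_intent added, assuming the usual read/intent/write triple), while struct waiter, try_grant() and wakeup_type() are hypothetical stand-ins for struct six_lock_waiter, __do_six_trylock() and the real wakeup path.

#include <stdbool.h>
#include <stdio.h>

/* Lock types as named in the six.c hits above (intent assumed). */
enum six_lock_type { SIX_LOCK_read, SIX_LOCK_intent, SIX_LOCK_write };

/* Hypothetical waiter node; the real struct six_lock_waiter differs. */
struct waiter {
        enum six_lock_type lock_want;
        int                task_id;
        struct waiter      *next;
};

/* Stand-in for __do_six_trylock(): pretend every grant succeeds. */
static bool try_grant(struct waiter *w)
{
        printf("granting %s to task %d\n",
               w->lock_want == SIX_LOCK_read ? "read" :
               w->lock_want == SIX_LOCK_intent ? "intent" : "write",
               w->task_id);
        return true;
}

/*
 * Wake waiters that want @lock_type.  Mirrors the shape of
 * __six_lock_wakeup(): skip waiters wanting another type, and stop
 * after one grant unless we are handing out read locks, which can be
 * granted in batches.
 */
static void wakeup_type(struct waiter *head, enum six_lock_type lock_type)
{
        bool saw_one = false;

        for (struct waiter *w = head; w; w = w->next) {
                if (w->lock_want != lock_type)
                        continue;
                if (saw_one && lock_type != SIX_LOCK_read)
                        break;
                if (try_grant(w))
                        saw_one = true;
        }
}

int main(void)
{
        struct waiter c = { SIX_LOCK_read,   3, NULL };
        struct waiter b = { SIX_LOCK_intent, 2, &c };
        struct waiter a = { SIX_LOCK_read,   1, &b };

        wakeup_type(&a, SIX_LOCK_read);  /* grants tasks 1 and 3, skips 2 */
        return 0;
}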
btree_cache.c
  in bch2_btree_node_fill():
    902   enum six_lock_type lock_type,
    991   if (!six_relock_type(&b->c.lock, lock_type, seq))
    995   if (lock_type == SIX_LOCK_read)
  in __bch2_btree_node_get():
    1041  enum six_lock_type lock_type,
    1060  level, lock_type, true);
    1073  ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
    1082  six_unlock_type(&b->c.lock, lock_type);
    1098  six_unlock_type(&b->c.lock, lock_type);
    1112  if (!six_relock_type(&b->c.lock, lock_type, seq))
    1120  six_unlock_type(&b->c.lock, lock_type);
  in bch2_btree_node_get():
    1165  bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path, const struct bkey_i *k, unsigned level, enum six_lock_type lock_type, unsigned long trace_ip)
  [all...]
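The btree_cache.c hits pass a seq value to six_relock_type(): the node lock is dropped around work that may block, and is re-taken only if its lock sequence has not changed in the meantime, otherwise the caller must revalidate. The sketch below illustrates that drop/relock-by-sequence idea under the assumption that the sequence is bumped when a writer releases the lock; struct seq_lock and both helpers are hypothetical, not the six-lock API.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical lock with a modification sequence, loosely modelled on
 * the seq argument that six_relock_type() takes in the hits above.
 */
struct seq_lock {
        pthread_mutex_t mutex;
        uint64_t        seq;    /* bumped whenever a writer releases */
};

static uint64_t seq_lock_release_write(struct seq_lock *l)
{
        uint64_t seq = ++l->seq;
        pthread_mutex_unlock(&l->mutex);
        return seq;
}

/* Re-take the lock only if nothing wrote to it since @seq was sampled. */
static bool seq_relock(struct seq_lock *l, uint64_t seq)
{
        pthread_mutex_lock(&l->mutex);
        if (l->seq == seq)
                return true;
        pthread_mutex_unlock(&l->mutex);
        return false;
}

int main(void)
{
        struct seq_lock l = { PTHREAD_MUTEX_INITIALIZER, 0 };

        pthread_mutex_lock(&l.mutex);
        uint64_t seq = seq_lock_release_write(&l);

        /* ... blocking work (I/O, memory allocation) would go here ... */

        if (!seq_relock(&l, seq)) {
                puts("lock changed underneath us, must revalidate");
                return 1;
        }
        puts("relocked at the same sequence, cached state still valid");
        pthread_mutex_unlock(&l.mutex);
        return 0;
}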
btree_locking.h
  in btree_node_unlock():
    135  int lock_type = btree_node_locked_type(path, level);
    139  if (lock_type != BTREE_NODE_UNLOCKED) {
    140  if (unlikely(lock_type == BTREE_NODE_WRITE_LOCKED)) {
    142  lock_type = BTREE_NODE_INTENT_LOCKED;
    144  six_unlock_type(&path->l[level].b->c.lock, lock_type);
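The btree_locking.h hits show btree_node_unlock() reading the cached per-level lock state and, for a write-locked node, downgrading the recorded type to BTREE_NODE_INTENT_LOCKED before calling six_unlock_type(), presumably because the write lock there sits on top of an already-held intent lock. A simplified sketch of that per-level tracking and downgrade follows; struct path and the unlock_type()/node_unlock() helpers here are hypothetical, and BTREE_NODE_READ_LOCKED is assumed as a fourth state alongside the three names visible in the hits.

#include <stdio.h>

/* Per-level lock states, modelled on the names in the hits above. */
enum btree_node_locked_type {
        BTREE_NODE_UNLOCKED,
        BTREE_NODE_READ_LOCKED,         /* assumed, not in the hits */
        BTREE_NODE_INTENT_LOCKED,
        BTREE_NODE_WRITE_LOCKED,
};

#define MAX_LEVELS 4

/* Hypothetical path: just the cached lock state per tree level. */
struct path {
        enum btree_node_locked_type locked[MAX_LEVELS];
};

/* Stand-in for six_unlock_type(); only prints what would be released. */
static void unlock_type(int level, enum btree_node_locked_type t)
{
        printf("level %d: releasing %s lock\n", level,
               t == BTREE_NODE_INTENT_LOCKED ? "intent" : "read");
}

/*
 * Mirrors the shape of btree_node_unlock(): do nothing if the level is
 * not locked, and release a write-locked node as an intent lock.
 */
static void node_unlock(struct path *p, int level)
{
        enum btree_node_locked_type lock_type = p->locked[level];

        if (lock_type == BTREE_NODE_UNLOCKED)
                return;
        if (lock_type == BTREE_NODE_WRITE_LOCKED)
                lock_type = BTREE_NODE_INTENT_LOCKED;

        unlock_type(level, lock_type);
        p->locked[level] = BTREE_NODE_UNLOCKED;
}

int main(void)
{
        struct path p = { { BTREE_NODE_READ_LOCKED, BTREE_NODE_WRITE_LOCKED,
                            BTREE_NODE_UNLOCKED, BTREE_NODE_UNLOCKED } };

        node_unlock(&p, 0);
        node_unlock(&p, 1);     /* write lock released as intent */
        node_unlock(&p, 2);     /* already unlocked, no-op */
        return 0;
}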
/linux/include/linux/
lockdep_types.h
    140  u8 lock_type;    (struct member)
    192  u8 lock_type;    (struct member)
lockdep.h
    129  struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
    161  (lock)->dep_map.lock_type)
    167  (lock)->dep_map.lock_type)
    173  (lock)->dep_map.lock_type)
    179  (lock)->dep_map.lock_type)
    316  .lock_type = LD_LOCK_WAIT_OVERRIDE, }
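Taken together, the lockdep_types.h and lockdep.h hits show that both the map and its class carry a u8 lock_type tag (LD_LOCK_NORMAL, LD_LOCK_PERCPU, LD_LOCK_WAIT_OVERRIDE), that lockdep_init_map_type() receives it as its last argument, and that the wrappers at lines 161-179 pass the existing (lock)->dep_map.lock_type back in, so re-initialisation preserves the tag. Below is a small self-contained sketch of that pattern; struct dep_map is cut down to the two fields the example needs, and both helper names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Lock-type tags as named in the lockdep hits above. */
enum { LD_LOCK_NORMAL, LD_LOCK_PERCPU, LD_LOCK_WAIT_OVERRIDE };

/* Hypothetical cut-down dep_map: only what this sketch needs. */
struct dep_map {
        const char *name;
        uint8_t     lock_type;
};

/* Shaped like lockdep_init_map_type(): record the caller-supplied type. */
static void dep_map_init_type(struct dep_map *map, const char *name,
                              uint8_t lock_type)
{
        map->name = name;
        map->lock_type = lock_type;
}

/*
 * The lockdep.h wrappers at lines 161-179 re-initialise a map while
 * passing "(lock)->dep_map.lock_type" back in, so the tag chosen at
 * first initialisation survives re-registration under a new name/key.
 */
static void dep_map_reinit(struct dep_map *map, const char *new_name)
{
        dep_map_init_type(map, new_name, map->lock_type);
}

int main(void)
{
        struct dep_map map;

        dep_map_init_type(&map, "my_percpu_lock", LD_LOCK_PERCPU);
        dep_map_reinit(&map, "my_percpu_lock#2");

        printf("%s: lock_type=%u (still LD_LOCK_PERCPU)\n",
               map.name, map.lock_type);
        return 0;
}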
spinlock_types_raw.h
    45   .lock_type = LD_LOCK_PERCPU, \
local_lock_internal.h
    29   .lock_type = LD_LOCK_PERCPU, \
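The spinlock_types_raw.h and local_lock_internal.h hits both come from initialiser macros (hence the trailing backslashes) that tag the embedded dep_map with .lock_type = LD_LOCK_PERCPU at definition time. The sketch below shows the same designated-initialiser-inside-a-macro pattern; struct dep_map_stub, DEP_MAP_PERCPU_INIT() and DEFINE_MY_PERCPU_LOCK() are made-up names, not the kernel macros.

#include <stdio.h>

enum { LD_LOCK_NORMAL, LD_LOCK_PERCPU, LD_LOCK_WAIT_OVERRIDE };

/* Hypothetical stand-in for the lockdep map embedded in a lock type. */
struct dep_map_stub {
        const char *name;
        int         lock_type;
};

/*
 * Same shape as the initialiser macros in the hits above: a designated
 * initialiser fragment, continued with backslashes, that tags the map
 * as per-CPU when the lock is defined.
 */
#define DEP_MAP_PERCPU_INIT(lockname)           \
        {                                       \
                .name = #lockname,              \
                .lock_type = LD_LOCK_PERCPU,    \
        }

/* Hypothetical per-CPU lock type using the initialiser. */
struct my_percpu_lock {
        int                  owner_cpu;
        struct dep_map_stub  dep_map;
};

#define DEFINE_MY_PERCPU_LOCK(name)                             \
        struct my_percpu_lock name = {                          \
                .owner_cpu = -1,                                \
                .dep_map = DEP_MAP_PERCPU_INIT(name),           \
        }

static DEFINE_MY_PERCPU_LOCK(test_lock);

int main(void)
{
        printf("%s: lock_type=%d\n", test_lock.dep_map.name,
               test_lock.dep_map.lock_type);
        return 0;
}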
/linux/lib/
xarray.c
  in xas_lock_type():
    38   static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
    40   if (lock_type == XA_LOCK_IRQ)
    42   else if (lock_type == XA_LOCK_BH)
  in xas_unlock_type():
    48   static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
    50   if (lock_type == XA_LOCK_IRQ)
    52   else if (lock_type == XA_LOCK_BH)
  in __xas_nomem():
    331  unsigned int lock_type = xa_lock_type(xas->xa);
    340  xas_unlock_type(xas, lock_type);
    342  xas_lock_type(xas, lock_type);
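The xarray.c hits show xas_lock_type()/xas_unlock_type() selecting the interrupt-disabling, bottom-half-disabling, or plain locking variant based on lock_type, and __xas_nomem() dropping and re-taking the lock with the same type around an allocation. A userspace sketch of that dispatch-on-lock-type pattern follows, with the actual spinlock variants replaced by print stubs and all function names hypothetical.

#include <stdio.h>

/* Lock flavours, named after the XA_LOCK_* values in the hits above. */
enum { LOCK_PLAIN, LOCK_BH, LOCK_IRQ };

/* Stubbed lock variants; in the kernel these would be spin_lock(),
 * spin_lock_bh() and spin_lock_irq() on the xarray's spinlock. */
static void lock_plain(void)   { puts("lock (plain)"); }
static void lock_bh(void)      { puts("lock (bottom halves disabled)"); }
static void lock_irq(void)     { puts("lock (interrupts disabled)"); }
static void unlock_plain(void) { puts("unlock (plain)"); }
static void unlock_bh(void)    { puts("unlock (bottom halves enabled)"); }
static void unlock_irq(void)   { puts("unlock (interrupts enabled)"); }

/* Mirrors xas_lock_type(): pick the variant matching lock_type. */
static void do_lock_type(unsigned int lock_type)
{
        if (lock_type == LOCK_IRQ)
                lock_irq();
        else if (lock_type == LOCK_BH)
                lock_bh();
        else
                lock_plain();
}

static void do_unlock_type(unsigned int lock_type)
{
        if (lock_type == LOCK_IRQ)
                unlock_irq();
        else if (lock_type == LOCK_BH)
                unlock_bh();
        else
                unlock_plain();
}

/* Mirrors the __xas_nomem() hit: drop the lock with the same type it
 * was taken with, do something that may sleep, then re-take it. */
static void retry_with_alloc(unsigned int lock_type)
{
        do_unlock_type(lock_type);
        puts("  ...allocate memory while unlocked...");
        do_lock_type(lock_type);
}

int main(void)
{
        unsigned int lock_type = LOCK_BH;

        do_lock_type(lock_type);
        retry_with_alloc(lock_type);
        do_unlock_type(lock_type);
        return 0;
}

Parameterising the variant this way lets a single slow path (like __xas_nomem() in the hits) drop and re-take the lock without knowing which flavour its caller used.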
/linux/fs/afs/
flock.c
  in afs_grant_locks():
    93   bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);
  in afs_next_locker():
    119  if (vnode->lock_type == AFS_LOCK_WRITE)
  in afs_do_setlk():
    517  if (vnode->lock_type == AFS_LOCK_WRITE) {
    551  vnode->lock_type = type;
internal.h
    718  afs_lock_type_t lock_type : 8;    (struct member)
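In the AFS hits, vnode->lock_type records which mode the file server granted (the internal.h hit declares it as an afs_lock_type_t bitfield eight bits wide), and afs_grant_locks()/afs_next_locker() treat AFS_LOCK_WRITE as exclusive when deciding whom to wake. The sketch below shows an enum-typed narrow bitfield plus that exclusivity check; enum bitfields are implementation-defined C but accepted by GCC and Clang, which is what kernel code like this relies on. struct vnode_stub and lock_is_exclusive() are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Lock modes, named after the AFS_LOCK_* values in the hits above. */
typedef enum { AFS_LOCK_READ, AFS_LOCK_WRITE } lock_mode_t;

/*
 * Hypothetical cut-down vnode: the granted mode is kept in an 8-bit
 * bitfield, like the "afs_lock_type_t lock_type : 8" member in the
 * internal.h hit, so it packs tightly next to other small fields.
 */
struct vnode_stub {
        unsigned int lock_state : 8;
        lock_mode_t  lock_type  : 8;
};

/* Mirrors the afs_grant_locks() hit: a write lock excludes everyone. */
static bool lock_is_exclusive(const struct vnode_stub *v)
{
        return v->lock_type == AFS_LOCK_WRITE;
}

int main(void)
{
        struct vnode_stub v = { .lock_state = 1, .lock_type = AFS_LOCK_WRITE };

        printf("exclusive: %s\n", lock_is_exclusive(&v) ? "yes" : "no");

        v.lock_type = AFS_LOCK_READ;    /* further readers could be granted */
        printf("exclusive: %s\n", lock_is_exclusive(&v) ? "yes" : "no");
        return 0;
}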
/linux/kernel/locking/
lockdep.c
  in register_lock_class():
    1347  class->lock_type = lock->lock_type;
  in usage_skip():
    2253  if (entry->class->lock_type == LD_LOCK_NORMAL)
    2282  if (entry->class->lock_type == LD_LOCK_PERCPU &&
  in check_wait_context():
    4896  if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
  in lockdep_init_map_type():
    4939  u8 inner, u8 outer, u8 lock_type)
    4962  lock->lock_type = lock_type;
  in __lock_set_class():
    5438  lock->lock_type);
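The lockdep.c hits show the class inheriting its tag from the map at registration (class->lock_type = lock->lock_type) and the checker consulting it later: usage_skip() and check_wait_context() special-case LD_LOCK_PERCPU and LD_LOCK_WAIT_OVERRIDE classes. The real rules are more involved; the sketch below only shows the shape of "walk dependency entries and decide from the class's lock_type whether the usual checks apply", and every structure in it is a hypothetical cut-down stand-in.

#include <stdbool.h>
#include <stdio.h>

enum { LD_LOCK_NORMAL, LD_LOCK_PERCPU, LD_LOCK_WAIT_OVERRIDE };

/* Hypothetical, heavily cut-down class and dependency entry. */
struct lock_class_stub {
        const char    *name;
        unsigned char  lock_type;
};

struct dep_entry {
        const struct lock_class_stub *class;
};

/*
 * Mirrors the shape of the usage_skip()/check_wait_context() hits:
 * decide per entry, from the class's lock_type, whether the usual
 * checks apply.  Normal classes are checked; per-CPU and
 * wait-override classes are skipped here for brevity.
 */
static bool should_check(const struct dep_entry *entry)
{
        return entry->class->lock_type == LD_LOCK_NORMAL;
}

int main(void)
{
        const struct lock_class_stub a = { "a_mutex",         LD_LOCK_NORMAL };
        const struct lock_class_stub b = { "b_percpu",        LD_LOCK_PERCPU };
        const struct lock_class_stub c = { "c_wait_override", LD_LOCK_WAIT_OVERRIDE };
        const struct dep_entry deps[] = { { &a }, { &b }, { &c } };

        for (unsigned int i = 0; i < sizeof(deps) / sizeof(deps[0]); i++)
                printf("%-18s %s\n", deps[i].class->name,
                       should_check(&deps[i]) ? "checked" : "skipped");
        return 0;
}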
/linux/fs/smb/client/
cifsproto.h
    542  const __u16 netfid, const __u8 lock_type,
    553  struct file_lock *, const __u16 lock_type,
cifssmb.c
  in cifs_lockv():
    1949  const __u16 netfid, const __u8 lock_type, const __u32 num_unlock,
    1969  pSMB->LockType = lock_type;
  in CIFSSMBPosixLock():
    2066  struct file_lock *pLockData, const __u16 lock_type,
    2116  parm_data->lock_type = cpu_to_le16(lock_type);
    2166  if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
    2169  if (parm_data->lock_type ==
    2172  else if (parm_data->lock_type ==
cifspdu.h
    1138  __le16 lock_type;  /* 0 = Read, 1 = Write, 2 = Unlock */    (struct member)
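The cifssmb.c and cifspdu.h hits show a 16-bit little-endian lock_type on the wire (0 = Read, 1 = Write, 2 = Unlock) that is always converted with cpu_to_le16() when stored into or compared against the request buffer. The userspace sketch below uses <endian.h> to show the same convert-at-the-boundary pattern; struct posix_lock_wire is a hypothetical stand-in, not the actual SMB PDU layout.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Wire values from the cifspdu.h comment: 0 = Read, 1 = Write, 2 = Unlock. */
enum { POSIX_LOCK_READ = 0, POSIX_LOCK_WRITE = 1, POSIX_LOCK_UNLOCK = 2 };

/* Hypothetical on-the-wire fragment: lock_type is a little-endian u16. */
struct posix_lock_wire {
        uint16_t lock_type;     /* stored little-endian */
        uint64_t start;
        uint64_t length;
} __attribute__((packed));

/* Convert at the boundary, like the cpu_to_le16() calls in the hits. */
static void fill_lock(struct posix_lock_wire *w, uint16_t lock_type,
                      uint64_t start, uint64_t length)
{
        w->lock_type = htole16(lock_type);
        w->start = htole64(start);
        w->length = htole64(length);
}

int main(void)
{
        struct posix_lock_wire w;

        fill_lock(&w, POSIX_LOCK_UNLOCK, 0, 4096);

        /* Compare in wire byte order, as the CIFSSMBPosixLock() hits do. */
        if (w.lock_type == htole16(POSIX_LOCK_UNLOCK))
                puts("this request is an unlock");
        return 0;
}

Keeping the field in wire byte order and converting the comparison constant instead, as the hit at line 2166 does with cpu_to_le16(CIFS_UNLCK), avoids converting the stored value back and forth.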
/linux/drivers/block/
rbd.c
  in rbd_object_map_lock():
    1732  u8 lock_type;
    1753  RBD_LOCK_NAME, &lock_type, &lock_tag,
  in get_lock_owner_info():
    3877  u8 lock_type;
    3884  &lock_type, &lock_tag, &lockers, &num_lockers);
    3902  if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
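In the rbd.c hits, lock_type is an output parameter of the lock-owner query (filled alongside the tag and the locker list) and is then compared against CEPH_CLS_LOCK_EXCLUSIVE to decide whether a single exclusive owner exists. The sketch below reproduces only that "query fills an out-parameter, caller branches on it" shape; get_lock_info() and its arguments are hypothetical, not the Ceph cls_lock API.

#include <stdint.h>
#include <stdio.h>

/* Lock flavours; the exclusive value is named after the rbd.c hit. */
enum { CLS_LOCK_NONE, CLS_LOCK_EXCLUSIVE, CLS_LOCK_SHARED };

/*
 * Hypothetical stand-in for the "get lock info" call in the hits: it
 * reports who holds the lock and, through an out-parameter, whether
 * the lock is exclusive or shared.
 */
static int get_lock_info(const char *lock_name, uint8_t *lock_type,
                         const char **owner)
{
        (void)lock_name;
        *lock_type = CLS_LOCK_EXCLUSIVE;        /* pretend someone holds it */
        *owner = "client.4213";
        return 0;
}

int main(void)
{
        uint8_t lock_type;
        const char *owner;

        if (get_lock_info("rbd_lock", &lock_type, &owner))
                return 1;

        /* Same shape as the get_lock_owner_info() hit: refuse to treat a
         * shared lock as having a single exclusive owner. */
        if (lock_type != CLS_LOCK_EXCLUSIVE) {
                puts("lock is shared, no single owner");
                return 1;
        }
        printf("exclusive lock held by %s\n", owner);
        return 0;
}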