/linux-3.3/include/linux/ |
D | rwlock_api_smp.h |
      18  void __lockfunc _raw_read_lock(rwlock_t *lock)      __acquires(lock);
      19  void __lockfunc _raw_write_lock(rwlock_t *lock)     __acquires(lock);
      20  void __lockfunc _raw_read_lock_bh(rwlock_t *lock)   __acquires(lock);
      21  void __lockfunc _raw_write_lock_bh(rwlock_t *lock)  __acquires(lock);
      22  void __lockfunc _raw_read_lock_irq(rwlock_t *lock)  __acquires(lock);
      23  void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
      24  unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
      25          __acquires(lock);
      26  unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
      27          __acquires(lock);
          [all …]
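
The declarations above are the SMP back-ends behind the read_lock()/write_lock() wrapper family. Below is a minimal usage sketch of that wrapper API; the lock name, the stats array and both helper functions are invented for illustration, only the DEFINE_RWLOCK()/read_lock()/write_lock_irqsave() calls themselves are kernel API.

    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(stats_lock);       /* hypothetical lock */
    static unsigned long stats[16];         /* hypothetical shared data */

    static unsigned long stats_read(int i)
    {
            unsigned long v;

            read_lock(&stats_lock);         /* many readers may hold this */
            v = stats[i];
            read_unlock(&stats_lock);
            return v;
    }

    static void stats_reset(void)
    {
            unsigned long flags;
            int i;

            /* The _irqsave variant also masks local interrupts, so an
             * interrupt handler taking the same lock cannot deadlock
             * against this writer. */
            write_lock_irqsave(&stats_lock, flags);
            for (i = 0; i < ARRAY_SIZE(stats); i++)
                    stats[i] = 0;
            write_unlock_irqrestore(&stats_lock, flags);
    }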
|
D | spinlock_api_up.h |
      19  #define assert_raw_spin_locked(lock)    do { (void)(lock); } while (0)   argument
      24   * flags straight, to suppress compiler warnings of unused lock
      27  #define __LOCK(lock) \   argument
      28    do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
      30  #define __LOCK_BH(lock) \   argument
      31    do { local_bh_disable(); __LOCK(lock); } while (0)
      33  #define __LOCK_IRQ(lock) \   argument
      34    do { local_irq_disable(); __LOCK(lock); } while (0)
      36  #define __LOCK_IRQSAVE(lock, flags) \   argument
      37    do { local_irq_save(flags); __LOCK(lock); } while (0)
          [all …]
|
D | spinlock.h |
      64  #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
      93  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
      95  # define raw_spin_lock_init(lock) \   argument
      99      __raw_spin_lock_init((lock), #lock, &__key); \
     103  # define raw_spin_lock_init(lock) \   argument
     104      do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
     107  #define raw_spin_is_locked(lock)    arch_spin_is_locked(&(lock)->raw_lock)   argument
     110  #define raw_spin_is_contended(lock) ((lock)->break_lock)   argument
     114  #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)   argument
     116  #define raw_spin_is_contended(lock) (((void)(lock), 0))   argument
          [all …]
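
raw_spin_lock_init() above is normally reached through the spin_lock_init() wrapper: with lockdep/debug enabled it registers a lock class keyed on the init call site, otherwise it is a plain store of the unlocked value. A small usage sketch follows; the device structure and helpers are invented for illustration.

    #include <linux/spinlock.h>

    struct my_dev {                         /* hypothetical structure */
            spinlock_t lock;                /* protects 'pending' */
            int pending;
    };

    static void my_dev_init(struct my_dev *dev)
    {
            spin_lock_init(&dev->lock);     /* ends up in raw_spin_lock_init() */
            dev->pending = 0;
    }

    static void my_dev_mark_pending(struct my_dev *dev)
    {
            unsigned long flags;

            /* _irqsave form so this is safe even if an interrupt handler
             * also takes dev->lock. */
            spin_lock_irqsave(&dev->lock, flags);
            dev->pending = 1;
            spin_unlock_irqrestore(&dev->lock, flags);
    }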
|
D | rwlock.h |
      18  extern void __rwlock_init(rwlock_t *lock, const char *name,
      20  # define rwlock_init(lock) \   argument
      24      __rwlock_init((lock), #lock, &__key); \
      27  # define rwlock_init(lock) \   argument
      28      do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
      32  extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
      33  #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)   argument
      34  extern int do_raw_read_trylock(rwlock_t *lock);
      35  extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
      36  extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
          [all …]
|
D | spinlock_api_smp.h |
      22  void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)    __acquires(lock);
      23  void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
      24          __acquires(lock);
      26  _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
      27          __acquires(lock);
      28  void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)  __acquires(lock);
      29  void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
      30          __acquires(lock);
      32  unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
      33          __acquires(lock);
          [all …]
|
D | spinlock_up.h |
      25  static inline void arch_spin_lock(arch_spinlock_t *lock)   in arch_spin_lock() argument
      27          lock->slock = 0;   in arch_spin_lock()
      31  arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)   in arch_spin_lock_flags() argument
      34          lock->slock = 0;   in arch_spin_lock_flags()
      37  static inline int arch_spin_trylock(arch_spinlock_t *lock)   in arch_spin_trylock() argument
      39          char oldval = lock->slock;   in arch_spin_trylock()
      41          lock->slock = 0;   in arch_spin_trylock()
      46  static inline void arch_spin_unlock(arch_spinlock_t *lock)   in arch_spin_unlock() argument
      48          lock->slock = 1;   in arch_spin_unlock()
      54  #define arch_read_lock(lock)    do { (void)(lock); } while (0)   argument
          [all …]
|
/linux-3.3/drivers/gpu/drm/ttm/ |
D | ttm_lock.c |
      45  void ttm_lock_init(struct ttm_lock *lock)   in ttm_lock_init() argument
      47          spin_lock_init(&lock->lock);   in ttm_lock_init()
      48          init_waitqueue_head(&lock->queue);   in ttm_lock_init()
      49          lock->rw = 0;   in ttm_lock_init()
      50          lock->flags = 0;   in ttm_lock_init()
      51          lock->kill_takers = false;   in ttm_lock_init()
      52          lock->signal = SIGKILL;   in ttm_lock_init()
      56  void ttm_read_unlock(struct ttm_lock *lock)   in ttm_read_unlock() argument
      58          spin_lock(&lock->lock);   in ttm_read_unlock()
      59          if (--lock->rw == 0)   in ttm_read_unlock()
          [all …]
|
/linux-3.3/lib/ |
D | spinlock_debug.c |
      16  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,   in __raw_spin_lock_init() argument
      21           * Make sure we are not reinitializing a held lock:   in __raw_spin_lock_init()
      23          debug_check_no_locks_freed((void *)lock, sizeof(*lock));   in __raw_spin_lock_init()
      24          lockdep_init_map(&lock->dep_map, name, key, 0);   in __raw_spin_lock_init()
      26          lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;   in __raw_spin_lock_init()
      27          lock->magic = SPINLOCK_MAGIC;   in __raw_spin_lock_init()
      28          lock->owner = SPINLOCK_OWNER_INIT;   in __raw_spin_lock_init()
      29          lock->owner_cpu = -1;   in __raw_spin_lock_init()
      34  void __rwlock_init(rwlock_t *lock, const char *name,   in __rwlock_init()
      39           * Make sure we are not reinitializing a held lock:   in __rwlock_init()
          [all …]
|
/linux-3.3/kernel/ |
D | rtmutex.c |
      21   * lock->owner state tracking:
      23   * lock->owner holds the task_struct pointer of the owner. Bit 0
      24   * is used to keep track of the "lock has waiters" state.
      27   * NULL          0       lock is free (fast acquire possible)
      28   * NULL          1       lock is free and has waiters and the top waiter
      29   *                       is going to take the lock*
      30   * taskpointer   0       lock is held (fast release possible)
      31   * taskpointer   1       lock is held and has waiters**
      34   * possible when bit 0 of lock->owner is 0.
      36   * (*) It also can be a transitional state when grabbing the lock
          [all …]
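
The table above packs the "has waiters" flag into bit 0 of lock->owner. A hedged sketch of the decoding helpers, written from this description and modelled on kernel/rtmutex_common.h (the sketch_ names are invented):

    #include <linux/rtmutex.h>
    #include <linux/sched.h>

    #define RT_MUTEX_HAS_WAITERS    1UL     /* bit 0 of lock->owner */

    /* task_struct pointers are at least word aligned, so bit 0 is free
     * to carry the "has waiters" flag; mask it off to get the owner. */
    static inline struct task_struct *sketch_rt_mutex_owner(struct rt_mutex *lock)
    {
            return (struct task_struct *)
                    ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
    }

    static inline int sketch_rt_mutex_has_waiters(struct rt_mutex *lock)
    {
            return (unsigned long)lock->owner & RT_MUTEX_HAS_WAITERS;
    }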
|
D | spinlock.c |
      27   * not re-enabled during lock-acquire (which the preempt-spin-ops do):
      42   * This could be a long-held lock. We both prepare to spin for a long
      44   * towards that other CPU that it should break the lock ASAP.
      47  void __lockfunc __raw_##op##_lock(locktype##_t *lock)                  \
      51                  if (likely(do_raw_##op##_trylock(lock)))               \
      55                  if (!(lock)->break_lock)                                \
      56                          (lock)->break_lock = 1;                         \
      57                  while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
      58                          arch_##op##_relax(&lock->raw_lock);             \
      60          (lock)->break_lock = 0;                                         \
          [all …]
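
The token-pasting macro above builds __raw_spin_lock(), __raw_read_lock() and __raw_write_lock(). Hand-expanded for the spin case as a readability sketch of roughly what the preprocessor emits on configurations where the break_lock field exists (CONFIG_GENERIC_LOCKBREAK):

    void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
    {
            for (;;) {
                    preempt_disable();
                    if (likely(do_raw_spin_trylock(lock)))
                            break;          /* got it; preemption stays off */
                    preempt_enable();

                    /* Contended: mark the lock as contended and poll with
                     * relaxed reads until it looks free, then retry. */
                    if (!(lock)->break_lock)
                            (lock)->break_lock = 1;
                    while (!raw_spin_can_lock(lock) && (lock)->break_lock)
                            arch_spin_relax(&lock->raw_lock);
            }
            (lock)->break_lock = 0;
    }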
|
D | mutex.c |
      40  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)   in __mutex_init() argument
      42          atomic_set(&lock->count, 1);   in __mutex_init()
      43          spin_lock_init(&lock->wait_lock);   in __mutex_init()
      44          INIT_LIST_HEAD(&lock->wait_list);   in __mutex_init()
      45          mutex_clear_owner(lock);   in __mutex_init()
      47          debug_mutex_init(lock, name, key);   in __mutex_init()
      54   * We split the mutex lock/unlock logic into separate fastpath and
      64   * @lock: the mutex to be acquired
      66   * Lock the mutex exclusively for this task. If the mutex is not
      83  void __sched mutex_lock(struct mutex *lock)   in mutex_lock() argument
          [all …]
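
__mutex_init() above is what the mutex_init() wrapper calls; statically allocated mutexes use DEFINE_MUTEX() instead. A minimal usage sketch; the config variable and helpers are invented for illustration.

    #include <linux/mutex.h>

    static DEFINE_MUTEX(cfg_mutex);         /* hypothetical lock */
    static int cfg_value;                   /* hypothetical shared state */

    static void cfg_set(int new_val)
    {
            mutex_lock(&cfg_mutex);         /* may sleep: process context only */
            cfg_value = new_val;
            mutex_unlock(&cfg_mutex);
    }

    static int cfg_set_interruptible(int new_val)
    {
            int ret = mutex_lock_interruptible(&cfg_mutex);

            if (ret)
                    return ret;             /* -EINTR if a signal arrived */
            cfg_value = new_val;
            mutex_unlock(&cfg_mutex);
            return 0;
    }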
|
/linux-3.3/include/drm/ttm/ |
D | ttm_lock.h |
      33   * of the DRM heavyweight hardware lock.
      34   * The lock is a read-write lock. Taking it in read mode and write mode
      39   * It's allowed to leave kernel space with the vt lock held.
      40   * If a user-space process dies while having the vt-lock,
      41   * it will be released during the file descriptor release. The vt lock
      42   * excludes write lock and read lock.
      44   * The suspend mode is used to lock out all TTM users when preparing for
      59   * @base: ttm base object used solely to release the lock if the client
      60   *        holding the lock dies.
      61   * @queue: Queue for processes waiting for lock change-of-status.
          [all …]
|
/linux-3.3/drivers/gpu/drm/ |
D | drm_lock.c |
      44   * Lock ioctl.
      52   * Add the current task to the lock wait queue, and attempt to take to lock.
      57          struct drm_lock *lock = data;   in drm_lock() local
      63          if (lock->context == DRM_KERNEL_CONTEXT) {   in drm_lock()
      65                    task_pid_nr(current), lock->context);   in drm_lock()
      69          DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",   in drm_lock()
      70                    lock->context, task_pid_nr(current),   in drm_lock()
      71                    master->lock.hw_lock->lock, lock->flags);   in drm_lock()
      74          if (lock->context < 0)   in drm_lock()
      77          add_wait_queue(&master->lock.lock_queue, &entry);   in drm_lock()
          [all …]
|
/linux-3.3/fs/ocfs2/dlm/ |
D | dlmast.c |
      52                          struct dlm_lock *lock);
      53  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
      56   * lock level will obsolete a pending bast.
      57   * For example, if dlm_thread queued a bast for an EX lock that
      59   * lock owner downconverted to NL, the bast is now obsolete.
      61   * This is needed because the lock and convert paths can queue
      64  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)   in dlm_should_cancel_bast() argument
      67          assert_spin_locked(&lock->spinlock);   in dlm_should_cancel_bast()
      69          if (lock->ml.highest_blocked == LKM_IVMODE)   in dlm_should_cancel_bast()
      71          BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);   in dlm_should_cancel_bast()
          [all …]
|
D | dlmlock.c |
       6   * underlying calls for lock creation
      62                          struct dlm_lock *lock, int flags);
      66  static void dlm_lock_detach_lockres(struct dlm_lock *lock);
      84  /* Tell us whether we can grant a new lock request.
      89   * returns: 1 if the lock can be granted, 0 otherwise.
      92                                    struct dlm_lock *lock)   in dlm_can_grant_new_lock() argument
     100                  if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))   in dlm_can_grant_new_lock()
     107                  if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))   in dlm_can_grant_new_lock()
     110                                                  lock->ml.type))   in dlm_can_grant_new_lock()
     117  /* performs lock creation at the lockres master site
          [all …]
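
dlm_can_grant_new_lock() above walks the granted and converting queues and refuses the new lock if any existing lock's mode conflicts with it. A hedged, self-contained sketch of that mode-compatibility idea (the real helper is dlm_lock_compatible() in dlmcommon.h; this simplified stand-in covers only the NL/PR/EX cases and invents its own names):

    enum sketch_mode { SK_NL, SK_PR, SK_EX };       /* illustrative, not LKM_* */

    static int sketch_modes_compatible(enum sketch_mode held, enum sketch_mode want)
    {
            if (held == SK_NL || want == SK_NL)
                    return 1;               /* a null lock conflicts with nothing */
            if (held == SK_PR && want == SK_PR)
                    return 1;               /* readers can share the resource */
            return 0;                       /* anything involving EX conflicts */
    }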
|
/linux-3.3/drivers/md/persistent-data/ |
D | dm-block-manager.c |
      28   * trace is also emitted for the previous lock aquisition.
      38          spinlock_t lock;   member
      55  static unsigned __find_holder(struct block_lock *lock,   in __find_holder() argument
      61                  if (lock->holders[i] == task)   in __find_holder()
      68  /* call this *after* you increment lock->count */
      69  static void __add_holder(struct block_lock *lock, struct task_struct *task)   in __add_holder() argument
      71          unsigned h = __find_holder(lock, NULL);   in __add_holder()
      77          lock->holders[h] = task;   in __add_holder()
      80          t = lock->traces + h;   in __add_holder()
      83          t->entries = lock->entries[h];   in __add_holder()
          [all …]
|
/linux-3.3/arch/alpha/include/asm/ |
D | spinlock.h |
       9   * Simple spin lock operations. There are two variants, one clears IRQ's
      15  #define arch_spin_lock_flags(lock, flags)  arch_spin_lock(lock)   argument
      16  #define arch_spin_is_locked(x)  ((x)->lock != 0)
      18          do { cpu_relax(); } while ((x)->lock)
      20  static inline void arch_spin_unlock(arch_spinlock_t * lock)   in arch_spin_unlock() argument
      23          lock->lock = 0;   in arch_spin_unlock()
      26  static inline void arch_spin_lock(arch_spinlock_t * lock)   in arch_spin_lock() argument
      42          : "=&r" (tmp), "=m" (lock->lock)   in arch_spin_lock()
      43          : "m"(lock->lock) : "memory");   in arch_spin_lock()
      46  static inline int arch_spin_trylock(arch_spinlock_t *lock)   in arch_spin_trylock() argument
          [all …]
|
/linux-3.3/Documentation/ |
D | lockdep-design.txt |
       7  Lock-class
      14  tens of thousands of) instantiations. For example a lock in the inode
      16  lock class.
      18  The validator tracks the 'state' of lock-classes, and it tracks
      19  dependencies between different lock-classes. The validator maintains a
      22  Unlike an lock instantiation, the lock-class itself never goes away: when
      23  a lock-class is used for the first time after bootup it gets registered,
      24  and all subsequent uses of that lock-class will be attached to this
      25  lock-class.
      30  The validator tracks lock-class usage history into 4n + 1 separate state bits:
          [all …]
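
The class/instance distinction above can be seen in ordinary driver code: every object below carries its own spinlock instance, but because they are all initialised at the same spin_lock_init() call site, lockdep folds them into one lock-class. The structure and function are invented for the example.

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_obj {                         /* hypothetical object */
            spinlock_t lock;                /* one lock *instance* per object... */
            int state;
    };

    static struct my_obj *my_obj_alloc(void)
    {
            struct my_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

            if (!o)
                    return NULL;
            /* ...but a single lock-*class* for all of them, keyed on
             * this initialisation site. */
            spin_lock_init(&o->lock);
            return o;
    }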
|
D | robust-futex-ABI.txt |
      54  pointer to a single linked list of 'lock entries', one per lock,
      56  to itself, 'head'. The last 'lock entry' points back to the 'head'.
      59  address of the associated 'lock entry', plus or minus, of what will
      60  be called the 'lock word', from that 'lock entry'. The 'lock word'
      61  is always a 32 bit word, unlike the other words above. The 'lock
      63  of the thread holding the lock in the bottom 29 bits. See further
      67  the address of the 'lock entry', during list insertion and removal,
      71  Each 'lock entry' on the single linked list starting at 'head' consists
      72  of just a single word, pointing to the next 'lock entry', or back to
      73  'head' if there are no more entries. In addition, nearby to each 'lock
          [all …]
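
The per-thread registration described above is built from two small structures, sketched here after include/linux/futex.h with explanatory comments added (the __user annotations are dropped for brevity):

    struct robust_list {
            struct robust_list *next;       /* next 'lock entry', or back to 'head' */
    };

    struct robust_list_head {
            struct robust_list list;        /* the 'head' of the per-thread list */

            /* signed offset from a 'lock entry' to its 32-bit 'lock word' */
            long futex_offset;

            /* entry currently being inserted or removed, so the kernel can
             * recover if the thread dies in the middle of the update */
            struct robust_list *list_op_pending;
    };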
|
/linux-3.3/arch/x86/include/asm/ |
D | spinlock.h |
      12   * Simple spin lock operations. There are two variants, one clears IRQ's
      42   * the queue, and the other indicating the current tail. The lock is acquired
      47   * We use an xadd covering *both* parts of the lock, to increment the tail and
      53  static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)   in __ticket_spin_lock() argument
      57          inc = xadd(&lock->tickets, inc);   in __ticket_spin_lock()
      63                  inc.head = ACCESS_ONCE(lock->tickets.head);   in __ticket_spin_lock()
      65          barrier();      /* make sure nothing creeps before the lock is taken */   in __ticket_spin_lock()
      68  static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)   in __ticket_spin_trylock() argument
      72          old.tickets = ACCESS_ONCE(lock->tickets);   in __ticket_spin_trylock()
      79          return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;   in __ticket_spin_trylock()
          [all …]
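
A hedged, userspace C11 sketch of the ticket scheme described above, simplified to keep the two counters in separate atomics; the kernel packs both halves into one word so a single xadd both takes a ticket and reads the currently served one.

    #include <stdatomic.h>
    #include <stdint.h>

    struct ticket_lock {
            _Atomic uint16_t next;          /* ticket handed to the next locker */
            _Atomic uint16_t head;          /* ticket currently being served */
    };

    static void ticket_lock_acquire(struct ticket_lock *l)
    {
            /* take a ticket; this is the xadd in the kernel version */
            uint16_t me = atomic_fetch_add(&l->next, 1);

            /* spin until our number comes up (FIFO ordering for free) */
            while (atomic_load_explicit(&l->head, memory_order_acquire) != me)
                    ;                       /* a pause/cpu_relax() would go here */
    }

    static void ticket_lock_release(struct ticket_lock *l)
    {
            atomic_fetch_add_explicit(&l->head, 1, memory_order_release);
    }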
|
/linux-3.3/arch/blackfin/include/asm/ |
D | spinlock.h |
      27  static inline int arch_spin_is_locked(arch_spinlock_t *lock)   in arch_spin_is_locked() argument
      29          return __raw_spin_is_locked_asm(&lock->lock);   in arch_spin_is_locked()
      32  static inline void arch_spin_lock(arch_spinlock_t *lock)   in arch_spin_lock() argument
      34          __raw_spin_lock_asm(&lock->lock);   in arch_spin_lock()
      37  #define arch_spin_lock_flags(lock, flags)  arch_spin_lock(lock)   argument
      39  static inline int arch_spin_trylock(arch_spinlock_t *lock)   in arch_spin_trylock() argument
      41          return __raw_spin_trylock_asm(&lock->lock);   in arch_spin_trylock()
      44  static inline void arch_spin_unlock(arch_spinlock_t *lock)   in arch_spin_unlock() argument
      46          __raw_spin_unlock_asm(&lock->lock);   in arch_spin_unlock()
      49  static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)   in arch_spin_unlock_wait() argument
          [all …]
|
/linux-3.3/arch/hexagon/include/asm/ |
D | spinlock.h |
      34   *  - load the lock value
      36   *  - if the lock value is still negative, go back and try again.
      38   *  - successful store new lock value if positive -> lock acquired
      40  static inline void arch_read_lock(arch_rwlock_t *lock)   in arch_read_lock() argument
      49          : "r" (&lock->lock)   in arch_read_lock()
      55  static inline void arch_read_unlock(arch_rwlock_t *lock)   in arch_read_unlock() argument
      63          : "r" (&lock->lock)   in arch_read_unlock()
      70  static inline int arch_read_trylock(arch_rwlock_t *lock)   in arch_read_trylock() argument
      81          : "r" (&lock->lock)   in arch_read_trylock()
      89          return rwlock->lock == 0;   in arch_read_can_lock()
          [all …]
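
The comment lines above describe a load / sign-check / conditional-store loop. A hedged, userspace C11 rendering of that reader-acquire idea (the real code uses Hexagon load-locked/store-conditional assembly; the sketch uses compare-and-swap instead):

    #include <stdatomic.h>

    typedef struct {
            _Atomic int lock;               /* <0: writer, 0: free, >0: reader count */
    } sketch_rwlock_t;

    static void sketch_read_lock(sketch_rwlock_t *rw)
    {
            int old = atomic_load(&rw->lock);

            for (;;) {
                    if (old < 0) {          /* writer holds it: reload and retry */
                            old = atomic_load(&rw->lock);
                            continue;
                    }
                    /* try to bump the reader count; on failure 'old' is
                     * refreshed with the current value and we loop again */
                    if (atomic_compare_exchange_weak_explicit(&rw->lock, &old,
                                    old + 1, memory_order_acquire,
                                    memory_order_relaxed))
                            break;
            }
    }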
|
/linux-3.3/arch/tile/include/asm/ |
D | spinlock_64.h |
      21  /* Shifts and masks for the various fields in "lock". */
      27   * Return the "current" portion of a ticket lock value,
      28   * i.e. the number that currently owns the lock.
      36   * Return the "next" portion of a ticket lock value,
      37   * i.e. the number that the next task to try to acquire the lock will get.
      44  /* The lock is locked if a task would have to wait to get it. */
      45  static inline int arch_spin_is_locked(arch_spinlock_t *lock)   in arch_spin_is_locked() argument
      47          u32 val = lock->lock;   in arch_spin_is_locked()
      51  /* Bump the current ticket so the next task owns the lock. */
      52  static inline void arch_spin_unlock(arch_spinlock_t *lock)   in arch_spin_unlock() argument
          [all …]
|
/linux-3.3/arch/m32r/include/asm/ |
D | spinlock.h |
      21   * Simple spin lock operations. There are two variants, one clears IRQ's
      28  #define arch_spin_lock_flags(lock, flags)  arch_spin_lock(lock)   argument
      33   * arch_spin_trylock - Try spin lock and return a result
      34   * @lock: Pointer to the lock variable
      36   * arch_spin_trylock() tries to get the lock and returns a result.
      39  static inline int arch_spin_trylock(arch_spinlock_t *lock)   in arch_spin_trylock() argument
      45           * lock->slock :  =1 : unlock   in arch_spin_trylock()
      46           *              : <=0 : lock   in arch_spin_trylock()
      48           *   oldval = lock->slock; <--+ need atomic operation   in arch_spin_trylock()
      49           *   lock->slock = 0;      <--+   in arch_spin_trylock()
          [all …]
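
The comment above stresses that reading the old value and storing 0 must happen as one atomic step. A hedged, userspace C11 sketch of the same idea using an atomic exchange (the real m32r code does it with inline assembly):

    #include <stdatomic.h>

    typedef struct {
            _Atomic int slock;              /* 1 = unlocked, <= 0 = locked */
    } sketch_spinlock_t;

    static int sketch_spin_trylock(sketch_spinlock_t *lock)
    {
            /* atomically fetch the old value while leaving 0 ("locked") behind */
            int oldval = atomic_exchange_explicit(&lock->slock, 0,
                                                  memory_order_acquire);

            return oldval > 0;              /* succeeded only if it was unlocked */
    }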
|
/linux-3.3/arch/cris/include/arch-v32/arch/ |
D | spinlock.h |
      17  static inline void arch_spin_unlock(arch_spinlock_t *lock)   in arch_spin_unlock() argument
      20          : "=m" (lock->slock) \   in arch_spin_unlock()
      25  static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)   in arch_spin_unlock_wait() argument
      27          while (arch_spin_is_locked(lock))   in arch_spin_unlock_wait()
      31  static inline int arch_spin_trylock(arch_spinlock_t *lock)   in arch_spin_trylock() argument
      33          return cris_spin_trylock((void *)&lock->slock);   in arch_spin_trylock()
      36  static inline void arch_spin_lock(arch_spinlock_t *lock)   in arch_spin_lock() argument
      38          cris_spin_lock((void *)&lock->slock);   in arch_spin_lock()
      42  arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)   in arch_spin_lock_flags() argument
      44          arch_spin_lock(lock);   in arch_spin_lock_flags()
          [all …]
|