Lines Matching full:lock
12 * Simple spin lock operations.  There are two variants, one clears IRQ's
   * on the local processor, one does not.
42 * Ticket locks are conceptually two parts, one indicating the current head of
   * the queue, and the other indicating the current tail.  The lock is acquired
   * by atomically noting the tail and incrementing it by one (adding ourself to
   * the queue and noting our position), then waiting until the head becomes
   * equal to the initial value of the tail.
47 * We use an xadd covering *both* parts of the lock, to increment the tail and
   * also load the position of the head, which takes care of memory ordering
   * issues and should be optimal for the uncontended case.  Note the tail must
   * be in the high part, because a wide xadd increment of the low part would
   * carry up and contaminate the high part.
53 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
57 inc = xadd(&lock->tickets, inc);
63 inc.head = ACCESS_ONCE(lock->tickets.head);
65 barrier(); /* make sure nothing creeps before the lock is taken */
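The comment and the __ticket_spin_lock() fragments above describe the ticket-lock fast path: a single wide xadd bumps the tail (handing the caller a ticket) and returns the current head in the same operation, after which the CPU spins until the head reaches that ticket. Below is a minimal user-space sketch of the same idea in C11 atomics; the type and function names are illustrative, not the kernel's, and it assumes a little-endian 32-bit lock word with the head in the low half and the tail in the high half, mixing 32-bit and 16-bit atomic accesses the way the x86 code does.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-in for arch_spinlock_t: a 32-bit word whose low half is
 * the head (ticket now being served) and whose high half is the tail (next
 * ticket to hand out).  Layout assumes little-endian, as on x86. */
typedef union {
	_Atomic uint32_t head_tail;
	struct {
		_Atomic uint16_t head;
		_Atomic uint16_t tail;
	} tickets;
} ticket_lock_t;

#define TICKET_TAIL_INC	(1u << 16)	/* adds one ticket to the tail half */

static inline void ticket_lock(ticket_lock_t *lock)
{
	/* One atomic add covers *both* halves: it increments the tail and
	 * returns the head, standing in for the kernel's xadd(). */
	uint32_t old = atomic_fetch_add_explicit(&lock->head_tail,
						 TICKET_TAIL_INC,
						 memory_order_acquire);
	uint16_t my_ticket = (uint16_t)(old >> 16);

	/* Spin until the head catches up to our ticket. */
	while (atomic_load_explicit(&lock->tickets.head,
				    memory_order_acquire) != my_ticket)
		;	/* a relax/pause hint belongs here, see the relax macros at the end */
}

In the uncontended case this is one locked add plus a comparison that already succeeds, which is exactly the property the comment is after.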
68 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
72 old.tickets = ACCESS_ONCE(lock->tickets);
79 return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
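__ticket_spin_trylock() snapshots the whole lock, gives up if it looks taken, and otherwise tries to claim a ticket with a single cmpxchg on the combined head/tail word; if the compare-and-swap succeeds the lock was still free, so the freshly taken ticket is immediately the one being served. A sketch under the same illustrative ticket_lock_t as above:

static inline int ticket_trylock(ticket_lock_t *lock)
{
	uint32_t old = atomic_load_explicit(&lock->head_tail,
					    memory_order_relaxed);
	uint16_t head = (uint16_t)old;
	uint16_t tail = (uint16_t)(old >> 16);

	if (head != tail)
		return 0;		/* held or queued: fail without writing */

	/* Free at the time of the snapshot: try to take one ticket in a
	 * single compare-and-swap, standing in for the kernel's cmpxchg(). */
	uint32_t new_val = old + TICKET_TAIL_INC;
	return atomic_compare_exchange_strong_explicit(&lock->head_tail,
						       &old, new_val,
						       memory_order_acquire,
						       memory_order_relaxed);
}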
82 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
84 __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
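__ticket_spin_unlock() only has to advance the head by one. Note that the add in the fragment above targets lock->tickets.head, the narrow field, not the whole word, so a wrapping head can never carry into the tail (the same concern the earlier comment raises about wide increments of the low part). A sketch mirroring that:

static inline void ticket_unlock(ticket_lock_t *lock)
{
	/* Release the lock by serving the next ticket.  The increment is done
	 * on the 16-bit head field only, so it cannot overflow into the tail. */
	atomic_fetch_add_explicit(&lock->tickets.head, 1,
				  memory_order_release);
}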
87 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
89 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
94 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
96 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
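Both predicates fall straight out of the head/tail encoding: the lock is held whenever the two halves differ, and it is contended when more than one ticket separates them (the holder plus at least one waiter). With the illustrative type from above:

static inline int ticket_is_locked(ticket_lock_t *lock)
{
	uint32_t v = atomic_load_explicit(&lock->head_tail,
					  memory_order_relaxed);
	return (uint16_t)v != (uint16_t)(v >> 16);	/* head != tail */
}

static inline int ticket_is_contended(ticket_lock_t *lock)
{
	uint32_t v = atomic_load_explicit(&lock->head_tail,
					  memory_order_relaxed);
	/* Distance between tail and head, computed modulo 2^16. */
	return (uint16_t)((uint16_t)(v >> 16) - (uint16_t)v) > 1;
}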
103 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
105 return __ticket_spin_is_locked(lock);
108 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
110 return __ticket_spin_is_contended(lock);
114 static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
116 __ticket_spin_lock(lock);
119 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
121 return __ticket_spin_trylock(lock);
124 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
126 __ticket_spin_unlock(lock);
129 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
132 arch_spin_lock(lock);
137 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
139 while (arch_spin_is_locked(lock))
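The arch_spin_*() wrappers above are thin pass-throughs to the ticket routines (arch_spin_lock_flags() just calls arch_spin_lock(), ignoring the flags), and arch_spin_unlock_wait() polls the locked predicate with a relax hint in between, without ever taking the lock itself. Sketched with the helpers above:

static inline void ticket_unlock_wait(ticket_lock_t *lock)
{
	/* Wait for any current holder to drop the lock; do not acquire it. */
	while (ticket_is_locked(lock))
		;	/* relax/pause hint, see the relax macros at the end */
}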
150 * irq-safe write-lock, but readers can get non-irqsafe read-locks.
159 * @lock: the rwlock in question.
161 static inline int arch_read_can_lock(arch_rwlock_t *lock)
163 return lock->lock > 0;
168 * @lock: the rwlock in question.
170 static inline int arch_write_can_lock(arch_rwlock_t *lock)
172 return lock->write == WRITE_LOCK_CMP;
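The read-write lock queries are written against a biased counter in the classic x86 style: the counter starts at a large positive bias, each reader subtracts one, and a writer subtracts the entire bias, so a positive value means a reader could still get in, and the full bias means nobody holds the lock at all. The sketch below uses a made-up bias and type name (RW_BIAS, biased_rwlock_t) rather than the kernel's WRITE_LOCK_CMP machinery:

#define RW_BIAS	0x00100000		/* illustrative value, not the kernel's */

typedef struct {
	_Atomic int32_t count;		/* == RW_BIAS when completely free */
} biased_rwlock_t;

static inline int rw_read_can_lock(biased_rwlock_t *rw)
{
	/* Positive: no writer holds the bias, so a read_trylock would succeed. */
	return atomic_load_explicit(&rw->count, memory_order_relaxed) > 0;
}

static inline int rw_write_can_lock(biased_rwlock_t *rw)
{
	/* Full bias: no readers and no writer. */
	return atomic_load_explicit(&rw->count, memory_order_relaxed) == RW_BIAS;
}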
194 static inline int arch_read_trylock(arch_rwlock_t *lock)
196 READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;
204 static inline int arch_write_trylock(arch_rwlock_t *lock)
206 atomic_t *count = (atomic_t *)&lock->write;
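Both trylock paths follow the same optimistic pattern: subtract first (one for a reader, the whole bias for a writer), check the result, and add the amount back if the subtraction revealed a conflict. Continuing the biased-counter sketch:

static inline int rw_read_trylock(biased_rwlock_t *rw)
{
	/* Take a reader slot; a negative result means a writer holds the bias. */
	int32_t newval = atomic_fetch_sub_explicit(&rw->count, 1,
						   memory_order_acquire) - 1;
	if (newval >= 0)
		return 1;
	atomic_fetch_add_explicit(&rw->count, 1, memory_order_relaxed);	/* undo */
	return 0;
}

static inline int rw_write_trylock(biased_rwlock_t *rw)
{
	/* Claim the whole bias; succeeds only if no reader or writer was in. */
	if (atomic_fetch_sub_explicit(&rw->count, RW_BIAS,
				      memory_order_acquire) == RW_BIAS)
		return 1;
	atomic_fetch_add_explicit(&rw->count, RW_BIAS, memory_order_relaxed);	/* undo */
	return 0;
}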
217 :"+m" (rw->lock) : : "memory"); in arch_read_unlock()
226 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
227 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
235 #define arch_spin_relax(lock) cpu_relax()
236 #define arch_read_relax(lock) cpu_relax()
237 #define arch_write_relax(lock) cpu_relax()
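All three relax hooks map to cpu_relax(), which on x86 is essentially the PAUSE instruction ("rep; nop"): it tells the core it is sitting in a spin loop so it can back off and yield resources to the sibling hyperthread. A user-space stand-in for the hint used in the spin loops sketched above (x86-only GCC/Clang builtin, illustrative name):

static inline void spin_relax(void)
{
	/* PAUSE: de-prioritise the spinning thread and save power while we
	 * wait for head == tail (or for the rwlock counter to change). */
	__builtin_ia32_pause();
}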