| /linux/kernel/locking/ |
| rwbase_rt.c |
     71  struct rt_mutex_base *rtm = &rwb->rtmutex;  in __rwbase_read_lock()
    153  struct rt_mutex_base *rtm = &rwb->rtmutex;  in __rwbase_read_unlock()
    189  __releases(&rwb->rtmutex.wait_lock)  in __rwbase_write_unlock()
    191  struct rt_mutex_base *rtm = &rwb->rtmutex;  in __rwbase_write_unlock()
    204  struct rt_mutex_base *rtm = &rwb->rtmutex;  in rwbase_write_unlock()
    213  struct rt_mutex_base *rtm = &rwb->rtmutex;  in rwbase_write_downgrade()
    224  lockdep_assert_held(&rwb->rtmutex.wait_lock);  in __rwbase_write_trylock()
    241  struct rt_mutex_base *rtm = &rwb->rtmutex;  in rwbase_write_lock()
    289  struct rt_mutex_base *rtm = &rwb->rtmutex;  in rwbase_write_trylock()
|
| ww_mutex.h |
     90  #define WAIT_LOCK rtmutex.wait_lock
     94  __must_hold(&lock->rtmutex.wait_lock)  in __ww_waiter_first()
     96  struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);  in __ww_waiter_first()
    122  __must_hold(&lock->rtmutex.wait_lock)  in __ww_waiter_last()
    124  struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);  in __ww_waiter_last()
    139  return rt_mutex_owner(&lock->rtmutex);  in __ww_mutex_owner()
    144  __must_hold(&lock->rtmutex.wait_lock)  in __ww_mutex_has_waiters()
    146  return rt_mutex_has_waiters(&lock->rtmutex);  in __ww_mutex_has_waiters()
    150  __acquires(&lock->rtmutex.wait_lock)  in lock_wait_lock()
    152  raw_spin_lock_irqsave(&lock->rtmutex.wait_lock, *flags);  in lock_wait_lock()
    [all …]
|
| rtmutex_api.c |
     49  ret = __rt_mutex_lock(&lock->rtmutex, state);  in __rt_mutex_lock_common()
    143  ret = __rt_mutex_trylock(&lock->rtmutex);  in rt_mutex_trylock()
    159  __rt_mutex_unlock(&lock->rtmutex);  in rt_mutex_unlock()
    235  __rt_mutex_base_init(&lock->rtmutex);  in __rt_mutex_init()
    520  rt_mutex_base_init(&mutex->rtmutex);  in __mutex_rt_init_generic()
    535  ret = __rt_mutex_lock(&lock->rtmutex, state);  in __mutex_lock_common()
    598  ret = __rt_mutex_trylock(&lock->rtmutex);  in _mutex_trylock_nest_lock()
    645  return __rt_mutex_trylock(&lock->rtmutex);  in mutex_trylock()
    654  __rt_mutex_unlock(&lock->rtmutex);  in mutex_unlock()
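
Every rtmutex_api.c hit above follows the same delegation shape: the public rt_mutex and PREEMPT_RT mutex entry points operate on the rt_mutex_base embedded in the outer lock type. A minimal sketch of that pattern, with lockdep annotations and debug checks omitted (so not the verbatim kernel code):

```c
/* Sketch only: the API wrappers forward to the __rt_mutex_* core,
 * passing the embedded rt_mutex_base, as the hits above show. */
int rt_mutex_trylock(struct rt_mutex *lock)
{
	return __rt_mutex_trylock(&lock->rtmutex);
}

void rt_mutex_unlock(struct rt_mutex *lock)
{
	__rt_mutex_unlock(&lock->rtmutex);
}
```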
|
| ww_rt_mutex.c |
     27  if (__rt_mutex_trylock(&rtm->rtmutex)) {  in ww_mutex_trylock()
     65  if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {  in __ww_rt_mutex_lock()
     71  ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);  in __ww_rt_mutex_lock()
    100  __rt_mutex_unlock(&rtm->rtmutex);  in ww_mutex_unlock()
|
| rtmutex.c |
   1258  rtm = container_of(lock, struct rt_mutex, rtmutex);  in task_blocks_on_rt_mutex()
   1259  __assume_ctx_lock(&rtm->rtmutex.wait_lock);  in task_blocks_on_rt_mutex()
   1622  struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);  in rt_mutex_slowlock_block()
   1626  __assume_ctx_lock(&rtm->rtmutex.wait_lock);  in rt_mutex_slowlock_block()
   1712  struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);  in __rt_mutex_slowlock()
   1716  __assume_ctx_lock(&rtm->rtmutex.wait_lock);  in __rt_mutex_slowlock()
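
The container_of() hits recover the outer struct rt_mutex from a pointer to its embedded rt_mutex_base. The same idiom shown standalone, with invented stand-in types and a local container_of definition:

```c
#include <stdio.h>
#include <stddef.h>

/* Minimal container_of, equivalent in effect to the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Invented stand-ins for rt_mutex_base and rt_mutex. */
struct base  { int locked; };
struct outer { int extra; struct base rtmutex; };

int main(void)
{
	struct outer o = { .extra = 42 };
	struct base *lock = &o.rtmutex;

	/* Recover the enclosing object from the embedded member, as the
	 * rtmutex.c hits above do for struct rt_mutex. */
	struct outer *recovered = container_of(lock, struct outer, rtmutex);

	printf("extra = %d\n", recovered->extra);
	return 0;
}
```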
|
| /linux/include/linux/ |
| rwbase_rt.h |
     13  struct rt_mutex_base rtmutex;  member
     19  .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \
     24  rt_mutex_base_init(&(rwbase)->rtmutex); \
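
The member hit above sits in the shared reader/writer base that backs rwlock_t and rw_semaphore on PREEMPT_RT. A sketch of the assumed layout (simplified, not copied verbatim from the header):

```c
/* Assumed layout: a reader count plus the embedded rtmutex whose address
 * the rwbase_rt.c slow paths above take. */
struct rwbase_rt {
	atomic_t		readers;
	struct rt_mutex_base	rtmutex;
};
```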
|
| rtmutex.h |
     68  struct rt_mutex_base rtmutex;  member
    101  .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \
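
The rtmutex.h hits show the same embedding for the full rt_mutex type; roughly (the lockdep member is an assumption here, not taken from the listing):

```c
/* Sketch: rt_mutex is the base lock plus optional lockdep state. */
struct rt_mutex {
	struct rt_mutex_base	rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
```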
|
| mutex.h |
    122  .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \
    129  #define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex)
|
| mutex_types.h |
     63  struct rt_mutex_base rtmutex;  in context_lock_struct() local
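
Taken together with the mutex.h hits above, this is why the PREEMPT_RT struct mutex behaves as a thin wrapper; an approximate sketch (debug fields assumed, not quoted from the listing):

```c
/* Approximate PREEMPT_RT definition: the mutex is just an embedded
 * rt_mutex_base, which is why mutex_is_locked() above maps to
 * rt_mutex_base_is_locked(). */
struct mutex {
	struct rt_mutex_base	rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
```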
|
| ww_mutex.h |
     36  #define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex)
|
| /linux/lib/ |
| test_lockup.c |
    493  offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),  in test_lockup_init()
    496  offsetof(struct mutex, rtmutex.wait_lock.magic),  in test_lockup_init()
    499  offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),  in test_lockup_init()
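
The offsetof() hits use standard nested member designators to reach the wait_lock magic value inside the embedded rtmutex. A small self-contained illustration of the idiom (struct names invented, not the kernel's):

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the nested lock types probed above. */
struct inner_lock { unsigned int magic; };
struct base_lock  { struct inner_lock wait_lock; };
struct outer_lock { int state; struct base_lock rtmutex; };

int main(void)
{
	/* offsetof() accepts a nested member designator, which is how
	 * test_lockup locates rtmutex.wait_lock.magic in each lock type. */
	printf("offset = %zu\n",
	       offsetof(struct outer_lock, rtmutex.wait_lock.magic));
	return 0;
}
```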
|
| /linux/Documentation/core-api/real-time/ |
| theory.rst |
     14  and priority-inheritance aware implementation known as rtmutex, and by enforcing
     56  rtmutex. Instead of spinning, a task attempting to acquire a contended lock
     69  implemented on top of rtmutex, which provides support for priority inheritance
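
The theory.rst fragments refer to the key property these rtmutex-based sleeping locks provide: priority inheritance. A toy userspace model of the idea, purely illustrative (none of these names exist in the kernel):

```c
#include <stdio.h>

/* Toy model: lower numeric value means higher priority, mirroring the
 * kernel's convention. */
struct toy_task { const char *name; int prio; int normal_prio; };

/* If a higher-priority task blocks on a lock, the owner temporarily
 * inherits that priority so medium-priority tasks cannot starve it. */
static void pi_boost(struct toy_task *owner, const struct toy_task *waiter)
{
	if (waiter->prio < owner->prio)
		owner->prio = waiter->prio;
}

/* On unlock, the owner drops back to its normal priority. */
static void pi_deboost(struct toy_task *owner)
{
	owner->prio = owner->normal_prio;
}

int main(void)
{
	struct toy_task owner  = { "low-prio owner", 120, 120 };
	struct toy_task waiter = { "rt waiter",       10,  10 };

	pi_boost(&owner, &waiter);
	printf("%s boosted to prio %d\n", owner.name, owner.prio);
	pi_deboost(&owner);
	printf("%s restored to prio %d\n", owner.name, owner.prio);
	return 0;
}
```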
|
| /linux/Documentation/locking/ |
| rt-mutex.rst |
     32  The enqueueing of the waiters into the rtmutex waiter tree is done in
     34  rtmutex, only the top priority waiter is enqueued into the owner's
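
Those two fragments describe the double bookkeeping rtmutex does: every waiter sits in the lock's priority-ordered waiter tree, but only the top waiter is also propagated to the owner. A toy model of that rule (plain arrays instead of rb-trees; all names invented here):

```c
#include <stdio.h>

#define MAX_WAITERS 8

/* Lower value = higher priority, as in the kernel. */
struct toy_waiter { int prio; };

struct toy_lock {
	struct toy_waiter *waiters[MAX_WAITERS];	/* kept priority ordered */
	int nr;
};

/* Priority-ordered insert; the real code uses an rb-tree for this. */
static void enqueue_waiter(struct toy_lock *lock, struct toy_waiter *w)
{
	int i = lock->nr++;

	while (i > 0 && lock->waiters[i - 1]->prio > w->prio) {
		lock->waiters[i] = lock->waiters[i - 1];
		i--;
	}
	lock->waiters[i] = w;
}

/* Only this waiter would be enqueued into the owner's tree. */
static struct toy_waiter *top_waiter(const struct toy_lock *lock)
{
	return lock->nr ? lock->waiters[0] : NULL;
}

int main(void)
{
	struct toy_lock lock = { .nr = 0 };
	struct toy_waiter a = { 120 }, b = { 10 }, c = { 50 };

	enqueue_waiter(&lock, &a);
	enqueue_waiter(&lock, &b);
	enqueue_waiter(&lock, &c);
	printf("owner tracks the waiter with prio %d\n", top_waiter(&lock)->prio);
	return 0;
}
```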
|
| rt-mutex-design.rst |
     10  This document tries to describe the design of the rtmutex.c implementation.
     11  It doesn't describe the reasons why rtmutex.c exists. For that please see
    307  not true, the rtmutex.c code will be broken!), this allows for the least
    349  The implementation of the PI code in rtmutex.c has several places that a
    414  rtmutex.c. See the 'Chain walk basics and protection scope' comment for further
|
| locktypes.rst |
     98  rtmutex chapter
|
| /linux/Documentation/translations/it_IT/locking/ |
| locktypes.rst |
     98  rtmutex chapter
    101  RT mutual exclusion locks (*rtmutex*) are a mutual exclusion system
    112  rwlock_t to be implemented using rtmutex.
    225  the overhead of an rtmutex.
    423  On PREEMPT_RT kernels this code will not work because rtmutexes require
    507  is too small to fit an rtmutex. Therefore, the semantics of the bit
|
| /linux/scripts/ |
| context-analysis-suppression.txt |
     27  src:*include/linux/rtmutex*.h=emit
|
| /linux/kernel/rcu/ |
| tree_plugin.h |
    551  drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;  in rcu_preempt_deferred_qs_irqrestore()
    585  rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);  in rcu_preempt_deferred_qs_irqrestore()
   1203  rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);  in rcu_boost()
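
The RCU hits show a different consumer of the same embedded rt_mutex_base: RCU priority boosting proxy-locks the per-node boost mutex on behalf of a preempted reader, so the boosting task blocks on it and donates its priority through the normal rtmutex machinery. A heavily condensed sketch of that flow, reusing only the calls visible above (all surrounding RCU bookkeeping omitted or assumed):

```c
/* Sketch, not the real rcu_boost()/unlock path; only the calls that
 * appear in the hits above are used. */

/* rcu_boost(): make the preempted reader 't' the proxy owner of
 * boost_mtx, then the booster acquires boost_mtx and blocks on it,
 * lending 't' its priority via priority inheritance. */
rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);

/* When the reader finally leaves its read-side critical section, it
 * checks whether it was made owner of the boost mutex and, if so,
 * releases it, dropping the inherited priority. */
if (rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t)
	rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
```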
|
| /linux/tools/perf/Documentation/ |
| perf-lock.txt |
    191  rtmutex, rwlock-rt, rwlock-rt:R, rwlock-rt:W, percpu-rwmem, pcpu-sem,
|