/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>

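/*
 * Writer-biased per-CPU rw_semaphore: readers on the fast path only
 * increment a per-CPU counter, so they never contend on a shared
 * cacheline. A writer flips the rcu_sync state to force readers into
 * the slow path, sets 'block' to hold off new readers, and waits on
 * 'writer' until the aggregate read count drops to zero.
 */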
struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rcuwait		writer;
	wait_queue_head_t	waiters;
	atomic_t		block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),		\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)
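
/*
 * Example (illustrative sketch only; 'foo_rwsem' and 'foo_list' are
 * hypothetical): define a file-scope semaphore and take it for reading
 * around a read-mostly traversal.
 *
 *	DEFINE_STATIC_PERCPU_RWSEM(foo_rwsem);
 *
 *	void foo_iterate(void)
 *	{
 *		percpu_down_read(&foo_rwsem);
 *		...walk foo_list...
 *		percpu_up_read(&foo_rwsem);
 *	}
 */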

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool, bool);

static inline void percpu_down_read_internal(struct percpu_rw_semaphore *sem,
					     bool freezable)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_rcu() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false, freezable); /* Unconditional memory barrier */
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	percpu_down_read_internal(sem, false);
}

static inline void percpu_down_read_freezable(struct percpu_rw_semaphore *sem,
					      bool freeze)
{
	percpu_down_read_internal(sem, freeze);
}
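
/*
 * Example (sketch; 'foo_rwsem' is hypothetical): a reader that may be
 * held across a system freeze can pass freeze == true so that a
 * slow-path wait happens in a freezable state and does not hold up
 * the freezer.
 *
 *	percpu_down_read_freezable(&foo_rwsem, true);
 *	...
 *	percpu_up_read(&foo_rwsem);
 */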

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true, false); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
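
/*
 * Example (sketch; 'foo_rwsem' is hypothetical): opportunistically
 * take the read side and back off instead of sleeping when a writer
 * holds or is pending on the lock.
 *
 *	if (percpu_down_read_trylock(&foo_rwsem)) {
 *		...read-side work...
 *		percpu_up_read(&foo_rwsem);
 *	}
 */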

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
		 * writer.
		 */
		smp_mb(); /* B matches C */
		/*
		 * In other words, if they see our decrement (presumably to
		 * aggregate zero, as that is the only time it matters) they
		 * will also see our critical section.
		 */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}

extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

DEFINE_GUARD(percpu_read, struct percpu_rw_semaphore *,
	     percpu_down_read(_T), percpu_up_read(_T))
DEFINE_GUARD_COND(percpu_read, _try, percpu_down_read_trylock(_T))

DEFINE_GUARD(percpu_write, struct percpu_rw_semaphore *,
	     percpu_down_write(_T), percpu_up_write(_T))
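
/*
 * Example (sketch; 'foo_rwsem' is hypothetical): scope-based locking
 * via <linux/cleanup.h>; the semaphore is dropped automatically when
 * the guard goes out of scope.
 *
 *	void foo_modify(void)
 *	{
 *		guard(percpu_write)(&foo_rwsem);
 *		...exclusive work, released on any return path...
 *	}
 */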

static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
{
	return atomic_read(&sem->block);
}

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;		\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
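
/*
 * Example (sketch; 'struct foo' is hypothetical): a dynamically
 * allocated object embedding a semaphore pairs percpu_init_rwsem(),
 * which can fail with -ENOMEM, with percpu_free_rwsem().
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f || percpu_init_rwsem(&f->rwsem))
 *		...handle allocation failure...
 *	...
 *	percpu_free_rwsem(&f->rwsem);
 *	kfree(f);
 */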

#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
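
/*
 * Example (sketch; 'foo_rwsem' is hypothetical): transferring lockdep
 * ownership when the task that releases the lock is not the one that
 * acquired it, in the style of a freeze/thaw handoff.
 *
 *	Task A:
 *		percpu_down_write(&foo_rwsem);
 *		percpu_rwsem_release(&foo_rwsem, _RET_IP_);
 *	Task B, later:
 *		percpu_rwsem_acquire(&foo_rwsem, false, _RET_IP_);
 *		percpu_up_write(&foo_rwsem);
 */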

#endif