/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
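
/*
 * struct percpu_rw_semaphore - RW semaphore optimized for frequent readers.
 *
 * Readers normally take a fast path: with only preemption disabled they
 * increment this CPU's @read_count, so uncontended readers do not write
 * any shared cacheline. A writer flips @rss out of its idle state (forcing
 * readers onto the slow path), sets @block, and then sleeps on @writer
 * until the per-CPU @read_count values sum to zero. Readers and writers
 * that must block queue on @waiters; @dep_map is the lockdep annotation
 * used when CONFIG_DEBUG_LOCK_ALLOC is enabled.
 */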
struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rcuwait		writer;
	wait_queue_head_t	waiters;
	atomic_t		block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),		\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)
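
/*
 * Example (editorial sketch, foo_sem is a made-up name):
 *
 *	DEFINE_STATIC_PERCPU_RWSEM(foo_sem);
 *
 * defines a file-local semaphore, together with its per-CPU reader count,
 * that is usable immediately; no percpu_init_rwsem() call is needed.
 * DEFINE_PERCPU_RWSEM() is the non-static variant.
 */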

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both take sem->rss out of its idle state and start checking
	 * the per-CPU counters while we are here. So if we see that rcu_sync
	 * is idle, we know that the writer won't be checking until we're past
	 * the preempt_enable() and that once its synchronize_rcu() is done,
	 * the writer will see anything we did within this RCU-sched read-side
	 * critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}
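
/*
 * Typical reader-side usage (editorial sketch, foo_sem is made up):
 *
 *	percpu_down_read(&foo_sem);
 *	... access state protected against writers of foo_sem ...
 *	percpu_up_read(&foo_sem);
 *
 * On the fast path this amounts to a preempt_disable()/preempt_enable()
 * pair around a per-CPU increment, making read acquisition nearly free
 * as long as no writer is around.
 */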

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
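
/*
 * The trylock never sleeps, so it is usable where blocking on a pending
 * writer is not acceptable (editorial sketch, names made up):
 *
 *	if (!percpu_down_read_trylock(&foo_sem))
 *		return -EBUSY;
 *	... critical section ...
 *	percpu_up_read(&foo_sem);
 *
 * It fails only while a writer holds the lock or is in the middle of
 * acquiring it.
 */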

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * Slowpath; a reader will only ever wake a single blocked
		 * writer.
		 */
		smp_mb(); /* B matches C */
		/*
		 * In other words, if the writer sees our decrement (presumably
		 * to an aggregate of zero, as that is the only time it
		 * matters) it will also see our critical section.
		 */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}

extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

DEFINE_GUARD(percpu_read, struct percpu_rw_semaphore *,
	     percpu_down_read(_T), percpu_up_read(_T))
DEFINE_GUARD_COND(percpu_read, _try, percpu_down_read_trylock(_T))

DEFINE_GUARD(percpu_write, struct percpu_rw_semaphore *,
	     percpu_down_write(_T), percpu_up_write(_T))
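
/*
 * Scope-based guards from <linux/cleanup.h> (editorial sketch, names
 * made up); the lock is dropped automatically when the guard goes out
 * of scope:
 *
 *	guard(percpu_read)(&foo_sem);
 *	... foo_sem is read-held for the rest of the enclosing scope ...
 *
 * and the conditional form built on the trylock:
 *
 *	scoped_cond_guard(percpu_read_try, return -EBUSY, &foo_sem) {
 *		... runs only if the trylock succeeded ...
 *	}
 */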

static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
{
	return atomic_read(&sem->block);
}

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
			       const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;		\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
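
/*
 * Dynamic initialization (editorial sketch, struct foo is made up).
 * A semaphore embedded in another object must be initialized before use
 * and freed afterwards, because the per-CPU reader count is allocated at
 * init time; percpu_init_rwsem() returns -ENOMEM if that allocation
 * fails:
 *
 *	struct foo {
 *		struct percpu_rw_semaphore sem;
 *	};
 *
 *	int foo_setup(struct foo *f)
 *	{
 *		return percpu_init_rwsem(&f->sem);
 *	}
 *
 *	void foo_teardown(struct foo *f)
 *	{
 *		percpu_free_rwsem(&f->sem);
 *	}
 */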

#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)
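
/*
 * These wrap the lockdep helpers, so they are only meaningful under
 * CONFIG_LOCKDEP (editorial sketch, foo_update() is made up):
 *
 *	static void foo_update(struct foo *f)
 *	{
 *		percpu_rwsem_assert_held(&f->sem);
 *		... callers are required to hold f->sem ...
 *	}
 */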

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
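
/*
 * percpu_rwsem_release() and percpu_rwsem_acquire() only transfer the
 * lockdep annotation; the lock itself stays held. They support handing a
 * held lock from one context to another, e.g. acquiring in one task and
 * releasing in another (editorial sketch; filesystem freezing uses this
 * pattern for sb_writers):
 *
 *	percpu_down_write(&foo_sem);
 *	percpu_rwsem_release(&foo_sem, _THIS_IP_);
 *	... foo_sem remains write-held while ownership migrates ...
 *	percpu_rwsem_acquire(&foo_sem, false, _THIS_IP_);
 *	percpu_up_write(&foo_sem);
 */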

#endif	/* _LINUX_PERCPU_RWSEM_H */