/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/cleanup.h>

#ifdef CONFIG_TASKS_TRACE_RCU
extern struct srcu_struct rcu_tasks_trace_srcu_struct;
#endif // #ifdef CONFIG_TASKS_TRACE_RCU

#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)

static inline int rcu_read_lock_trace_held(void)
{
	return srcu_read_lock_held(&rcu_tasks_trace_srcu_struct);
}

#else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif // #else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)

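/*
 * Usage sketch (illustrative only): rcu_read_lock_trace_held() is meant
 * for lockdep-style assertions on RCU-protected accesses.  The pointer
 * name "gp" below is hypothetical:
 *
 *	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
 *
 * Under CONFIG_DEBUG_LOCK_ALLOC, this complains when the access occurs
 * outside of a Tasks Trace RCU reader; in non-debug builds the check
 * compiles away.
 */
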
#ifdef CONFIG_TASKS_TRACE_RCU

/**
 * rcu_read_lock_tasks_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked by
 * one task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for
 * srcu_read_lock_fast().  For a description of how implicit RCU
 * readers provide the needed ordering for architectures defining the
 * ARCH_WANTS_NO_INSTR Kconfig option (and thus promising never to trace
 * code where RCU is not watching), please see the __srcu_read_lock_fast()
 * (non-kerneldoc) header comment.  Otherwise, the smp_mb() below provides
 * the needed ordering.
 */
static inline struct srcu_ctr __percpu *rcu_read_lock_tasks_trace(void)
{
	struct srcu_ctr __percpu *ret = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);

	rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
		smp_mb(); // Provide ordering on noinstr-incomplete architectures.
	return ret;
}

/**
 * rcu_read_unlock_tasks_trace - mark end of RCU-trace read-side critical section
 * @scp: return value from corresponding rcu_read_lock_tasks_trace().
 *
 * Pairs with the preceding call to rcu_read_lock_tasks_trace() that
 * returned the value passed in via scp.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 * For memory-ordering information, please see the header comment for the
 * rcu_read_lock_tasks_trace() function.
 */
static inline void rcu_read_unlock_tasks_trace(struct srcu_ctr __percpu *scp)
{
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
		smp_mb(); // Provide ordering on noinstr-incomplete architectures.
	__srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
	srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
}

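/*
 * Usage sketch for the cookie-based reader API above (illustrative
 * only; the pointer "gp" and helper "do_trace_work()" are hypothetical):
 *
 *	struct srcu_ctr __percpu *cookie;
 *
 *	cookie = rcu_read_lock_tasks_trace();
 *	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
 *	if (p)
 *		do_trace_work(p);
 *	rcu_read_unlock_tasks_trace(cookie);
 *
 * Unlike rcu_read_lock_trace(), the caller must convey the returned
 * cookie to the matching rcu_read_unlock_tasks_trace().
 */
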
/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked by
 * one task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
	if (t->trc_reader_nesting++) {
		// In case we interrupted a Tasks Trace RCU reader.
		return;
	}
	barrier(); // nesting before scp to protect against interrupt handler.
	t->trc_reader_scp = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
		smp_mb(); // Placeholder for more selective ordering
}

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	struct srcu_ctr __percpu *scp;
	struct task_struct *t = current;

	scp = t->trc_reader_scp;
	barrier(); // scp before nesting to protect against interrupt handler.
	if (!--t->trc_reader_nesting) {
		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
			smp_mb(); // Placeholder for more selective ordering
		__srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
	}
	srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
}

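/*
 * Usage sketch for the nesting-capable reader API above (illustrative
 * only; the pointer "gp" and helper "use()" are hypothetical):
 *
 *	rcu_read_lock_trace();
 *	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
 *	if (p)
 *		use(p);
 *	rcu_read_unlock_trace();
 *
 * Because the per-task ->trc_reader_nesting counter tracks nesting
 * depth, these calls may nest freely, including from an interrupt
 * handler that fires within an enclosing reader.
 */
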
/**
 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_srcu(&rcu_tasks_trace_srcu_struct, rhp, func);
}

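/*
 * Usage sketch: free a structure once all pre-existing readers have
 * finished (illustrative only; "struct foo" and its fields are
 * hypothetical):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// After unpublishing all pointers to *fp:
 *	call_rcu_tasks_trace(&fp->rh, free_foo_cb);
 */
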
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
static inline void synchronize_rcu_tasks_trace(void)
{
	synchronize_srcu(&rcu_tasks_trace_srcu_struct);
}

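/*
 * Update-side usage sketch: unpublish, wait for pre-existing readers,
 * then reclaim (illustrative only; the RCU-protected pointer "gp" is
 * hypothetical):
 *
 *	struct foo *old = rcu_replace_pointer(gp, NULL, true);
 *
 *	synchronize_rcu_tasks_trace(); // All pre-existing readers done.
 *	kfree(old);
 */
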
/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Note that rcu_barrier_tasks_trace() is not obligated to actually wait,
 * for example, if there are no pending callbacks.
 */
static inline void rcu_barrier_tasks_trace(void)
{
	srcu_barrier(&rcu_tasks_trace_srcu_struct);
}

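/*
 * Usage sketch: a module that queues callbacks via
 * call_rcu_tasks_trace() must wait for them to be invoked before the
 * callback functions' text vanishes (hypothetical exit function):
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		// Ensure no new callbacks get queued past this point.
 *		rcu_barrier_tasks_trace(); // Wait out in-flight callbacks.
 *		// Now safe to let module unload proceed.
 *	}
 */
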
/**
 * rcu_tasks_trace_expedite_current - Expedite the current Tasks Trace RCU grace period
 *
 * Cause the current Tasks Trace RCU grace period to become expedited.
 * The grace period following the current one might also be expedited.
 * If there is no current grace period, one might be created.  If the
 * current grace period is sleeping, that sleep will complete before
 * expediting takes effect.
 */
static inline void rcu_tasks_trace_expedite_current(void)
{
	srcu_expedite_current(&rcu_tasks_trace_srcu_struct);
}

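/*
 * Possible usage sketch (an assumption, not an established idiom):
 * expedite before a latency-sensitive wait so that the grace period
 * the wait depends on is more likely to be hurried along:
 *
 *	rcu_tasks_trace_expedite_current();
 *	synchronize_rcu_tasks_trace();
 */
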
// Placeholders to enable stepwise transition.
void __init rcu_tasks_trace_suppress_unused(void);

#else // #ifdef CONFIG_TASKS_TRACE_RCU
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
		    rcu_read_lock_trace(),
		    rcu_read_unlock_trace())

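/*
 * Usage sketch for the lock guard defined above: the reader is exited
 * automatically at end of scope (illustrative only; "gp" and "use()"
 * are hypothetical):
 *
 *	{
 *		guard(rcu_tasks_trace)();
 *		p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
 *		if (p)
 *			use(p);
 *	}
 */
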
#endif /* __LINUX_RCUPDATE_TRACE_H */