Lines Matching defs:rcu_data (struct rcu_data, from kernel/rcu/tree.h)

struct rcu_data {
	/* Quiescent-state and grace-period handling: */
	unsigned long		gp_seq;			/* Track rsp->gp_seq counter. */
	unsigned long		gp_seq_needed;		/* Track furthest future GP request. */
	union rcu_noqs		cpu_no_qs;		/* No QSes yet for this CPU. */
	bool			core_needs_qs;		/* Core waits for quiescent state. */
	bool			beenonline;		/* CPU online at least once. */
	bool			gpwrap;			/* Possible ->gp_seq wrap. */
	bool			exp_deferred_qs;	/* This CPU awaiting a deferred QS? */
	bool			cpu_started;		/* RCU watching this onlining CPU. */
	struct rcu_node		*mynode;		/* This CPU's leaf of hierarchy. */
	unsigned long		grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long		ticks_this_gp;		/* Scheduling-clock ticks handled during
							   and after the last GP it is aware of. */
	struct irq_work		defer_qs_iw;		/* Obtain later scheduler attention. */
	bool			defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct	strict_work;		/* Schedule readers for strict GPs. */

	/* Batch handling: */
	struct rcu_segcblist	cblist;			/* Segmented callback list, with different
							   callbacks waiting for different GPs. */
	long			qlen_last_fqs_check;	/* qlen at last check for QS forcing. */
	unsigned long		n_cbs_invoked;		/* # callbacks invoked since boot. */
	unsigned long		n_force_qs_snap;	/* Did another CPU force QS recently? */
	long			blimit;			/* Upper limit on a processed batch. */

	/* Dyntick-idle interface: */
	int			dynticks_snap;		/* Per-GP tracking for dynticks. */
	long			dynticks_nesting;	/* Track process nesting level. */
	long			dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
	atomic_t		dynticks;		/* Even value for idle, else odd. */
	bool			rcu_need_heavy_qs;	/* GP old, so heavy quiescent state! */
	bool			rcu_urgent_qs;		/* GP old, need light quiescent state. */
	bool			rcu_forced_tick;	/* Forced tick to provide QS. */
	bool			rcu_forced_tick_exp;	/* ... provide QS to expedited GP. */

	unsigned long		last_accelerate;	/* Last jiffy CBs were accelerated. */
	unsigned long		last_advance_all;	/* Last jiffy CBs were all advanced. */
	int			tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */

	/* rcu_barrier() and expediting: */
	struct rcu_head		barrier_head;		/* Used by rcu_barrier(). */
	int			exp_dynticks_snap;	/* Double-check need for IPI. */

	/* Callback offloading (CONFIG_RCU_NOCB_CPU): */
	struct swait_queue_head	nocb_cb_wq;		/* For nocb kthreads to sleep on. */
	struct task_struct	*nocb_gp_kthread;	/* Kthread handling GPs for offloaded CBs. */
	raw_spinlock_t		nocb_lock;		/* Guard following pair of fields. */
	/* ... other no-CBs bookkeeping fields omitted ... */
	struct rcu_data		*nocb_next_cb_rdp;	/* Next rcu_data in wakeup chain. */
	struct rcu_data		*nocb_gp_rdp ____cacheline_internodealigned_in_smp;
							/* GP rdp takes GP-end wakeups. */
	/* ... remaining fields omitted ... */
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
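For orientation, rcu_data is a per-CPU structure: kernel/rcu/tree.c creates one instance per CPU with DEFINE_PER_CPU_SHARED_ALIGNED(), and RCU code reaches it through the usual per-CPU accessors. The sketch below is illustrative only; the helper rdp_show_gp_progress() is hypothetical and not part of the kernel, and the code assumes the struct rcu_data definition above (kernel/rcu/tree.h) is in scope.

	#include <linux/percpu.h>
	#include <linux/printk.h>
	#include <linux/compiler.h>

	/* One rcu_data instance per CPU, as declared in kernel/rcu/tree.c. */
	static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data);

	/* Hypothetical helper, for illustration only: dump a CPU's GP progress. */
	static void rdp_show_gp_progress(int cpu)
	{
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);	/* a specific CPU */
		struct rcu_data *my_rdp = this_cpu_ptr(&rcu_data);	/* the running CPU */

		pr_info("cpu %d: gp_seq=%lu gp_seq_needed=%lu (local gp_seq=%lu)\n",
			cpu, READ_ONCE(rdp->gp_seq), READ_ONCE(rdp->gp_seq_needed),
			READ_ONCE(my_rdp->gp_seq));
	}

In the real kernel the per-CPU definition also carries an initializer for the dynticks fields; it is omitted here to keep the sketch focused on how the structure is located rather than how it is initialized.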