Lines matching refs: rcu_data

80 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
86 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_gpwrap_count()
162 static void rcu_report_exp_rdp(struct rcu_data *rdp);
163 static void rcu_report_qs_rdp(struct rcu_data *rdp);
164 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
165 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
166 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
231 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu()
312 static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap) in rcu_watching_snap_stopped_since()
362 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); in rcu_momentary_eqs()
515 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
601 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched()
664 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick()
712 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && in rcu_needs_cpu()
713 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); in rcu_needs_cpu()
721 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs()
773 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); in rcu_request_urgent_qs_task()
802 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf()
819 static int rcu_watching_snap_save(struct rcu_data *rdp) in rcu_watching_snap_save()
855 static int rcu_watching_snap_recheck(struct rcu_data *rdp) in rcu_watching_snap_recheck()
992 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp()
1016 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp()
1094 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup()
1143 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs()
1190 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked()
1219 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs()
1243 struct rcu_data *rdp) in rcu_advance_cbs_nowake()
1273 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes()
1322 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes()
1809 struct rcu_data *rdp; in rcu_gp_init()
1957 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1995 rcu_report_qs_rdp(this_cpu_ptr(&rcu_data)); in rcu_gp_init()
2157 struct rcu_data *rdp; in rcu_gp_cleanup()
2198 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2206 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
2225 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2443 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp()
2498 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state()
2540 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch()
2702 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock))); in rcu_sched_clock_irq()
2703 __this_cpu_write(rcu_data.last_sched_clock, j); in rcu_sched_clock_irq()
2707 raw_cpu_inc(rcu_data.ticks_this_gp); in rcu_sched_clock_irq()
2709 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { in rcu_sched_clock_irq()
2713 __this_cpu_write(rcu_data.rcu_urgent_qs, false); in rcu_sched_clock_irq()
2732 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp()
2762 struct rcu_data *rdp; in force_qs_rnp()
2765 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2801 rnp = raw_cpu_read(rcu_data.mynode); in rcu_force_quiescent_state()
2837 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core()
2905 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); in invoke_rcu_core_kthread()
2906 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); in invoke_rcu_core_kthread()
2908 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); in invoke_rcu_core_kthread()
2927 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; in rcu_cpu_kthread_park()
2932 return __this_cpu_read(rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread_should_run()
2942 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); in rcu_cpu_kthread()
2943 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread()
2944 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity); in rcu_cpu_kthread()
2974 .store = &rcu_data.rcu_cpu_kthread_task,
2990 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; in rcu_spawn_core_kthreads()
2998 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func) in rcutree_enqueue()
3009 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in call_rcu_core()
3065 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked()
3088 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld()
3107 struct rcu_data *rdp; in __call_rcu_common()
3134 rdp = this_cpu_ptr(&rcu_data); in __call_rcu_common()
3382 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) in synchronize_rcu()
3462 struct rcu_data *rdp; in start_poll_synchronize_rcu_common()
3466 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu_common()
3673 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending()
3754 static void rcu_barrier_entrain(struct rcu_data *rdp) in rcu_barrier_entrain()
3794 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_handler()
3822 struct rcu_data *rdp; in rcu_barrier()
3861 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
3903 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4005 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) in rcu_rdp_cpu_online()
4012 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_online()
4034 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online()
4040 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
4144 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data()
4244 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu()
4293 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_beenfullyonline()
4305 struct rcu_data *rdp; in rcutree_online_cpu()
4308 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4338 struct rcu_data *rdp; in rcutree_report_cpu_starting()
4343 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_report_cpu_starting()
4396 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcutree_report_cpu_dead()
4450 struct rcu_data *my_rdp; in rcutree_migrate_callbacks()
4452 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks()
4466 my_rdp = this_cpu_ptr(&rcu_data); in rcutree_migrate_callbacks()
4520 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dying_cpu()
4536 struct rcu_data *rdp; in rcutree_offline_cpu()
4539 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4584 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_spawn_gp_kthread()
4728 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
4729 per_cpu_ptr(&rcu_data, i)->barrier_head.next = in rcu_init_one()
4730 &per_cpu_ptr(&rcu_data, i)->barrier_head; in rcu_init_one()
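
Most of the references above use a small set of per-CPU access idioms: per_cpu_ptr(&rcu_data, cpu) to reach a specific CPU's instance, this_cpu_ptr(&rcu_data) for the local instance, and the __this_cpu_*/raw_cpu_* accessors for individual fields. The following is a minimal sketch of those idioms only; the example_data structure, its members, and example_access() are hypothetical stand-ins, not the real struct rcu_data from tree.c.

/*
 * Minimal sketch of the per-CPU access idioms seen in the listing,
 * using a hypothetical example_data structure in place of the real
 * struct rcu_data; the members are illustrative only.
 */
#include <linux/percpu.h>
#include <linux/types.h>

struct example_data {
	unsigned long ticks_this_gp;	/* illustrative counter */
	bool urgent_qs;			/* illustrative flag */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct example_data, example_data);

static void example_access(int cpu)
{
	/* Pointer to a specific CPU's instance (cf. per_cpu_ptr() above). */
	struct example_data *remote = per_cpu_ptr(&example_data, cpu);

	/* Pointer to this CPU's instance; caller must be non-preemptible. */
	struct example_data *local = this_cpu_ptr(&example_data);

	/* Direct field accessors, as in __this_cpu_write()/raw_cpu_inc() above. */
	__this_cpu_write(example_data.urgent_qs, false);
	raw_cpu_inc(example_data.ticks_this_gp);

	(void)remote;
	(void)local;
}

Note also the one ordered cross-CPU store visible in the listing: rcu_request_urgent_qs_task() sets rcu_urgent_qs with smp_store_release() (line 773), which pairs with the smp_load_acquire() of the same flag in rcu_sched_clock_irq() (line 2709).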