Searched refs:cpu_rq (Results 1 – 19 of 19) sorted by relevance
/linux/kernel/sched/
membarrier.c
    284   if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &   in membarrier_global_expedited()
    292   p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_global_expedited()
    370   p = rcu_dereference(cpu_rq(cpu_id)->curr);   in membarrier_private_expedited()
    383   p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_private_expedited()
    479   struct rq *rq = cpu_rq(cpu);   in sync_runqueues_membarrier_state()
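The membarrier hits all follow one pattern: peek at the task currently running on a remote CPU through cpu_rq(cpu)->curr under RCU instead of taking the runqueue lock. A minimal sketch of that pattern (hypothetical helper, assumes kernel/sched/ context where the private sched.h is visible; not code from membarrier.c):

    /*
     * Sketch only: report whether @cpu is currently running a kernel thread,
     * using the same RCU-protected cpu_rq(cpu)->curr access seen in
     * membarrier_global_expedited().
     */
    static bool cpu_running_kthread(int cpu)
    {
            struct task_struct *p;
            bool ret;

            rcu_read_lock();
            p = rcu_dereference(cpu_rq(cpu)->curr);
            ret = !!(p->flags & PF_KTHREAD);
            rcu_read_unlock();

            return ret;
    }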
cpuacct.c
    114   raw_spin_rq_lock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_read()
    131   raw_spin_rq_unlock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_read()
    150   raw_spin_rq_lock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_write()
    158   raw_spin_rq_unlock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_write()
    341   lockdep_assert_rq_held(cpu_rq(cpu));   in cpuacct_charge()
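cpuacct.c shows the heavier-weight alternative: take the remote runqueue's lock with raw_spin_rq_lock_irq()/raw_spin_rq_unlock_irq() around the access. A minimal sketch of that pairing (hypothetical helper, kernel/sched/ context assumed):

    /*
     * Sketch only: sample a field of a remote runqueue while holding its
     * lock, mirroring the lock/unlock pairing in cpuacct_cpuusage_read().
     */
    static u64 sample_rq_nr_running(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);
            u64 nr;

            raw_spin_rq_lock_irq(rq);
            nr = rq->nr_running;
            raw_spin_rq_unlock_irq(rq);

            return nr;
    }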
deadline.c
    122   return &cpu_rq(i)->rd->dl_bw;   in dl_bw_of()
    127   struct root_domain *rd = cpu_rq(i)->rd;   in dl_bw_cpus()
    168   return __dl_bw_capacity(cpu_rq(i)->rd->span);   in dl_bw_capacity()
    174   struct root_domain *rd = cpu_rq(cpu)->rd;   in dl_bw_visited()
    192   struct rq *rq = cpu_rq(i);   in __dl_update()
    682   later_rq = cpu_rq(cpu);   in dl_task_offline_migration()
    1623  rq = cpu_rq(cpu);   in sched_init_dl_servers()
    2178  rq = cpu_rq(cpu);   in select_task_rq_dl()
    2209  dl_task_is_earliest_deadline(p, cpu_rq(target)))   in select_task_rq_dl()
    2589  later_rq = cpu_rq(cpu);   in find_lock_later_rq()
    [all …]
core.c
    404   raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);   in sched_core_lock()
    413   raw_spin_unlock(&cpu_rq(t)->__lock);   in sched_core_unlock()
    434   cpu_rq(t)->core_enabled = enabled;   in __sched_core_flip()
    436   cpu_rq(cpu)->core->core_forceidle_start = 0;   in __sched_core_flip()
    447   cpu_rq(cpu)->core_enabled = enabled;   in __sched_core_flip()
    457   WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));   in sched_core_assert_empty()
    1172  struct rq *rq = cpu_rq(cpu);   in resched_cpu()
    1234  struct rq *rq = cpu_rq(cpu);   in wake_up_idle_cpu()
    2030  init_uclamp_rq(cpu_rq(cpu));   in init_uclamp()
    2473  rq = cpu_rq(new_cpu);   in move_queued_task()
    [all …]
fair.c
    2124  struct rq *rq = cpu_rq(cpu);   in update_numa_stats()
    2156  struct rq *rq = cpu_rq(env->dst_cpu);   in task_numa_assign()
    2171  rq = cpu_rq(env->dst_cpu);   in task_numa_assign()
    2186  rq = cpu_rq(env->best_cpu);   in task_numa_assign()
    2245  struct rq *dst_rq = cpu_rq(env->dst_cpu);   in task_numa_compare()
    2605  best_rq = cpu_rq(env.best_cpu);   in task_numa_migrate()
    3036  tsk = READ_ONCE(cpu_rq(cpu)->curr);   in task_numa_group()
    4946  capacity -= max(hw_load_avg(cpu_rq(cpu)), cpufreq_get_pressure(cpu));   in get_actual_cpu_capacity()
    6436  cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));   in sync_throttle()
    6596  struct rq *rq = cpu_rq(i);   in destroy_cfs_bandwidth()
    [all …]
debug.c
    346   struct rq *rq = cpu_rq(cpu);   in sched_fair_server_write()
    401   struct rq *rq = cpu_rq(cpu);   in sched_fair_server_show()
    801   struct rq *rq = cpu_rq(cpu);   in print_cfs_rq()
    915   dl_bw = &cpu_rq(cpu)->rd->dl_bw;   in print_dl_rq()
    924   struct rq *rq = cpu_rq(cpu);   in print_cpu()
    1282  cpu, latency, cpu_rq(cpu)->ticks_without_resched);   in resched_latency_warn()
ext.h
    28    return cpu_rq(cpu)->scx.cpuperf_target;   in scx_cpuperf_target()
rt.c
    230   struct rq *rq = cpu_rq(cpu);   in init_tg_rt_entry()
    950   return &cpu_rq(cpu)->rt;   in sched_rt_period_rt_rq()
    1509  rq = cpu_rq(cpu);   in select_task_rq_rt()
    1560  p->prio < cpu_rq(target)->rt.highest_prio.curr)   in select_task_rq_rt()
    1882  lowest_rq = cpu_rq(cpu);   in find_lock_lowest_rq()
    2262  src_rq = cpu_rq(cpu);   in pull_rt_task()
    2560  rt_rq = &cpu_rq(cpu)->rt;   in task_is_throttled_rt()
    2935  for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))   in print_rt_stats()
ext.c
    1105  return &cpu_rq(cpu)->scx.local_dsq;   in find_dsq_for_dispatch()
    2176  struct rq *srq = cpu_rq(scpu);   in balance_scx()
    2651  if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))   in scx_watchdog_workfn()
    3789  struct rq *rq = cpu_rq(cpu);   in scx_bypass()
    4001  struct rq *rq = cpu_rq(cpu);   in scx_disable_workfn()
    4272  struct rq *rq = cpu_rq(cpu);   in scx_dump_state()
    4605  cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;   in scx_enable()
    5126  struct rq *rq = cpu_rq(cpu);   in kick_one_cpu()
    5162  struct rq *rq = cpu_rq(cpu);   in kick_one_cpu_if_idle()
    5205  unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;   in kick_cpus_irq_workfn()
    [all …]
core_sched.c
    277   rq_i = cpu_rq(i);   in __sched_core_account_forceidle()
stats.c
    121   rq = cpu_rq(cpu);   in show_schedstat()
syscalls.c
    204   struct rq *rq = cpu_rq(cpu);   in idle_cpu()
    243   return cpu_rq(cpu)->idle;   in idle_task()
    249   struct rq *rq = cpu_rq(cpu);   in sched_core_idle_cpu()
cpufreq_schedutil.c
    372   if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))   in sugov_hold_freq()
    395   if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)   in ignore_dl_rate_limit()
sched.h
    1353  #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))   macro
    1355  #define task_rq(p) cpu_rq(task_cpu(p))
    1356  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
    1460  if (sched_core_cookie_match(cpu_rq(cpu), p))   in sched_group_cookie_match()
    2008  for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
    3124  #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
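Line 1353 is the definition itself: cpu_rq() is just an accessor for the per-CPU runqueues variable, and task_rq()/cpu_curr() are thin wrappers over it. A rough illustration of how the three macros compose (hypothetical helper, kernel/sched/ context assumed):

    /*
     * Sketch only: the macros make per-CPU runqueue access read like plain
     * helpers; each comment shows the expansion from sched.h above.
     */
    static bool task_is_running_on(struct task_struct *p, int cpu)
    {
            struct rq *rq = cpu_rq(cpu);        /* &per_cpu(runqueues, cpu) */

            return task_rq(p) == rq &&          /* cpu_rq(task_cpu(p))      */
                   cpu_curr(cpu) == p;          /* cpu_rq(cpu)->curr        */
    }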
cputime.c
    985   rq = cpu_rq(cpu);   in kcpustat_field()
    1072  rq = cpu_rq(cpu);   in kcpustat_cpu_fetch()
topology.c
    414   struct root_domain *rd = cpu_rq(cpu)->rd;   in build_perf_domains()
    718   struct rq *rq = cpu_rq(cpu);   in cpu_attach_domain()
    2605  rq = cpu_rq(i);   in build_sched_domains()
    2837  cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {   in partition_sched_domains_locked()
psi.c
    804   lockdep_assert_rq_held(cpu_rq(cpu));   in psi_group_change()
    1235  guard(rq_lock_irq)(cpu_rq(cpu));   in psi_cgroup_restart()
ext_idle.c
    548   cpu_rq(cpu)->scx.local_dsq.nr == 0 &&   in scx_select_cpu_dfl()
/linux/tools/perf/Documentation/
perf-probe.txt
    234   … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …