Searched refs:cpu_of (Results 1 – 10 of 10) sorted by relevance

/linux/kernel/sched/
pelt.c
440 running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq))); in update_irq_load_avg()
441 running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq))); in update_irq_load_avg()
481 unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq)); in update_other_load_avgs()
pelt.h
123 delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq))); in update_rq_clock_pelt()
124 delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq))); in update_rq_clock_pelt()
ext.c
1888 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); in touch_core_sched()
2253 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq))); in scx_rq_online()
2266 if (sticky_cpu == cpu_of(rq)) in do_enqueue_task()
2395 sticky_cpu = cpu_of(rq); in enqueue_task_scx()
2418 unlikely(cpu_of(rq) != p->scx.selected_cpu)) in enqueue_task_scx()
2575 set_task_cpu(p, cpu_of(dst_rq)); in move_remote_task_to_local_dsq()
2576 p->scx.sticky_cpu = cpu_of(dst_rq); in move_remote_task_to_local_dsq()
2586 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)); in move_remote_task_to_local_dsq()
2616 int cpu = cpu_of(rq); in task_can_run_on_remote_rq()
2871 int node = cpu_to_node(cpu_of(rq)); in consume_global_dsq()
[all...]
fair.c
313 int cpu = cpu_of(rq); in list_add_leaf_cfs_rq()
1122 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
4094 if (!cpu_active(cpu_of(rq_of(cfs_rq)))) in update_tg_load_avg()
4101 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq))); in update_tg_load_avg()
4124 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq))); in clear_tg_load_avg()
4147 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in clear_tg_offline_cfs_rqs()
4538 now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle); in migrate_se_pelt_lag()
5087 int cpu = cpu_of(rq); in update_misfit_status()
5754 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
5783 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
[all...]
core_sched.c
244 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); in __sched_core_account_forceidle()
sched.h
1311 static inline int cpu_of(struct rq *rq) in cpu_of() function
1407 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { in sched_core_cookie_match()
2683 int cpu = cpu_of(rq); in sched_update_tick_dependency()
2803 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
3210 cpu_of(rq))); in cpufreq_update_util()
3324 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); in uclamp_rq_is_capped()
3649 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); in mm_cid_snapshot_time()
ext_idle.c
737 int cpu = cpu_of(rq); in __scx_update_idle()
772 SCX_CALL_OP(sch, SCX_KF_REST, update_idle, rq, cpu_of(rq), idle); in __scx_update_idle()
deadline.c
1415 int cpu = cpu_of(rq); in dl_scaled_delta_exec()
1663 int cpu = cpu_of(rq); in __dl_server_attach_root()
1666 dl_b = dl_bw_of(cpu_of(rq)); in __dl_server_attach_root()
1680 int cpu = cpu_of(rq); in dl_server_apply_params()
2861 src_dl_b = dl_bw_of(cpu_of(rq)); in set_cpus_allowed_dl()
rt.c
513 iter && (rt_rq = iter->rt_rq[cpu_of(rq)]); \
533 int cpu = cpu_of(rq); in sched_rt_rq_enqueue()
551 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); in sched_rt_rq_dequeue()
2430 if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
debug.c
390 cpu_of(rq)); in sched_fair_server_write()
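
Every match above passes a struct rq * to cpu_of() to recover the CPU number that runqueue belongs to. For orientation, a minimal sketch of the helper as it is conventionally defined in kernel/sched/sched.h (the definition hit at sched.h line 1311 above); the exact body may vary by kernel version and configuration:

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;		/* per-CPU runqueue: report which CPU it serves */
#else
	return 0;		/* uniprocessor build: only CPU 0 exists */
#endif
}

Callers such as update_irq_load_avg() and touch_core_sched() then feed the returned CPU id to per-CPU helpers like arch_scale_cpu_capacity() or sched_clock_cpu(), as the snippets above show.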