
Searched refs: this_rq (Results 1 – 12 of 12) sorted by relevance

/linux/kernel/sched/

loadavg.c
80 long calc_load_fold_active(struct rq *this_rq, long adjust) in calc_load_fold_active() argument
84 nr_active = this_rq->nr_running - adjust; in calc_load_fold_active()
85 nr_active += (long)this_rq->nr_uninterruptible; in calc_load_fold_active()
87 if (nr_active != this_rq->calc_load_active) { in calc_load_fold_active()
88 delta = nr_active - this_rq->calc_load_active; in calc_load_fold_active()
89 this_rq->calc_load_active = nr_active; in calc_load_fold_active()
253 calc_load_nohz_fold(this_rq()); in calc_load_nohz_start()
267 struct rq *this_rq = this_rq(); in calc_load_nohz_stop() local
272 this_rq->calc_load_update = READ_ONCE(calc_load_update); in calc_load_nohz_stop()
273 if (time_before(jiffies, this_rq->calc_load_update)) in calc_load_nohz_stop()
[all …]
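
The loadavg.c hits show the fold step of the global load average: each tick, a runqueue compares its current active count (runnable plus uninterruptible tasks) against the value it last reported and hands back only the difference. A minimal userspace sketch of that logic, assuming a stand-in struct rq with just the fields the snippet touches; the main() driver is invented for illustration:

#include <stdio.h>

/* Stand-in for the kernel's struct rq: only the fields used above. */
struct rq {
    long nr_running;
    long nr_uninterruptible;
    long calc_load_active;
};

/* Mirrors calc_load_fold_active(): return the change in this rq's
 * active count since the last fold and record the new baseline. */
static long calc_load_fold_active(struct rq *this_rq, long adjust)
{
    long nr_active, delta = 0;

    nr_active = this_rq->nr_running - adjust;
    nr_active += this_rq->nr_uninterruptible;

    if (nr_active != this_rq->calc_load_active) {
        delta = nr_active - this_rq->calc_load_active;
        this_rq->calc_load_active = nr_active;
    }
    return delta;
}

int main(void)
{
    struct rq rq = { .nr_running = 3, .nr_uninterruptible = 1 };

    printf("first fold:  %+ld\n", calc_load_fold_active(&rq, 0)); /* +4 */
    rq.nr_running = 2;
    printf("second fold: %+ld\n", calc_load_fold_active(&rq, 0)); /* -1 */
    return 0;
}

In the kernel the returned delta is accumulated into the global calc_load_tasks counter, so the per-tick cost stays at a single atomic add per runqueue.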

sched.h
104 extern void calc_global_load_tick(struct rq *this_rq);
105 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
1354 #define this_rq() this_cpu_ptr(&runqueues) macro
1915 rq = this_rq(); in this_rq_lock_irq()
2431 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2449 void (*switching_to) (struct rq *this_rq, struct task_struct *task);
2450 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2451 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2452 void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
2454 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
[all …]
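
Line 1354 is the definition every other hit resolves to: this_rq() is just this_cpu_ptr(&runqueues), a pointer into the calling CPU's slot of a per-CPU array of struct rq. A rough userspace model of that lookup, with a plain array standing in for the per-CPU area and a hypothetical my_cpu_id() in place of the real per-CPU base:

#include <stdio.h>

#define NR_CPUS 4

struct rq { int cpu; long nr_running; };

/* Stand-in for the per-CPU runqueues area. */
static struct rq runqueues[NR_CPUS];

/* Hypothetical: the kernel derives this from the per-CPU segment
 * register, which is why the result is only stable while the
 * caller cannot migrate. */
static int my_cpu_id(void) { return 1; }

/* Models "#define this_rq() this_cpu_ptr(&runqueues)". */
#define this_rq() (&runqueues[my_cpu_id()])

int main(void)
{
    for (int i = 0; i < NR_CPUS; i++)
        runqueues[i].cpu = i;

    struct rq *rq = this_rq();
    printf("this_rq() resolved to cpu %d's runqueue\n", rq->cpu);
    return 0;
}

That migration caveat is why this_rq_lock_irq() at line 1915 disables interrupts first and only then evaluates this_rq().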

rt.c
583 return this_rq()->rd->span; in sched_rt_period_mask()
2199 rq = this_rq(); in rto_push_irq_work_func()
2229 static void pull_rt_task(struct rq *this_rq) in pull_rt_task() argument
2231 int this_cpu = this_rq->cpu, cpu; in pull_rt_task()
2235 int rt_overload_count = rt_overloaded(this_rq); in pull_rt_task()
2248 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) in pull_rt_task()
2253 tell_cpu_to_push(this_rq); in pull_rt_task()
2258 for_each_cpu(cpu, this_rq->rd->rto_mask) { in pull_rt_task()
2272 this_rq->rt.highest_prio.curr) in pull_rt_task()
2281 double_lock_balance(this_rq, src_rq); in pull_rt_task()
[all …]
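
pull_rt_task() scans the CPUs flagged in rd->rto_mask and, for each overloaded source runqueue, takes both rq locks through double_lock_balance() (line 2281) before migrating anything. The subtlety is lock ordering: two CPUs pulling from each other must acquire the pair in the same order or they deadlock. A standalone sketch of that ordering rule, with pthread mutexes standing in for rq locks; note the real double_lock_balance() is entered with this_rq already locked and may drop and retake it to honor the order:

#include <pthread.h>
#include <stdio.h>

struct rq {
    int cpu;
    pthread_mutex_t lock;
};

/* Models the ordering rule behind double_lock_balance(): always take
 * the lower-numbered rq's lock first so concurrent pullers agree. */
static void double_lock_balance(struct rq *this_rq, struct rq *src_rq)
{
    struct rq *first = this_rq->cpu < src_rq->cpu ? this_rq : src_rq;
    struct rq *second = first == this_rq ? src_rq : this_rq;

    pthread_mutex_lock(&first->lock);
    pthread_mutex_lock(&second->lock);
}

static void double_unlock_balance(struct rq *this_rq, struct rq *src_rq)
{
    pthread_mutex_unlock(&this_rq->lock);
    pthread_mutex_unlock(&src_rq->lock);
}

int main(void)
{
    struct rq a = { .cpu = 0, .lock = PTHREAD_MUTEX_INITIALIZER };
    struct rq b = { .cpu = 1, .lock = PTHREAD_MUTEX_INITIALIZER };

    double_lock_balance(&b, &a);    /* cpu 1 pulling from cpu 0 */
    printf("both rq locks held; safe to move a task\n");
    double_unlock_balance(&b, &a);
    return 0;
}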

ext.c
1669 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, in consume_remote_task() argument
1672 raw_spin_rq_unlock(this_rq); in consume_remote_task()
1675 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); in consume_remote_task()
1679 raw_spin_rq_lock(this_rq); in consume_remote_task()
2003 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p); in finish_dispatch()
5124 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) in kick_one_cpu() argument
5127 struct scx_rq *this_scx = &this_rq->scx; in kick_one_cpu()
5137 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) { in kick_one_cpu()
5160 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq) in kick_one_cpu_if_idle() argument
5168 (cpu_online(cpu) || cpu == cpu_of(this_rq))) in kick_one_cpu_if_idle()
[all …]
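
consume_remote_task() takes the opposite approach to double locking: it releases this_rq's lock (line 1672), moves the task under the source runqueue's protection, and re-acquires this_rq afterwards (line 1679), so anything computed before the unlock may be stale once the lock is back. A hedged sketch of that drop-and-retake shape; the names and the single-counter "move" are illustrative, not the ext.c internals:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rq { pthread_mutex_t lock; int nr_running; };
struct task { int id; };

/* Models the consume_remote_task() shape: drop our rq lock, do the
 * cross-rq move, re-acquire, and let the caller revalidate. */
static bool consume_remote_task(struct rq *this_rq, struct task *p,
                                struct rq *src_rq)
{
    pthread_mutex_unlock(&this_rq->lock);   /* raw_spin_rq_unlock(this_rq) */

    pthread_mutex_lock(&src_rq->lock);
    src_rq->nr_running--;                   /* take p off the source... */
    pthread_mutex_unlock(&src_rq->lock);

    pthread_mutex_lock(&this_rq->lock);     /* raw_spin_rq_lock(this_rq) */
    this_rq->nr_running++;                  /* ...and onto this_rq */
    printf("task %d moved\n", p->id);
    return true;    /* caller must recheck state read before the drop */
}

int main(void)
{
    struct rq here  = { PTHREAD_MUTEX_INITIALIZER, 0 };
    struct rq there = { PTHREAD_MUTEX_INITIALIZER, 1 };
    struct task t = { .id = 42 };

    pthread_mutex_lock(&here.lock);
    consume_remote_task(&here, &t, &there);
    pthread_mutex_unlock(&here.lock);
    return 0;
}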

cputime.c
226 struct rq *rq = this_rq(); in account_idle_time()
261 steal -= this_rq()->prev_steal_time; in steal_account_process_time()
264 this_rq()->prev_steal_time += steal; in steal_account_process_time()
404 } else if (p == this_rq()->idle) { in irqtime_account_process_tick()
497 else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET)) in account_process_tick()
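
The steal-time hits (lines 261–264) are a cumulative-to-delta conversion: the hypervisor exposes total stolen time since boot, and this_rq()->prev_steal_time is the watermark of what has already been charged. A small sketch of the pattern, with a hypothetical read_cumulative_steal() standing in for paravirt_steal_clock():

#include <stdio.h>

struct rq { unsigned long long prev_steal_time; };

/* Hypothetical stand-in for paravirt_steal_clock(): a monotonically
 * increasing total of stolen time since boot. */
static unsigned long long read_cumulative_steal(void)
{
    static unsigned long long total;
    return total += 7;    /* pretend 7 units are stolen per query */
}

/* Mirrors the lines above: turn the cumulative value into the
 * increment for this interval and advance the watermark. */
static unsigned long long steal_account_process_time(struct rq *rq)
{
    unsigned long long steal = read_cumulative_steal();

    steal -= rq->prev_steal_time;
    rq->prev_steal_time += steal;
    return steal;
}

int main(void)
{
    struct rq rq = { 0 };

    for (int tick = 0; tick < 3; tick++)
        printf("tick %d: charge %llu stolen units\n", tick,
               steal_account_process_time(&rq));
    return 0;
}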

fair.c
4806 static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf);
6117 if (rq == this_rq()) { in __unthrottle_cfs_rq_async()
7872 this_rq()->nr_running <= 1 && in select_idle_sibling()
8374 struct root_domain *rd = this_rq()->rd; in find_energy_efficient_cpu()
8587 if (!is_rd_overutilized(this_rq()->rd)) { in select_task_rq_fair()
11736 static int sched_balance_rq(int this_cpu, struct rq *this_rq, in sched_balance_rq() argument
11749 .dst_rq = this_rq, in sched_balance_rq()
12489 WARN_ON_ONCE(rq != this_rq()); in nohz_balance_exit_idle()
12597 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags) in _nohz_idle_balance() argument
12604 int this_cpu = this_rq->cpu; in _nohz_idle_balance()
[all …]
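
Several fair.c hits reduce to one idiom: compare a target runqueue against this_rq() and take the cheap synchronous path when the work is local, deferring it when it is not (line 6117 is the clearest case). A schematic sketch of that decision; do_locally() and queue_for_remote() are invented placeholders for the inline work and the deferred hand-off:

#include <stdio.h>

#define NR_CPUS 4

struct rq { int cpu; };

static struct rq runqueues[NR_CPUS];
static int current_cpu = 2;    /* pretend we are executing on cpu 2 */

/* Function stand-in for the this_rq() macro from sched.h above. */
static struct rq *this_rq(void) { return &runqueues[current_cpu]; }

static void do_locally(struct rq *rq)       { printf("cpu%d: handled inline\n", rq->cpu); }
static void queue_for_remote(struct rq *rq) { printf("cpu%d: deferred\n", rq->cpu); }

/* Models the "if (rq == this_rq())" idiom: local work runs now,
 * remote work is queued for the owning CPU. */
static void unthrottle_async(struct rq *rq)
{
    if (rq == this_rq())
        do_locally(rq);
    else
        queue_for_remote(rq);
}

int main(void)
{
    for (int i = 0; i < NR_CPUS; i++)
        runqueues[i].cpu = i;

    unthrottle_async(&runqueues[2]);    /* local  -> inline */
    unthrottle_async(&runqueues[0]);    /* remote -> deferred */
    return 0;
}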

deadline.c
2741 static void pull_dl_task(struct rq *this_rq) in pull_dl_task() argument
2743 int this_cpu = this_rq->cpu, cpu; in pull_dl_task()
2749 if (likely(!dl_overloaded(this_rq))) in pull_dl_task()
2758 for_each_cpu(cpu, this_rq->rd->dlo_mask) { in pull_dl_task()
2768 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2769 dl_time_before(this_rq->dl.earliest_dl.curr, in pull_dl_task()
2775 double_lock_balance(this_rq, src_rq); in pull_dl_task()
2792 dl_task_is_earliest_deadline(p, this_rq)) { in pull_dl_task()
2807 move_queued_task_locked(src_rq, this_rq, p); in pull_dl_task()
2815 double_unlock_balance(this_rq, src_rq); in pull_dl_task()
[all …]
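
pull_dl_task() only bothers pulling when the source CPU's earliest deadline is earlier than this_rq's (lines 2768–2769). The comparison behind it, dl_time_before(), uses the same wraparound-safe trick as the time_before() seen in loadavg.c: subtract in the unsigned domain and test the sign. A self-contained sketch:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long u64;
typedef long long s64;

/* Same shape as the kernel's dl_time_before(): orders two u64
 * timestamps correctly across wraparound, provided they are less
 * than 2^63 apart. */
static bool dl_time_before(u64 a, u64 b)
{
    return (s64)(a - b) < 0;
}

int main(void)
{
    u64 near_wrap = ~0ULL - 5;    /* deadline just before the clock wraps */
    u64 wrapped   = 10;           /* deadline just after the wrap */

    /* A naive "<" orders these backwards; the signed-difference
     * test still gets it right. */
    printf("naive <        : %d\n", near_wrap < wrapped);                /* 0 */
    printf("dl_time_before : %d\n", dl_time_before(near_wrap, wrapped)); /* 1 */
    return 0;
}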

idle.c
24 idle_set_state(this_rq(), idle_state); in sched_idle_set_state()

membarrier.c
240 struct rq *rq = this_rq(); in membarrier_update_current_mm()

core.c
924 if (rq == this_rq()) in hrtick_start()
2532 struct rq *rq = this_rq(); in migration_cpu_stop()
2635 struct rq *lowest_rq = NULL, *rq = this_rq(); in push_cpu_stop()
3647 rq = this_rq(); in ttwu_stat()
3788 struct rq *rq = this_rq(); in sched_ttwu_pending()
5153 struct rq *rq = this_rq(); in finish_task_switch()
5910 schedstat_inc(this_rq()->sched_count); in schedule_debug()
8140 struct rq *rq = this_rq(); in __balance_push_cpu_stop()
8185 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
8256 struct rq *rq = this_rq(); in balance_hotplug_wait()
[all …]

syscalls.c
1428 rq = this_rq(); in yield_to()

/linux/tools/testing/selftests/bpf/progs/

test_access_variable_array.c
11 int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq, in BPF_PROG() argument
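
The selftest attaches an fentry program to a scheduler function and receives the same arguments the kernel passes in, including struct rq *this_rq; with BTF-typed arguments the pointer can be dereferenced directly. A rough sketch of such a program, assuming a vmlinux.h generated for the target kernel and mirroring the sched_balance_rq() signature from the fair.c hit above; the attach point and snapshot variable are illustrative, not the selftest's exact contents:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

unsigned int nr_running_snapshot;    /* read back by the userspace side */

/* Fires on entry to sched_balance_rq(); BPF_PROG unpacks the typed
 * arguments, and fentry programs may read this_rq's fields directly. */
SEC("fentry/sched_balance_rq")
int BPF_PROG(on_balance, int this_cpu, struct rq *this_rq)
{
    nr_running_snapshot = this_rq->nr_running;
    return 0;
}

char _license[] SEC("license") = "GPL";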