Lines Matching +full:timer +full:cannot +full:wake +full:cpu
1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1991-2002 Linus Torvalds
71 # include <linux/entry-common.h>
96 #include "../../io_uring/io-wq.h"
162 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
163 return -2; in __task_prio()
165 if (rt_prio(p->prio)) /* includes deadline */ in __task_prio()
166 return p->prio; /* [-1, 99] */ in __task_prio()
168 if (p->sched_class == &idle_sched_class) in __task_prio()
188 if (-pa < -pb) in prio_less()
191 if (-pb < -pa) in prio_less()
194 if (pa == -1) /* dl_prio() doesn't work because of stop_class above */ in prio_less()
195 return !dl_time_before(a->dl.deadline, b->dl.deadline); in prio_less()
206 if (a->core_cookie < b->core_cookie) in __sched_core_less()
209 if (a->core_cookie > b->core_cookie) in __sched_core_less()
213 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) in __sched_core_less()
231 if (cookie < p->core_cookie) in rb_sched_core_cmp()
232 return -1; in rb_sched_core_cmp()
234 if (cookie > p->core_cookie) in rb_sched_core_cmp()
242 rq->core->core_task_seq++; in sched_core_enqueue()
244 if (!p->core_cookie) in sched_core_enqueue()
247 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
252 rq->core->core_task_seq++; in sched_core_dequeue()
255 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
256 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
260 * Migrating the last task off the cpu, with the cpu in forced idle in sched_core_dequeue()
262 * and re-examine whether the core is still in forced idle state. in sched_core_dequeue()
264 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
265 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
269 static int sched_task_is_throttled(struct task_struct *p, int cpu) in sched_task_is_throttled() argument
271 if (p->sched_class->task_is_throttled) in sched_task_is_throttled()
272 return p->sched_class->task_is_throttled(p, cpu); in sched_task_is_throttled()
279 struct rb_node *node = &p->core_node; in sched_core_next()
280 int cpu = task_cpu(p); in sched_core_next() local
288 if (p->core_cookie != cookie) in sched_core_next()
291 } while (sched_task_is_throttled(p, cpu)); in sched_core_next()
297 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
305 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
310 if (!sched_task_is_throttled(p, rq->cpu)) in sched_core_find()
333 static void sched_core_lock(int cpu, unsigned long *flags) in sched_core_lock() argument
335 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in sched_core_lock()
340 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); in sched_core_lock()
343 static void sched_core_unlock(int cpu, unsigned long *flags) in sched_core_unlock() argument
345 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in sched_core_unlock()
349 raw_spin_unlock(&cpu_rq(t)->__lock); in sched_core_unlock()
356 int cpu, t; in __sched_core_flip() local
364 for_each_cpu(cpu, &sched_core_mask) { in __sched_core_flip()
365 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in __sched_core_flip()
367 sched_core_lock(cpu, &flags); in __sched_core_flip()
370 cpu_rq(t)->core_enabled = enabled; in __sched_core_flip()
372 cpu_rq(cpu)->core->core_forceidle_start = 0; in __sched_core_flip()
374 sched_core_unlock(cpu, &flags); in __sched_core_flip()
382 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask) in __sched_core_flip()
383 cpu_rq(cpu)->core_enabled = enabled; in __sched_core_flip()
390 int cpu; in sched_core_assert_empty() local
392 for_each_possible_cpu(cpu) in sched_core_assert_empty()
393 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); in sched_core_assert_empty()
448 if (!atomic_add_unless(&sched_core_count, -1, 1)) in sched_core_put()
465 * p->pi_lock
466 * rq->lock
467 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
469 * rq1->lock
470 * rq2->lock where: rq1 < rq2
474 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
475 * local CPU's rq->lock, it optionally removes the task from the runqueue and
479 * Task enqueue is also under rq->lock, possibly taken from another CPU.
481 * the local CPU to avoid bouncing the runqueue state around [ see
485 * complicated to avoid having to take two rq->locks.
489 * System-calls and anything external will use task_rq_lock() which acquires
490 * both p->pi_lock and rq->lock. As a consequence the state they change is
493 * - sched_setaffinity()/
494 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
495 * - set_user_nice(): p->se.load, p->*prio
496 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
497 * p->se.load, p->rt_priority,
498 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
499 * - sched_setnuma(): p->numa_preferred_nid
500 * - sched_move_task(): p->sched_task_group
501 * - uclamp_update_active() p->uclamp*
503 * p->state <- TASK_*:
507 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
510 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
513 * rq->lock. Non-zero indicates the task is runnable, the special
515 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
517 * p->on_cpu <- { 0, 1 }:
520 * set before p is scheduled-in and cleared after p is scheduled-out, both
521 * under rq->lock. Non-zero indicates the task is running on its CPU.
524 * CPU to have ->on_cpu = 1 at the same time. ]
528 * - Don't call set_task_cpu() on a blocked task:
530 * We don't care what CPU we're not running on, this simplifies hotplug,
531 * the CPU assignment of blocked tasks isn't required to be valid.
533 * - for try_to_wake_up(), called under p->pi_lock:
535 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
537 * - for migration called under rq->lock:
543 * - for migration called under double_rq_lock():
559 raw_spin_lock_nested(&rq->__lock, subclass); in raw_spin_rq_lock_nested()
585 ret = raw_spin_trylock(&rq->__lock); in raw_spin_rq_trylock()
608 * double_rq_lock - safely lock two runqueues
626 * __task_rq_lock - lock the rq @p resides on.
629 __acquires(rq->lock) in __task_rq_lock()
633 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
650 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
653 __acquires(p->pi_lock) in task_rq_lock()
654 __acquires(rq->lock) in task_rq_lock()
659 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
665 * ACQUIRE (rq->lock) in task_rq_lock()
666 * [S] ->on_rq = MIGRATING [L] rq = task_rq() in task_rq_lock()
667 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); in task_rq_lock()
668 * [S] ->cpu = new_cpu [L] task_rq() in task_rq_lock()
669 * [L] ->on_rq in task_rq_lock()
670 * RELEASE (rq->lock) in task_rq_lock()
672 * If we observe the old CPU in task_rq_lock(), the acquire of in task_rq_lock()
673 * the old rq->lock will fully serialize against the stores. in task_rq_lock()
675 * If we observe the new CPU in task_rq_lock(), the address in task_rq_lock()
684 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
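The locking rules quoted above (p->pi_lock outside, rq->lock inside, and two rq locks always taken in a fixed order) can be mirrored in a small userspace sketch. Everything below is invented for illustration (pthreads, fake_task/fake_rq names); the real task_rq_lock() additionally disables IRQs and re-checks task_rq(p) after acquiring the lock, which this sketch omits.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_rq   { pthread_mutex_t lock; int nr_running; };
struct fake_task { pthread_mutex_t pi_lock; struct fake_rq *rq; };

static void fake_task_rq_lock(struct fake_task *p)
{
	pthread_mutex_lock(&p->pi_lock);	/* outer: wakeup/affinity serialization */
	pthread_mutex_lock(&p->rq->lock);	/* inner: runqueue state */
}

static void fake_task_rq_unlock(struct fake_task *p)
{
	pthread_mutex_unlock(&p->rq->lock);
	pthread_mutex_unlock(&p->pi_lock);
}

static void fake_double_rq_lock(struct fake_rq *a, struct fake_rq *b)
{
	/* "rq1->lock ... rq2->lock where: rq1 < rq2" -- order by address */
	if ((uintptr_t)a > (uintptr_t)b) {
		struct fake_rq *t = a; a = b; b = t;
	}
	pthread_mutex_lock(&a->lock);
	if (b != a)
		pthread_mutex_lock(&b->lock);
}

int main(void)
{
	struct fake_rq rq0 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct fake_rq rq1 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct fake_task p = { PTHREAD_MUTEX_INITIALIZER, &rq0 };

	fake_task_rq_lock(&p);
	p.rq->nr_running++;
	fake_task_rq_unlock(&p);

	fake_double_rq_lock(&rq0, &rq1);
	pthread_mutex_unlock(&rq1.lock);
	pthread_mutex_unlock(&rq0.lock);

	printf("rq0.nr_running=%d\n", rq0.nr_running);
	return 0;
}
```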
692 * RQ-clock updating methods:
704 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
711 * When this happens, we stop ->clock_task and only update the in update_rq_clock_task()
713 * update will consume the rest. This ensures ->clock_task is in update_rq_clock_task()
716 * It does however cause some slight mis-attribution of {soft,}irq in update_rq_clock_task()
718 * the current rq->clock timestamp, except that would require using in update_rq_clock_task()
724 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
725 delta -= irq_delta; in update_rq_clock_task()
726 psi_account_irqtime(rq->curr, irq_delta); in update_rq_clock_task()
727 delayacct_irq(rq->curr, irq_delta); in update_rq_clock_task()
732 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
737 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
738 delta -= steal; in update_rq_clock_task()
742 rq->clock_task += delta; in update_rq_clock_task()
757 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
762 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
763 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
766 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
769 rq->clock += delta; in update_rq_clock()
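A toy model of the bookkeeping in update_rq_clock_task() above: the raw clock delta is trimmed by newly observed IRQ time and steal time (each clamped so that clock_task stays monotonic), and only the remainder advances clock_task. The struct and numbers below are invented for illustration, not kernel code.

```c
#include <stdint.h>
#include <stdio.h>

struct toy_rq {
	uint64_t clock;			/* sched_clock_cpu() view */
	uint64_t clock_task;		/* clock minus irq and steal time */
	uint64_t prev_irq_time;		/* irq time already accounted */
	uint64_t prev_steal_time_rq;	/* steal time already accounted */
};

static void toy_update_clock_task(struct toy_rq *rq, uint64_t delta,
				  uint64_t irq_time, uint64_t steal_clock)
{
	uint64_t irq_delta = irq_time - rq->prev_irq_time;

	if (irq_delta > delta)		/* clamp: keep clock_task monotonic */
		irq_delta = delta;
	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;

	uint64_t steal = steal_clock - rq->prev_steal_time_rq;

	if (steal > delta)
		steal = delta;
	rq->prev_steal_time_rq += steal;
	delta -= steal;

	rq->clock_task += delta;
}

int main(void)
{
	struct toy_rq rq = { 0, 0, 0, 0 };

	rq.clock += 1000;			/* 1000ns of wall time elapsed */
	toy_update_clock_task(&rq, 1000, 300, 100);
	printf("clock_task advanced by %llu ns\n",
	       (unsigned long long)rq.clock_task);	/* 1000 - 300 - 100 = 600 */
	return 0;
}
```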
775 * Use HR-timers to deliver accurate preemption points.
780 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
781 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
785 * High-resolution timer tick.
788 static enum hrtimer_restart hrtick(struct hrtimer *timer) in hrtick() argument
790 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick()
797 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
807 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart() local
808 ktime_t time = rq->hrtick_time; in __hrtick_restart()
810 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD); in __hrtick_restart()
827 * Called to set the hrtick timer state.
829 * called with rq->lock held and irqs disabled
833 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start() local
838 * doesn't make sense and can cause timer DoS. in hrtick_start()
841 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); in hrtick_start()
846 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
851 * Called to set the hrtick timer state.
853 * called with rq->lock held and irqs disabled
862 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
871 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); in hrtick_rq_init()
873 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
874 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
909 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); in set_nr_and_not_polling()
921 typeof(ti->flags) val = READ_ONCE(ti->flags); in set_nr_if_polling()
928 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)); in set_nr_if_polling()
950 struct wake_q_node *node = &task->wake_q; in __wake_q_add()
953 * Atomically grab the task, if ->wake_q is !nil already it means in __wake_q_add()
961 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
967 *head->lastp = node; in __wake_q_add()
968 head->lastp = &node->next; in __wake_q_add()
973 * wake_q_add() - queue a wakeup for 'later' waking.
981 * This function must be used as-if it were wake_up_process(); IOW the task
991 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
999 * This function must be used as-if it were wake_up_process(); IOW the task
1002 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1015 struct wake_q_node *node = head->first; in wake_up_q()
1021 /* Task can safely be re-inserted now: */ in wake_up_q()
1022 node = node->next; in wake_up_q()
1023 task->wake_q.next = NULL; in wake_up_q()
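A hedged userspace analogue of the wake_q protocol shown above: a node is claimed with a cmpxchg on ->next (NULL to sentinel) so that a second add of the same task is a no-op, claimed nodes are chained through the head's lastp pointer, and the flush walk clears ->next before "waking" so the node can safely be re-added. The types and the WQ_TAIL sentinel are stand-ins, not the kernel's definitions.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct wq_node { _Atomic(struct wq_node *) next; };
struct wq_head { _Atomic(struct wq_node *) first; _Atomic(struct wq_node *) *lastp; };

#define WQ_TAIL ((struct wq_node *)1)	/* stands in for WAKE_Q_TAIL */

static void wq_init(struct wq_head *h)
{
	atomic_store(&h->first, WQ_TAIL);
	h->lastp = &h->first;
}

static bool wq_add(struct wq_head *h, struct wq_node *n)
{
	struct wq_node *expect = NULL;

	/* Already queued somewhere? Then the earlier add covers this wakeup. */
	if (!atomic_compare_exchange_strong(&n->next, &expect, WQ_TAIL))
		return false;

	atomic_store(h->lastp, n);	/* append; callers serialize additions */
	h->lastp = &n->next;
	return true;
}

static void wq_flush(struct wq_head *h)
{
	struct wq_node *n = atomic_load(&h->first);

	while (n != WQ_TAIL) {
		struct wq_node *next = atomic_load(&n->next);

		atomic_store(&n->next, NULL);	/* node can be re-added now */
		printf("wake %p\n", (void *)n);	/* wake_up_process() in the kernel */
		n = next;
	}
	wq_init(h);
}

int main(void)
{
	struct wq_head head;
	struct wq_node a = { NULL }, b = { NULL };

	wq_init(&head);
	wq_add(&head, &a);
	wq_add(&head, &b);
	wq_add(&head, &a);	/* duplicate add: ignored, as documented above */
	wq_flush(&head);
	return 0;
}
```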
1035 * resched_curr - mark rq's current task 'to be rescheduled now'.
1038 * might also involve a cross-CPU call to trigger the scheduler on
1039 * the target CPU.
1043 struct task_struct *curr = rq->curr; in resched_curr()
1044 int cpu; in resched_curr() local
1051 cpu = cpu_of(rq); in resched_curr()
1053 if (cpu == smp_processor_id()) { in resched_curr()
1060 smp_send_reschedule(cpu); in resched_curr()
1062 trace_sched_wake_idle_without_ipi(cpu); in resched_curr()
1065 void resched_cpu(int cpu) in resched_cpu() argument
1067 struct rq *rq = cpu_rq(cpu); in resched_cpu()
1071 if (cpu_online(cpu) || cpu == smp_processor_id()) in resched_cpu()
1079 * In the semi idle case, use the nearest busy CPU for migrating timers
1080 * from an idle CPU. This is good for power-savings.
1083 * selecting an idle CPU will add more delays to the timers than intended
1084 * (as that CPU's timer base may not be up to date w.r.t. jiffies etc).
1088 int i, cpu = smp_processor_id(), default_cpu = -1; in get_nohz_timer_target() local
1092 if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) { in get_nohz_timer_target()
1093 if (!idle_cpu(cpu)) in get_nohz_timer_target()
1094 return cpu; in get_nohz_timer_target()
1095 default_cpu = cpu; in get_nohz_timer_target()
1102 for_each_domain(cpu, sd) { in get_nohz_timer_target()
1104 if (cpu == i) in get_nohz_timer_target()
1112 if (default_cpu == -1) in get_nohz_timer_target()
1119 * When add_timer_on() enqueues a timer into the timer wheel of an
1120 * idle CPU then this timer might expire before the next timer event
1121 * which is scheduled to wake up that CPU. In case of a completely
1123 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1124 * leaves the inner idle loop so the newly added timer is taken into
1125 * account when the CPU goes back to idle and evaluates the timer
1126 * wheel for the next timer event.
1128 static void wake_up_idle_cpu(int cpu) in wake_up_idle_cpu() argument
1130 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu()
1132 if (cpu == smp_processor_id()) in wake_up_idle_cpu()
1136 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling in wake_up_idle_cpu()
1140 * re-evaluate the next tick. Provided some re-ordering of tick in wake_up_idle_cpu()
1144 * - On most archs, a simple fetch_or on ti::flags with a in wake_up_idle_cpu()
1147 * - x86 needs to perform a last need_resched() check between in wake_up_idle_cpu()
1153 * However, remote timer enqueue is not such a frequent event in wake_up_idle_cpu()
1157 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
1158 smp_send_reschedule(cpu); in wake_up_idle_cpu()
1160 trace_sched_wake_idle_without_ipi(cpu); in wake_up_idle_cpu()
1163 static bool wake_up_full_nohz_cpu(int cpu) in wake_up_full_nohz_cpu() argument
1166 * We just need the target to call irq_exit() and re-evaluate in wake_up_full_nohz_cpu()
1171 if (cpu_is_offline(cpu)) in wake_up_full_nohz_cpu()
1172 return true; /* Don't try to wake offline CPUs. */ in wake_up_full_nohz_cpu()
1173 if (tick_nohz_full_cpu(cpu)) { in wake_up_full_nohz_cpu()
1174 if (cpu != smp_processor_id() || in wake_up_full_nohz_cpu()
1176 tick_nohz_full_kick_cpu(cpu); in wake_up_full_nohz_cpu()
1184 * Wake up the specified CPU. If the CPU is going offline, it is the
1188 void wake_up_nohz_cpu(int cpu) in wake_up_nohz_cpu() argument
1190 if (!wake_up_full_nohz_cpu(cpu)) in wake_up_nohz_cpu()
1191 wake_up_idle_cpu(cpu); in wake_up_nohz_cpu()
1197 int cpu = cpu_of(rq); in nohz_csd_func() local
1203 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu)); in nohz_csd_func()
1206 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
1207 if (rq->idle_balance && !need_resched()) { in nohz_csd_func()
1208 rq->nohz_idle_balance = flags; in nohz_csd_func()
1218 if (rq->nr_running != 1) in __need_bw_check()
1221 if (p->sched_class != &fair_sched_class) in __need_bw_check()
1235 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
1242 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
1243 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
1253 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
1262 if (rq->nr_running > 1) in sched_can_stop_tick()
1267 * and it's on the cpu now we don't want to stop the tick. in sched_can_stop_tick()
1270 * E.g. going from 2->1 without going through pick_next_task(). in sched_can_stop_tick()
1272 if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) { in sched_can_stop_tick()
1273 if (cfs_task_bw_constrained(rq->curr)) in sched_can_stop_tick()
1302 list_for_each_entry_rcu(child, &parent->children, siblings) { in walk_tg_tree_from()
1314 parent = parent->parent; in walk_tg_tree_from()
1329 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1330 struct load_weight *load = &p->se.load; in set_load_weight()
1336 load->weight = scale_load(WEIGHT_IDLEPRIO); in set_load_weight()
1337 load->inv_weight = WMULT_IDLEPRIO; in set_load_weight()
1345 if (update_load && p->sched_class == &fair_sched_class) { in set_load_weight()
1348 load->weight = scale_load(sched_prio_to_weight[prio]); in set_load_weight()
1349 load->inv_weight = sched_prio_to_wmult[prio]; in set_load_weight()
1357 * The (slow-path) user-space triggers utilization clamp value updates which
1358 * can require updates on (fast-path) scheduler's data structures used to
1360 * While the per-CPU rq lock protects fast-path update operations, user-space
1382 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1408 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1420 return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1); in uclamp_bucket_id()
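The bucket mapping in uclamp_bucket_id() above is plain integer arithmetic. Assuming the default configuration (SCHED_CAPACITY_SCALE = 1024 and five buckets, so UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 5) = 205), a clamp value of 512 lands in bucket 2 and 1024 saturates at the last bucket; a different CONFIG_UCLAMP_BUCKETS_COUNT changes the numbers.

```c
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UCLAMP_BUCKETS		5	/* assumed default bucket count */
#define UCLAMP_BUCKET_DELTA	((SCHED_CAPACITY_SCALE + UCLAMP_BUCKETS / 2) / UCLAMP_BUCKETS)

static unsigned int bucket_id(unsigned int clamp_value)
{
	unsigned int id = clamp_value / UCLAMP_BUCKET_DELTA;

	return id < UCLAMP_BUCKETS - 1 ? id : UCLAMP_BUCKETS - 1;
}

int main(void)
{
	/* prints "0 2 4": 512/205 = 2, and 1024/205 = 4 is already the last bucket */
	printf("%u %u %u\n", bucket_id(0), bucket_id(512), bucket_id(1024));
	return 0;
}
```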
1433 uc_se->value = value; in uclamp_se_set()
1434 uc_se->bucket_id = uclamp_bucket_id(value); in uclamp_se_set()
1435 uc_se->user_defined = user_defined; in uclamp_se_set()
1444 * idle (which drops the max-clamp) by retaining the last known in uclamp_idle_value()
1445 * max-clamp. in uclamp_idle_value()
1448 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
1458 /* Reset max-clamp retention only on idle exit */ in uclamp_idle_reset()
1459 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
1469 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1470 int bucket_id = UCLAMP_BUCKETS - 1; in uclamp_rq_max_value()
1476 for ( ; bucket_id >= 0; bucket_id--) { in uclamp_rq_max_value()
1482 /* No tasks -- default clamp values */ in uclamp_rq_max_value()
1491 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1493 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1496 if (uc_se->user_defined) in __uclamp_update_util_min_rt_default()
1508 /* Protect updates to p->uclamp_* */ in uclamp_update_util_min_rt_default()
1517 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1530 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1531 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1543 * - the task specific clamp value, when explicitly requested from userspace
1544 * - the task group effective clamp value, for tasks not either in the root
1546 * - the system default clamp value, defined by the sysadmin
1565 /* Task currently refcounted: use back-annotated (effective) value */ in uclamp_eff_value()
1566 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1567 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
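The precedence listed above (task request, task-group window, system default) composes roughly as "clamp to the group's range, then cap by the system default". The sketch below is a simplified model with invented values, not the kernel's uclamp_tg_restrict()/uclamp_eff_get() code paths.

```c
#include <stdio.h>

static unsigned int clamp_range(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int uclamp_effective(unsigned int task_req,
				     unsigned int tg_min, unsigned int tg_max,
				     unsigned int sys_default)
{
	/* narrow the request to the task group's window ... */
	unsigned int v = clamp_range(task_req, tg_min, tg_max);

	/* ... then let the system default restriction always apply */
	return v > sys_default ? sys_default : v;
}

int main(void)
{
	/* task asks for 900, its group allows [0, 512], system default is 1024 */
	printf("%u\n", uclamp_effective(900, 0, 512, 1024));	/* prints 512 */
	return 0;
}
```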
1579 * Tasks can have a task-specific value requested from user-space, track
1587 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1588 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1594 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1596 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_inc_id()
1597 bucket->tasks++; in uclamp_rq_inc_id()
1598 uc_se->active = true; in uclamp_rq_inc_id()
1600 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1606 if (bucket->tasks == 1 || uc_se->value > bucket->value) in uclamp_rq_inc_id()
1607 bucket->value = uc_se->value; in uclamp_rq_inc_id()
1609 if (uc_se->value > uclamp_rq_get(rq, clamp_id)) in uclamp_rq_inc_id()
1610 uclamp_rq_set(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1625 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1626 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1637 * In this case the uc_se->active flag should be false since no uclamp in uclamp_rq_dec_id()
1648 * // Must not decrement bucket->tasks here in uclamp_rq_dec_id()
1652 * bucket[uc_se->bucket_id]. in uclamp_rq_dec_id()
1656 if (unlikely(!uc_se->active)) in uclamp_rq_dec_id()
1659 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_dec_id()
1661 SCHED_WARN_ON(!bucket->tasks); in uclamp_rq_dec_id()
1662 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1663 bucket->tasks--; in uclamp_rq_dec_id()
1665 uc_se->active = false; in uclamp_rq_dec_id()
1673 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1681 SCHED_WARN_ON(bucket->value > rq_clamp); in uclamp_rq_dec_id()
1682 if (bucket->value >= rq_clamp) { in uclamp_rq_dec_id()
1683 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1701 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1708 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1709 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1725 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1735 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1745 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_rq_reinc_id()
1746 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_reinc_id()
1801 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1803 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
1862 result = -EINVAL; in sysctl_sched_uclamp_handler()
1906 int util_min = p->uclamp_req[UCLAMP_MIN].value; in uclamp_validate()
1907 int util_max = p->uclamp_req[UCLAMP_MAX].value; in uclamp_validate()
1909 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) { in uclamp_validate()
1910 util_min = attr->sched_util_min; in uclamp_validate()
1913 return -EINVAL; in uclamp_validate()
1916 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) { in uclamp_validate()
1917 util_max = attr->sched_util_max; in uclamp_validate()
1920 return -EINVAL; in uclamp_validate()
1923 if (util_min != -1 && util_max != -1 && util_min > util_max) in uclamp_validate()
1924 return -EINVAL; in uclamp_validate()
1930 * blocking operation which obviously cannot be done while holding in uclamp_validate()
1942 /* Reset on sched class change for a non user-defined clamp value. */ in uclamp_reset()
1943 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) && in uclamp_reset()
1944 !uc_se->user_defined) in uclamp_reset()
1947 /* Reset on sched_util_{min,max} == -1. */ in uclamp_reset()
1949 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN && in uclamp_reset()
1950 attr->sched_util_min == -1) { in uclamp_reset()
1955 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX && in uclamp_reset()
1956 attr->sched_util_max == -1) { in uclamp_reset()
1969 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; in __setscheduler_uclamp()
1988 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP))) in __setscheduler_uclamp()
1991 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN && in __setscheduler_uclamp()
1992 attr->sched_util_min != -1) { in __setscheduler_uclamp()
1993 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], in __setscheduler_uclamp()
1994 attr->sched_util_min, true); in __setscheduler_uclamp()
1997 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX && in __setscheduler_uclamp()
1998 attr->sched_util_max != -1) { in __setscheduler_uclamp()
1999 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], in __setscheduler_uclamp()
2000 attr->sched_util_max, true); in __setscheduler_uclamp()
2009 * We don't need to hold task_rq_lock() when updating p->uclamp_* here in uclamp_fork()
2013 p->uclamp[clamp_id].active = false; in uclamp_fork()
2015 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
2019 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
2032 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
2040 rq->uclamp_flags = UCLAMP_FLAG_IDLE; in init_uclamp_rq()
2047 int cpu; in init_uclamp() local
2049 for_each_possible_cpu(cpu) in init_uclamp()
2050 init_uclamp_rq(cpu_rq(cpu)); in init_uclamp()
2074 return -EOPNOTSUPP; in uclamp_validate()
2097 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2098 state = READ_ONCE(p->__state); in get_wchan()
2100 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2102 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2118 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2138 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2150 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); in activate_task()
2151 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in activate_task()
2156 WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING); in deactivate_task()
2157 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in deactivate_task()
2167 prio = MAX_DL_PRIO - 1; in __normal_prio()
2169 prio = MAX_RT_PRIO - 1 - rt_prio; in __normal_prio()
2178 * without taking RT-inheritance into account. Might be
2185 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); in normal_prio()
2193 * RT-boosted. If not then it returns p->normal_prio.
2197 p->normal_prio = normal_prio(p); in effective_prio()
2203 if (!rt_prio(p->prio)) in effective_prio()
2204 return p->normal_prio; in effective_prio()
2205 return p->prio; in effective_prio()
2209 * task_curr - is this task currently executing on a CPU?
2220 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2230 if (prev_class != p->sched_class) { in check_class_changed()
2231 if (prev_class->switched_from) in check_class_changed()
2232 prev_class->switched_from(rq, p); in check_class_changed()
2234 p->sched_class->switched_to(rq, p); in check_class_changed()
2235 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2236 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2241 if (p->sched_class == rq->curr->sched_class) in wakeup_preempt()
2242 rq->curr->sched_class->wakeup_preempt(rq, p, flags); in wakeup_preempt()
2243 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) in wakeup_preempt()
2250 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in wakeup_preempt()
2257 if (READ_ONCE(p->__state) & state) in __task_state_match()
2260 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2261 return -1; in __task_state_match()
2273 guard(raw_spinlock_irq)(&p->pi_lock); in task_state_match()
2278 * wait_task_inactive - wait for a thread to unschedule.
2282 * succeed in waiting for @p to be off its CPU, we return a positive number
2303 * any task-queue locks at all. We'll only try to get in wait_task_inactive()
2310 * If the task is actively running on another CPU in wait_task_inactive()
2311 * still, just relax and busy-wait without holding in wait_task_inactive()
2338 * When matching on p->saved_state, consider this task in wait_task_inactive()
2343 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2371 * yield - it could be a while. in wait_task_inactive()
2403 .new_mask = cpumask_of(rq->cpu), in migrate_disable_switch()
2407 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2410 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2423 if (p->migration_disabled) { in migrate_disable()
2424 p->migration_disabled++; in migrate_disable()
2429 this_rq()->nr_pinned++; in migrate_disable()
2430 p->migration_disabled = 1; in migrate_disable()
2438 .new_mask = &p->cpus_mask, in migrate_enable()
2442 if (p->migration_disabled > 1) { in migrate_enable()
2443 p->migration_disabled--; in migrate_enable()
2447 if (WARN_ON_ONCE(!p->migration_disabled)) in migrate_enable()
2455 if (p->cpus_ptr != &p->cpus_mask) in migrate_enable()
2463 p->migration_disabled = 0; in migrate_enable()
2464 this_rq()->nr_pinned--; in migrate_enable()
2470 return rq->nr_pinned; in rq_has_pinned_tasks()
2474 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2477 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) in is_cpu_allowed() argument
2480 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in is_cpu_allowed()
2485 return cpu_online(cpu); in is_cpu_allowed()
2488 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2489 return cpu_active(cpu) && task_cpu_possible(cpu, p); in is_cpu_allowed()
2493 return cpu_online(cpu); in is_cpu_allowed()
2496 if (cpu_dying(cpu)) in is_cpu_allowed()
2500 return cpu_online(cpu); in is_cpu_allowed()
2506 * 1) we invoke migration_cpu_stop() on the target CPU using
2509 * off the CPU)
2518 * move_queued_task - move a queued task to new rq.
2560 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2562 * away from this CPU, or CPU going down), or because we're
2566 * as the task is no longer on this CPU.
2581 * migration_cpu_stop - this will be executed by a highprio stopper thread
2582 * and performs thread migration by bumping thread off CPU then
2588 struct set_affinity_pending *pending = arg->pending; in migration_cpu_stop()
2589 struct task_struct *p = arg->task; in migration_cpu_stop()
2595 * The original target CPU might have gone down and we might in migration_cpu_stop()
2596 * be on another CPU but it doesn't matter. in migration_cpu_stop()
2600 * We need to explicitly wake pending tasks before running in migration_cpu_stop()
2606 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2610 * If we were passed a pending, then ->stop_pending was set, thus in migration_cpu_stop()
2611 * p->migration_pending must have remained stable. in migration_cpu_stop()
2613 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2616 * If task_rq(p) != rq, it cannot be migrated here, because we're in migration_cpu_stop()
2617 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because in migration_cpu_stop()
2618 * we're holding p->pi_lock. in migration_cpu_stop()
2625 p->migration_pending = NULL; in migration_cpu_stop()
2628 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2634 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2636 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2641 * up running on a dodgy CPU, AFAICT this can only happen in migration_cpu_stop()
2642 * during CPU hotplug, at which point we'll get pushed out in migration_cpu_stop()
2658 * ->pi_lock, so the allowed mask is stable - if it got in migration_cpu_stop()
2661 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2662 p->migration_pending = NULL; in migration_cpu_stop()
2668 * When migrate_enable() hits a rq mis-match we can't reliably in migration_cpu_stop()
2672 WARN_ON_ONCE(!pending->stop_pending); in migration_cpu_stop()
2676 &pending->arg, &pending->stop_work); in migration_cpu_stop()
2682 pending->stop_pending = false; in migration_cpu_stop()
2686 complete_all(&pending->done); in migration_cpu_stop()
2696 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2703 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2707 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2709 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2710 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2718 set_task_cpu(p, lowest_rq->cpu); in push_cpu_stop()
2726 rq->push_busy = false; in push_cpu_stop()
2728 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2740 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { in set_cpus_allowed_common()
2741 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
2745 cpumask_copy(&p->cpus_mask, ctx->new_mask); in set_cpus_allowed_common()
2746 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
2751 if (ctx->flags & SCA_USER) in set_cpus_allowed_common()
2752 swap(p->user_cpus_ptr, ctx->user_mask); in set_cpus_allowed_common()
2763 * supposed to change these variables while holding both rq->lock and in __do_set_cpus_allowed()
2764 * p->pi_lock. in __do_set_cpus_allowed()
2767 * accesses these variables under p->pi_lock and only does so after in __do_set_cpus_allowed()
2768 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() in __do_set_cpus_allowed()
2773 if (ctx->flags & SCA_MIGRATE_DISABLE) in __do_set_cpus_allowed()
2774 SCHED_WARN_ON(!p->on_cpu); in __do_set_cpus_allowed()
2776 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2784 * holding rq->lock. in __do_set_cpus_allowed()
2792 p->sched_class->set_cpus_allowed(p, ctx); in __do_set_cpus_allowed()
2819 * Because this is called with p->pi_lock held, it is not possible in do_set_cpus_allowed()
2843 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's in dup_user_cpus_ptr()
2846 dst->user_cpus_ptr = NULL; in dup_user_cpus_ptr()
2853 if (data_race(!src->user_cpus_ptr)) in dup_user_cpus_ptr()
2858 return -ENOMEM; in dup_user_cpus_ptr()
2866 raw_spin_lock_irqsave(&src->pi_lock, flags); in dup_user_cpus_ptr()
2867 if (src->user_cpus_ptr) { in dup_user_cpus_ptr()
2868 swap(dst->user_cpus_ptr, user_mask); in dup_user_cpus_ptr()
2869 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); in dup_user_cpus_ptr()
2871 raw_spin_unlock_irqrestore(&src->pi_lock, flags); in dup_user_cpus_ptr()
2883 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2898 * designated task is enqueued on an allowed CPU. If that task is currently
2899 * running, we have to kick it out using the CPU stopper.
2901 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2904 * Initial conditions: P0->cpus_mask = [0, 1]
2912 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2913 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2926 * `--> <woken on migration completion>
2928 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2929 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2930 * task p are serialized by p->pi_lock, which we can leverage: the one that
2931 * should come into effect at the end of the Migrate-Disable region is the last
2932 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2937 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2940 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2941 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2947 * Migrate-Disable. Consider:
2949 * Initial conditions: P0->cpus_mask = [0, 1]
2967 * p->migration_pending done with p->pi_lock held.
2971 __releases(rq->lock) in affine_move_task()
2972 __releases(p->pi_lock) in affine_move_task()
2977 /* Can the task run on the task's current CPU? If so, we're done */ in affine_move_task()
2978 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
2982 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2983 rq->push_busy = true; in affine_move_task()
2991 pending = p->migration_pending; in affine_move_task()
2992 if (pending && !pending->stop_pending) { in affine_move_task()
2993 p->migration_pending = NULL; in affine_move_task()
3000 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in affine_move_task()
3001 p, &rq->push_work); in affine_move_task()
3006 complete_all(&pending->done); in affine_move_task()
3012 /* serialized by p->pi_lock */ in affine_move_task()
3013 if (!p->migration_pending) { in affine_move_task()
3023 p->migration_pending = &my_pending; in affine_move_task()
3025 pending = p->migration_pending; in affine_move_task()
3026 refcount_inc(&pending->refs); in affine_move_task()
3031 * task on a disallowed CPU. in affine_move_task()
3033 * Serialized by p->pi_lock, so this is safe. in affine_move_task()
3035 pending->arg.dest_cpu = dest_cpu; in affine_move_task()
3038 pending = p->migration_pending; in affine_move_task()
3040 * - !MIGRATE_ENABLE: in affine_move_task()
3043 * - MIGRATE_ENABLE: in affine_move_task()
3044 * we're here because the current CPU isn't matching anymore, in affine_move_task()
3053 return -EINVAL; in affine_move_task()
3056 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
3059 * anything else we cannot do is_migration_disabled(), punt in affine_move_task()
3060 * and have the stopper function handle it all race-free. in affine_move_task()
3062 stop_pending = pending->stop_pending; in affine_move_task()
3064 pending->stop_pending = true; in affine_move_task()
3067 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
3073 &pending->arg, &pending->stop_work); in affine_move_task()
3085 if (!pending->stop_pending) { in affine_move_task()
3086 p->migration_pending = NULL; in affine_move_task()
3093 complete_all(&pending->done); in affine_move_task()
3096 wait_for_completion(&pending->done); in affine_move_task()
3098 if (refcount_dec_and_test(&pending->refs)) in affine_move_task()
3099 wake_up_var(&pending->refs); /* No UaF, just an address */ in affine_move_task()
3114 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3120 __releases(rq->lock) in __set_cpus_allowed_ptr_locked()
3121 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3125 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3134 * however, during cpu-hot-unplug, even these might get pushed in __set_cpus_allowed_ptr_locked()
3140 * set_cpus_allowed_common() and actually reset p->cpus_ptr. in __set_cpus_allowed_ptr_locked()
3145 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { in __set_cpus_allowed_ptr_locked()
3146 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3151 * Must re-check here, to close a race against __kthread_bind(), in __set_cpus_allowed_ptr_locked()
3154 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3155 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3159 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { in __set_cpus_allowed_ptr_locked()
3160 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { in __set_cpus_allowed_ptr_locked()
3161 if (ctx->flags & SCA_USER) in __set_cpus_allowed_ptr_locked()
3162 swap(p->user_cpus_ptr, ctx->user_mask); in __set_cpus_allowed_ptr_locked()
3168 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3169 ret = -EBUSY; in __set_cpus_allowed_ptr_locked()
3175 * Picking a ~random cpu helps in cases where we are changing affinity in __set_cpus_allowed_ptr_locked()
3179 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); in __set_cpus_allowed_ptr_locked()
3181 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3187 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); in __set_cpus_allowed_ptr_locked()
3196 * Change a given task's CPU affinity. Migrate the thread to a
3197 * proper CPU and schedule it away if the CPU it's executing on
3215 if (p->user_cpus_ptr && in __set_cpus_allowed_ptr()
3216 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && in __set_cpus_allowed_ptr()
3217 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) in __set_cpus_allowed_ptr()
3218 ctx->new_mask = rq->scratch_mask; in __set_cpus_allowed_ptr()
3235 * Change a given task's CPU affinity to the intersection of its current
3237 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3241 * -EINVAL.
3263 err = -EPERM; in restrict_cpus_allowed_ptr()
3268 err = -EINVAL; in restrict_cpus_allowed_ptr()
3280 * Restrict the CPU affinity of task @p so that it is a subset of
3281 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3294 * offlining of the chosen destination CPU, so take the hotplug in force_compatible_cpus_allowed_ptr()
3314 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3353 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3359 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3362 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, in set_task_cpu()
3364 * time relying on p->on_rq. in set_task_cpu()
3367 p->sched_class == &fair_sched_class && in set_task_cpu()
3368 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3372 * The caller should hold either p->pi_lock or rq->lock, when changing in set_task_cpu()
3373 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. in set_task_cpu()
3381 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3395 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3396 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3397 p->se.nr_migrations++; in set_task_cpu()
3407 static void __migrate_swap_task(struct task_struct *p, int cpu) in __migrate_swap_task() argument
3414 dst_rq = cpu_rq(cpu); in __migrate_swap_task()
3420 set_task_cpu(p, cpu); in __migrate_swap_task()
3431 * previous CPU our target instead of where it really is. in __migrate_swap_task()
3433 p->wake_cpu = cpu; in __migrate_swap_task()
3447 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) in migrate_swap_stop()
3448 return -EAGAIN; in migrate_swap_stop()
3450 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
3451 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
3453 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); in migrate_swap_stop()
3456 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
3457 return -EAGAIN; in migrate_swap_stop()
3459 if (task_cpu(arg->src_task) != arg->src_cpu) in migrate_swap_stop()
3460 return -EAGAIN; in migrate_swap_stop()
3462 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) in migrate_swap_stop()
3463 return -EAGAIN; in migrate_swap_stop()
3465 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) in migrate_swap_stop()
3466 return -EAGAIN; in migrate_swap_stop()
3468 __migrate_swap_task(arg->src_task, arg->dst_cpu); in migrate_swap_stop()
3469 __migrate_swap_task(arg->dst_task, arg->src_cpu); in migrate_swap_stop()
3481 int ret = -EINVAL; in migrate_swap()
3495 * will be re-checked with proper locks held further down the line. in migrate_swap()
3500 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) in migrate_swap()
3503 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) in migrate_swap()
3515 * kick_process - kick a running thread to enter/exit the kernel
3516 * @p: the to-be-kicked thread
3518 * Cause a process which is running on another CPU to enter
3519 * kernel-mode, without any delay. (to get signals handled.)
3524 * to another CPU then no harm is done and the purpose has been
3530 int cpu = task_cpu(p); in kick_process() local
3532 if ((cpu != smp_processor_id()) && task_curr(p)) in kick_process()
3533 smp_send_reschedule(cpu); in kick_process()
3538 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3542 * - cpu_active must be a subset of cpu_online
3544 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3546 * CPU isn't yet part of the sched domains, and balancing will not
3549 * - on CPU-down we clear cpu_active() to mask the sched domains and
3551 * CPU. Existing tasks will remain running there and will be taken
3555 * And can assume that any active CPU must be online. Conversely
3559 static int select_fallback_rq(int cpu, struct task_struct *p) in select_fallback_rq() argument
3561 int nid = cpu_to_node(cpu); in select_fallback_rq()
3567 * If the node that the CPU is on has been offlined, cpu_to_node() in select_fallback_rq()
3568 * will return -1. There is no CPU on the node, and we should in select_fallback_rq()
3569 * select the CPU on the other node. in select_fallback_rq()
3571 if (nid != -1) { in select_fallback_rq()
3574 /* Look for allowed, online CPU in same node. */ in select_fallback_rq()
3582 /* Any allowed, online CPU? */ in select_fallback_rq()
3583 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3601 * hold p->pi_lock and again violate locking order. in select_fallback_rq()
3621 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3622 printk_deferred("process %d (%s) no longer affine to cpu%d\n", in select_fallback_rq()
3623 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
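The visible parts of select_fallback_rq() above suggest the preference order: an allowed and online CPU on the task's previous node first, then any allowed and online CPU, and only after that the wider fallback steps (cpuset fallback, possible mask) that do not appear in this listing. A toy version over plain arrays, with all data invented:

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static int fallback_cpu(const bool allowed[NR_CPUS],
			const bool online[NR_CPUS],
			const int  node_of[NR_CPUS], int prev_node)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)		/* same node first */
		if (node_of[cpu] == prev_node && allowed[cpu] && online[cpu])
			return cpu;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)		/* then any allowed+online */
		if (allowed[cpu] && online[cpu])
			return cpu;

	return -1;					/* caller would widen the mask */
}

int main(void)
{
	bool allowed[NR_CPUS] = { false, true,  true, false };
	bool online[NR_CPUS]  = { true,  false, true, true  };
	int  node_of[NR_CPUS] = { 0, 0, 1, 1 };

	printf("fallback: CPU%d\n", fallback_cpu(allowed, online, node_of, 0)); /* CPU2 */
	return 0;
}
```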
3631 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3634 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) in select_task_rq() argument
3636 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3638 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) in select_task_rq()
3639 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); in select_task_rq()
3641 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3645 * to rely on ttwu() to place the task on a valid ->cpus_ptr in select_task_rq()
3646 * CPU. in select_task_rq()
3650 * [ this allows ->select_task() to simply return task_cpu(p) and in select_task_rq()
3653 if (unlikely(!is_cpu_allowed(p, cpu))) in select_task_rq()
3654 cpu = select_fallback_rq(task_cpu(p), p); in select_task_rq()
3656 return cpu; in select_task_rq()
3659 void sched_set_stop_task(int cpu, struct task_struct *stop) in sched_set_stop_task() argument
3662 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; in sched_set_stop_task()
3663 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
3671 * much confusion -- but then, stop work should not in sched_set_stop_task()
3676 stop->sched_class = &stop_sched_class; in sched_set_stop_task()
3679 * The PI code calls rt_mutex_setprio() with ->pi_lock held to in sched_set_stop_task()
3685 * The stop task itself will never be part of the PI-chain, it in sched_set_stop_task()
3686 * never blocks, therefore that ->pi_lock recursion is safe. in sched_set_stop_task()
3687 * Tell lockdep about this by placing the stop->pi_lock in its in sched_set_stop_task()
3690 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); in sched_set_stop_task()
3693 cpu_rq(cpu)->stop = stop; in sched_set_stop_task()
3700 old_stop->sched_class = &rt_sched_class; in sched_set_stop_task()
3709 return set_cpus_allowed_ptr(p, ctx->new_mask); in __set_cpus_allowed_ptr()
3727 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) in ttwu_stat() argument
3737 if (cpu == rq->cpu) { in ttwu_stat()
3738 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
3739 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3743 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3746 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3747 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { in ttwu_stat()
3748 __schedstat_inc(sd->ttwu_wake_remote); in ttwu_stat()
3755 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3758 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
3759 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3762 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3770 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3782 if (p->sched_contributes_to_load) in ttwu_do_activate()
3783 rq->nr_uninterruptible--; in ttwu_do_activate()
3790 if (p->in_iowait) { in ttwu_do_activate()
3792 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3801 if (p->sched_class->task_woken) { in ttwu_do_activate()
3804 * drop the rq->lock, hereafter rq is only used for statistics. in ttwu_do_activate()
3807 p->sched_class->task_woken(rq, p); in ttwu_do_activate()
3811 if (rq->idle_stamp) { in ttwu_do_activate()
3812 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_activate()
3813 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_activate()
3815 update_avg(&rq->avg_idle, delta); in ttwu_do_activate()
3817 if (rq->avg_idle > max) in ttwu_do_activate()
3818 rq->avg_idle = max; in ttwu_do_activate()
3820 rq->idle_stamp = 0; in ttwu_do_activate()
3824 p->dl_server = NULL; in ttwu_do_activate()
3841 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3844 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3845 * then schedule() must still happen and p->state can be changed to
3891 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3892 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3897 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3902 * idle_cpu() does not observe a false-negative -- if it does, in sched_ttwu_pending()
3904 * of tasks on this CPU during that window. in sched_ttwu_pending()
3910 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
3920 bool call_function_single_prep_ipi(int cpu) in call_function_single_prep_ipi() argument
3922 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { in call_function_single_prep_ipi()
3923 trace_sched_wake_idle_without_ipi(cpu); in call_function_single_prep_ipi()
3931 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3932 * necessary. The wakee CPU on receipt of the IPI will queue the task
3936 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in __ttwu_queue_wakelist() argument
3938 struct rq *rq = cpu_rq(cpu); in __ttwu_queue_wakelist()
3940 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3942 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
3943 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3946 void wake_up_if_idle(int cpu) in wake_up_if_idle() argument
3948 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle()
3951 if (is_idle_task(rcu_dereference(rq->curr))) { in wake_up_if_idle()
3953 if (is_idle_task(rq->curr)) in wake_up_if_idle()
3967 * Whether CPUs share cache resources, which means LLC on non-cluster
3978 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) in ttwu_queue_cond() argument
3981 * Do not complicate things with the async wake_list while the CPU is in ttwu_queue_cond()
3984 if (!cpu_active(cpu)) in ttwu_queue_cond()
3987 /* Ensure the task will still be allowed to run on the CPU. */ in ttwu_queue_cond()
3988 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
3992 * If the CPU does not share cache, then queue the task on the in ttwu_queue_cond()
3995 if (!cpus_share_cache(smp_processor_id(), cpu)) in ttwu_queue_cond()
3998 if (cpu == smp_processor_id()) in ttwu_queue_cond()
4002 * If the wakee cpu is idle, or the task is descheduling and the in ttwu_queue_cond()
4003 * only running task on the CPU, then use the wakelist to offload in ttwu_queue_cond()
4004 * the task activation to the idle (or soon-to-be-idle) CPU as in ttwu_queue_cond()
4005 * the current CPU is likely busy. nr_running is checked to in ttwu_queue_cond()
4008 * Note that we can only get here with (wakee) p->on_rq=0, in ttwu_queue_cond()
4009 * p->on_cpu can be whatever, we've done the dequeue, so in ttwu_queue_cond()
4010 * the wakee has been accounted out of ->nr_running. in ttwu_queue_cond()
4012 if (!cpu_rq(cpu)->nr_running) in ttwu_queue_cond()
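The conditions in ttwu_queue_cond() above reduce to a single predicate. The stand-in below takes plain booleans where the kernel consults cpu_active(), cpumask_test_cpu(), cpus_share_cache() and rq->nr_running; it only mirrors the decision order.

```c
#include <stdbool.h>
#include <stdio.h>

static bool use_remote_wakelist(bool target_active, bool target_allowed,
				bool shares_cache_with_me, bool target_is_me,
				unsigned int target_nr_running)
{
	if (!target_active)		/* don't complicate an inactive CPU */
		return false;
	if (!target_allowed)		/* task may not run there anyway */
		return false;
	if (!shares_cache_with_me)	/* different LLC: offload via IPI */
		return true;
	if (target_is_me)		/* local wakeup: just take rq->lock */
		return false;

	return target_nr_running == 0;	/* idle (or soon-to-be-idle) target */
}

int main(void)
{
	/* remote CPU, allowed, shares LLC, currently idle -> use the wakelist */
	printf("%d\n", use_remote_wakelist(true, true, true, false, 0));
	return 0;
}
```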
4018 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue_wakelist() argument
4020 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { in ttwu_queue_wakelist()
4021 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ in ttwu_queue_wakelist()
4022 __ttwu_queue_wakelist(p, cpu, wake_flags); in ttwu_queue_wakelist()
4031 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue_wakelist() argument
4038 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue() argument
4040 struct rq *rq = cpu_rq(cpu); in ttwu_queue()
4043 if (ttwu_queue_wakelist(p, cpu, wake_flags)) in ttwu_queue()
4085 * set p::saved_state to TASK_RUNNING, but do not wake the task in ttwu_state_match()
4097 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4103 * Notes on Program-Order guarantees on SMP systems.
4107 * The basic program-order guarantee on SMP systems is that when a task [t]
4108 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4109 * execution on its new CPU [c1].
4113 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4114 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4115 * rq(c1)->lock (if not at the same time, then in that order).
4116 * C) LOCK of the rq(c1)->lock scheduling in task
4119 * Note: the CPU doing B need not be c0 or c1
4125 * LOCK rq(0)->lock
4126 * sched-out X
4127 * sched-in Y
4128 * UNLOCK rq(0)->lock
4130 * LOCK rq(0)->lock // orders against CPU0
4132 * UNLOCK rq(0)->lock
4134 * LOCK rq(1)->lock
4136 * UNLOCK rq(1)->lock
4138 * LOCK rq(1)->lock // orders against CPU2
4139 * sched-out Z
4140 * sched-in X
4141 * UNLOCK rq(1)->lock
4144 * BLOCKING -- aka. SLEEP + WAKEUP
4150 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4151 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4157 * LOCK rq(0)->lock LOCK X->pi_lock
4159 * sched-out X
4160 * smp_store_release(X->on_cpu, 0);
4162 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4163 * X->state = WAKING
4166 * LOCK rq(2)->lock
4168 * X->state = RUNNING
4169 * UNLOCK rq(2)->lock
4171 * LOCK rq(2)->lock // orders against CPU1
4172 * sched-out Z
4173 * sched-in X
4174 * UNLOCK rq(2)->lock
4176 * UNLOCK X->pi_lock
4177 * UNLOCK rq(0)->lock
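The release/acquire pairing described above (store-release of ->on_cpu in finish_task(), acquire load in try_to_wake_up()) can be demonstrated with C11 atomics in userspace. The sketch below uses invented names and two plain threads; it models the ordering guarantee, not the kernel code (build with -pthread).

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int on_cpu = 1;
static int task_state;			/* stands in for the sched-out side's stores */

static void *scheduler_side(void *arg)
{
	task_state = 42;		/* stores done while "scheduling out" */
	atomic_store_explicit(&on_cpu, 0, memory_order_release);	/* finish_task() */
	return NULL;
}

static void *waker_side(void *arg)
{
	while (atomic_load_explicit(&on_cpu, memory_order_acquire))
		;			/* smp_cond_load_acquire(&X->on_cpu, !VAL) */
	printf("waker sees task_state=%d\n", task_state);	/* guaranteed to see 42 */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&b, NULL, waker_side, NULL);
	pthread_create(&a, NULL, scheduler_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```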
4186 * try_to_wake_up - wake up a thread
4189 * @wake_flags: wake modifier flags (WF_*)
4193 * If (@state & @p->state) @p->state = TASK_RUNNING.
4199 * It issues a full memory barrier before accessing @p->state, see the comment
4202 * Uses p->pi_lock to serialize against concurrent wake-ups.
4204 * Relies on p->pi_lock stabilizing:
4205 * - p->sched_class
4206 * - p->cpus_ptr
4207 * - p->sched_task_group
4210 * Tries really hard to only take one task_rq(p)->lock for performance.
4211 * Takes rq->lock in:
4212 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4213 * - ttwu_queue() -- new rq, for enqueue of the task;
4214 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4219 * Return: %true if @p->state changes (an actual wakeup was done),
4225 int cpu, success = 0; in try_to_wake_up() local
4229 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) in try_to_wake_up()
4231 * case the whole 'p->on_rq && ttwu_runnable()' case below in try_to_wake_up()
4235 * - we rely on Program-Order guarantees for all the ordering, in try_to_wake_up()
4236 * - we're serialized against set_special_state() by virtue of in try_to_wake_up()
4237 * it disabling IRQs (this allows not taking ->pi_lock). in try_to_wake_up()
4248 * If we are going to wake up a thread waiting for CONDITION we in try_to_wake_up()
4250 * reordered with p->state check below. This pairs with smp_store_mb() in try_to_wake_up()
4253 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in try_to_wake_up()
4261 * Ensure we load p->on_rq _after_ p->state, otherwise it would in try_to_wake_up()
4262 * be possible to, falsely, observe p->on_rq == 0 and get stuck in try_to_wake_up()
4266 * STORE p->on_rq = 1 LOAD p->state in try_to_wake_up()
4267 * UNLOCK rq->lock in try_to_wake_up()
4270 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4272 * UNLOCK rq->lock in try_to_wake_up()
4275 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq in try_to_wake_up()
4277 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4283 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4288 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be in try_to_wake_up()
4289 * possible to, falsely, observe p->on_cpu == 0. in try_to_wake_up()
4291 * One must be running (->on_cpu == 1) in order to remove oneself in try_to_wake_up()
4295 * STORE p->on_cpu = 1 LOAD p->on_rq in try_to_wake_up()
4296 * UNLOCK rq->lock in try_to_wake_up()
4299 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4301 * STORE p->on_rq = 0 LOAD p->on_cpu in try_to_wake_up()
4303 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4306 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure in try_to_wake_up()
4308 * care about its own p->state. See the comment in __schedule(). in try_to_wake_up()
4313 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq in try_to_wake_up()
4314 * == 0), which means we need to do an enqueue, change p->state to in try_to_wake_up()
4315 * TASK_WAKING such that we can unlock p->pi_lock before doing the in try_to_wake_up()
4318 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4321 * If the owning (remote) CPU is still in the middle of schedule() with in try_to_wake_up()
4323 * which potentially sends an IPI instead of spinning on p->on_cpu to in try_to_wake_up()
4327 * Ensure we load task_cpu(p) after p->on_cpu: in try_to_wake_up()
4329 * set_task_cpu(p, cpu); in try_to_wake_up()
4330 * STORE p->cpu = @cpu in try_to_wake_up()
4332 * LOCK rq->lock in try_to_wake_up()
4333 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) in try_to_wake_up()
4334 * STORE p->on_cpu = 1 LOAD p->cpu in try_to_wake_up()
4336 * to ensure we observe the correct CPU on which the task is currently in try_to_wake_up()
4339 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4344 * If the owning (remote) CPU is still in the middle of schedule() with in try_to_wake_up()
4352 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4354 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); in try_to_wake_up()
4355 if (task_cpu(p) != cpu) { in try_to_wake_up()
4356 if (p->in_iowait) { in try_to_wake_up()
4358 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
4363 set_task_cpu(p, cpu); in try_to_wake_up()
4366 cpu = task_cpu(p); in try_to_wake_up()
4369 ttwu_queue(p, cpu, wake_flags); in try_to_wake_up()
4380 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4383 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when in __task_needs_rq_lock()
4391 * Ensure we load p->on_rq after p->__state, otherwise it would be in __task_needs_rq_lock()
4392 * possible to, falsely, observe p->on_rq == 0. in __task_needs_rq_lock()
4397 if (p->on_rq) in __task_needs_rq_lock()
4406 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4413 * task_call_func - Invoke a function on task in fixed state
4419 * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4432 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4439 * - blocked and we're holding off wakeups (pi->lock) in task_call_func()
4440 * - woken, and we're holding off enqueue (rq->lock) in task_call_func()
4441 * - queued, and we're holding off schedule (rq->lock) in task_call_func()
4442 * - running, and we're holding off de-schedule (rq->lock) in task_call_func()
4444 * The called function (@func) can use: task_curr(), p->on_rq and in task_call_func()
4445 * p->__state to differentiate between these states. in task_call_func()
4452 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
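/*
 * Illustrative sketch (not from this file): a minimal user of task_call_func()
 * as documented above -- the callback runs with @p pinned in one of the four
 * states listed, so it can sample p->__state and p->on_rq consistently.
 * snap_cb(), struct state_snap and snapshot_task() are hypothetical names;
 * task_call_func() and its task_call_f callback type are assumed to be
 * declared in <linux/sched.h>.
 */
#include <linux/sched.h>

struct state_snap {
	unsigned int	state;		/* copy of p->__state */
	bool		queued;		/* was the task on a runqueue? */
};

static int snap_cb(struct task_struct *p, void *arg)
{
	struct state_snap *s = arg;

	s->state  = READ_ONCE(p->__state);
	s->queued = !!p->on_rq;
	return 0;
}

static int snapshot_task(struct task_struct *p, struct state_snap *s)
{
	return task_call_func(p, snap_cb, s);
}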
4457 * cpu_curr_snapshot - Return a snapshot of the currently running task
4458 * @cpu: The CPU on which to snapshot the task.
4461 * the specified CPU. If the same task is running on that CPU throughout,
4463 * If the CPU did any context switches even vaguely concurrently with the
4466 * that CPU somewhere around the time that this function was executing.
4468 * If the specified CPU was offline, the return value is whatever it
4469 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4471 * value must take some action to ensure that the specified CPU remains
4478 struct task_struct *cpu_curr_snapshot(int cpu) in cpu_curr_snapshot() argument
4483 t = rcu_dereference(cpu_curr(cpu)); in cpu_curr_snapshot()
4489 * wake_up_process - Wake up a specific process
4492 * Attempt to wake up the nominated process and move it to the set of runnable
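/*
 * Illustrative sketch (not from this file): the canonical sleeper/waker
 * pairing that the ordering comments in try_to_wake_up() above refer to.
 * The sleeper publishes its state with set_current_state() (which implies
 * smp_store_mb()) before re-checking the condition; the waker sets the
 * condition before calling wake_up_process().  worker_fn(), data_ready and
 * producer() are hypothetical names; worker_task would be created elsewhere
 * with kthread_run().
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *worker_task;
static bool data_ready;				/* the CONDITION */

static int worker_fn(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!READ_ONCE(data_ready) && !kthread_should_stop())
			schedule();		/* sleep until woken */
		__set_current_state(TASK_RUNNING);

		if (READ_ONCE(data_ready)) {
			WRITE_ONCE(data_ready, false);
			/* ... consume the work item ... */
		}
	}
	return 0;
}

static void producer(void)
{
	WRITE_ONCE(data_ready, true);	/* CONDITION = 1 ... */
	wake_up_process(worker_task);	/* ... must not be reordered past the wakeup */
}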
4518 p->on_rq = 0; in __sched_fork()
4520 p->se.on_rq = 0; in __sched_fork()
4521 p->se.exec_start = 0; in __sched_fork()
4522 p->se.sum_exec_runtime = 0; in __sched_fork()
4523 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4524 p->se.nr_migrations = 0; in __sched_fork()
4525 p->se.vruntime = 0; in __sched_fork()
4526 p->se.vlag = 0; in __sched_fork()
4527 p->se.slice = sysctl_sched_base_slice; in __sched_fork()
4528 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4531 p->se.cfs_rq = NULL; in __sched_fork()
4536 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4539 init_dl_entity(&p->dl); in __sched_fork()
4541 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4542 p->rt.timeout = 0; in __sched_fork()
4543 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4544 p->rt.on_rq = 0; in __sched_fork()
4545 p->rt.on_list = 0; in __sched_fork()
4548 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4552 p->capture_control = NULL; in __sched_fork()
4556 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4557 p->migration_pending = NULL; in __sched_fork()
4591 pgdat->nbp_threshold = 0; in reset_memory_tiering()
4592 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); in reset_memory_tiering()
4593 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); in reset_memory_tiering()
4605 return -EPERM; in sysctl_numa_balancing()
4674 return -EPERM; in sysctl_schedstats()
4746 * fork()/clone()-time setup:
4754 * event cannot wake it up and insert it on the runqueue either. in sched_fork()
4756 p->__state = TASK_NEW; in sched_fork()
4761 p->prio = current->normal_prio; in sched_fork()
4768 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4770 p->policy = SCHED_NORMAL; in sched_fork()
4771 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4772 p->rt_priority = 0; in sched_fork()
4773 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4774 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4776 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4783 p->sched_reset_on_fork = 0; in sched_fork()
4786 if (dl_prio(p->prio)) in sched_fork()
4787 return -EAGAIN; in sched_fork()
4788 else if (rt_prio(p->prio)) in sched_fork()
4789 p->sched_class = &rt_sched_class; in sched_fork()
4791 p->sched_class = &fair_sched_class; in sched_fork()
4793 init_entity_runnable_average(&p->se); in sched_fork()
4798 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4801 p->on_cpu = 0; in sched_fork()
4805 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4806 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4816 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly in sched_cgroup_fork()
4819 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4823 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], in sched_cgroup_fork()
4826 p->sched_task_group = tg; in sched_cgroup_fork()
4831 * We're setting the CPU for the first time, we don't migrate, in sched_cgroup_fork()
4835 if (p->sched_class->task_fork) in sched_cgroup_fork()
4836 p->sched_class->task_fork(p); in sched_cgroup_fork()
4837 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4862 * wake_up_new_task - wake up a newly created task for the first time.
4873 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4874 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4878 * - cpus_ptr can change in the fork path in wake_up_new_task()
4879 * - any previously selected CPU might disappear through hotplug in wake_up_new_task()
4882 * as we're not fully set-up yet. in wake_up_new_task()
4884 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4896 if (p->sched_class->task_woken) { in wake_up_new_task()
4898 * Nothing relies on rq->lock after this, so it's fine to in wake_up_new_task()
4902 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4926 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4934 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); in preempt_notifier_register()
4939 * preempt_notifier_unregister - no longer interested in preemption notifications
4946 hlist_del(¬ifier->link); in preempt_notifier_unregister()
4954 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_in_preempt_notifiers()
4955 notifier->ops->sched_in(notifier, raw_smp_processor_id()); in __fire_sched_in_preempt_notifiers()
4970 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_out_preempt_notifiers()
4971 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
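/*
 * Illustrative sketch (not from this file): a minimal consumer of the preempt
 * notifier API above, in the style of KVM's vcpu_load() -- the current task
 * registers a notifier and is then told when it is scheduled out and back in.
 * my_sched_in()/my_sched_out() and watch_current_task() are hypothetical
 * names.
 */
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	pr_debug("scheduled back in on CPU%d\n", cpu);
}

static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	pr_debug("scheduled out in favour of %s\n", next->comm);
}

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static struct preempt_notifier my_notifier;

static void watch_current_task(void)
{
	preempt_notifier_inc();				/* enable the notifier static key */
	preempt_notifier_init(&my_notifier, &my_preempt_ops);
	preempt_notifier_register(&my_notifier);	/* attaches to current */
}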
5003 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and in prepare_task()
5006 WRITE_ONCE(next->on_cpu, 1); in prepare_task()
5014 * This must be the very last reference to @prev from this CPU. After in finish_task()
5015 * p->on_cpu is cleared, the task can be moved to a different CPU. We in finish_task()
5019 * In particular, the load of prev->state in finish_task_switch() must in finish_task()
5024 smp_store_release(&prev->on_cpu, 0); in finish_task()
5038 func = (void (*)(struct rq *))head->func; in do_balance_callbacks()
5039 next = head->next; in do_balance_callbacks()
5040 head->next = NULL; in do_balance_callbacks()
5054 * that queued it (only later, when it's safe to drop rq->lock again),
5058 * a single test, namely: rq->balance_callback == NULL.
5068 struct balance_callback *head = rq->balance_callback; in __splice_balance_callbacks()
5077 * in the same rq->lock section. in __splice_balance_callbacks()
5085 rq->balance_callback = NULL; in __splice_balance_callbacks()
5134 * of the scheduler it's an obvious special-case), so we in prepare_lock_switch()
5138 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); in prepare_lock_switch()
5141 rq_lockp(rq)->owner = next; in prepare_lock_switch()
5149 * fix up the runqueue lock - which gets 'carried over' from in finish_lock_switch()
5152 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
5172 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_out()
5180 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_in()
5186 * prepare_task_switch - prepare to switch tasks
5213 * finish_task_switch - clean up after a task-switch
5219 * and do any other architecture-specific cleanup actions.
5229 * because prev may have moved to another CPU.
5232 __releases(rq->lock) in finish_task_switch()
5235 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
5245 * raw_spin_lock_irq(&rq->lock) // 2 in finish_task_switch()
5251 current->comm, current->pid, preempt_count())) in finish_task_switch()
5254 rq->prev_mm = NULL; in finish_task_switch()
5258 * If a task dies, then it sets TASK_DEAD in tsk->state and calls in finish_task_switch()
5262 * We must observe prev->state before clearing prev->on_cpu (in in finish_task_switch()
5264 * running on another CPU and we could race with its RUNNING -> DEAD in finish_task_switch()
5267 prev_state = READ_ONCE(prev->__state); in finish_task_switch()
5289 * schedule between user->kernel->user threads without passing though in finish_task_switch()
5291 * rq->curr, before returning to userspace, so provide them here: in finish_task_switch()
5293 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly in finish_task_switch()
5295 * - a sync_core for SYNC_CORE. in finish_task_switch()
5303 if (prev->sched_class->task_dead) in finish_task_switch()
5304 prev->sched_class->task_dead(prev); in finish_task_switch()
5316 * schedule_tail - first thing a freshly forked thread must call.
5320 __releases(rq->lock) in schedule_tail()
5326 * finish_task_switch() will drop rq->lock() and lower preempt_count in schedule_tail()
5334 if (current->set_child_tid) in schedule_tail()
5335 put_user(task_pid_vnr(current), current->set_child_tid); in schedule_tail()
5341 * context_switch - switch to the new MM and the new thread's register state.
5357 * kernel -> kernel lazy + transfer active in context_switch()
5358 * user -> kernel lazy + mmgrab_lazy_tlb() active in context_switch()
5360 * kernel -> user switch + mmdrop_lazy_tlb() active in context_switch()
5361 * user -> user switch in context_switch()
5366 if (!next->mm) { // to kernel in context_switch()
5367 enter_lazy_tlb(prev->active_mm, next); in context_switch()
5369 next->active_mm = prev->active_mm; in context_switch()
5370 if (prev->mm) // from user in context_switch()
5371 mmgrab_lazy_tlb(prev->active_mm); in context_switch()
5373 prev->active_mm = NULL; in context_switch()
5375 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
5378 * rq->curr / membarrier_switch_mm() and returning to userspace. in context_switch()
5381 * case 'prev->active_mm == next->mm' through in context_switch()
5384 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
5385 lru_gen_use_mm(next->mm); in context_switch()
5387 if (!prev->mm) { // from kernel in context_switch()
5389 rq->prev_mm = prev->active_mm; in context_switch()
5390 prev->active_mm = NULL; in context_switch()
5417 sum += cpu_rq(i)->nr_running; in nr_running()
5423 * Check if only the current task is running on the CPU.
5426 * preemption, thus the result might have a time-of-check-to-time-of-use
5429 * - from a non-preemptible section (of course)
5431 * - from a thread that is bound to a single CPU
5433 * - in a loop with very short iterations (e.g. a polling loop)
5437 return raw_rq()->nr_running == 1; in single_task_running()
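/*
 * Illustrative sketch (not from this file): the sort of short polling loop
 * the caveats above have in mind (compare KVM's halt polling) -- spin only
 * while nothing else is runnable on this CPU.  poll_for_event() and
 * event_flag are hypothetical; single_task_running() is assumed to be
 * declared in <linux/sched/stat.h>.
 */
#include <linux/atomic.h>
#include <linux/processor.h>
#include <linux/sched/stat.h>

static atomic_t event_flag;			/* set from elsewhere, e.g. an IRQ */

static bool poll_for_event(unsigned int max_iters)
{
	unsigned int i;

	for (i = 0; i < max_iters; i++) {
		if (atomic_read(&event_flag))
			return true;
		if (!single_task_running())	/* another task wants this CPU */
			break;
		cpu_relax();
	}
	return false;
}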
5441 unsigned long long nr_context_switches_cpu(int cpu) in nr_context_switches_cpu() argument
5443 return cpu_rq(cpu)->nr_switches; in nr_context_switches_cpu()
5452 sum += cpu_rq(i)->nr_switches; in nr_context_switches()
5460 * for a CPU that has IO-wait which might not even end up running the task when
5464 unsigned int nr_iowait_cpu(int cpu) in nr_iowait_cpu() argument
5466 return atomic_read(&cpu_rq(cpu)->nr_iowait); in nr_iowait_cpu()
5470 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5472 * The idea behind IO-wait accounting is to account the idle time that we could
5474 * storage performance, we'd have a proportional reduction in IO-wait time.
5477 * idle time as IO-wait, because if the storage were faster, it could've been
5480 * This has been extended to SMP, by doing the same for each CPU. This however
5483 * Imagine for instance the case where two tasks block on one CPU, only the one
5484 * CPU will have IO-wait accounted, while the other has regular idle. Even
5488 * This means that, when looking globally, the current IO-wait accounting on
5491 * Worse, since the numbers are provided per CPU, they are sometimes
5492 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5493 * associated with any one particular CPU, it can wake to another CPU than it
5494 * blocked on. This means the per CPU IO-wait number is meaningless.
5496 * Task CPU affinities can make all that even more 'interesting'.
5512 * sched_exec - execve() is a valuable balancing opportunity, because at
5521 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in sched_exec()
5522 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5544 * and its field curr->exec_start; when called from task_sched_runtime(),
5551 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
5553 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; in prefetch_curr_exec_start()
5556 prefetch(&curr->exec_start); in prefetch_curr_exec_start()
5572 * 64-bit doesn't need locks to atomically read a 64-bit value. in task_sched_runtime()
5574 * Reading ->on_cpu is racy, but this is ok. in task_sched_runtime()
5576 * If we race with it leaving CPU, we'll take a lock. So we're correct. in task_sched_runtime()
5577 * If we race with it entering CPU, unaccounted time is 0. This is in task_sched_runtime()
5579 * If we see ->on_cpu without ->on_rq, the task is leaving, and has in task_sched_runtime()
5582 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5583 return p->se.sum_exec_runtime; in task_sched_runtime()
5588 * Must be ->curr _and_ ->on_rq. If dequeued, we would in task_sched_runtime()
5595 p->sched_class->update_curr(rq); in task_sched_runtime()
5597 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5619 if (!rq->last_seen_need_resched_ns) { in cpu_resched_latency()
5620 rq->last_seen_need_resched_ns = now; in cpu_resched_latency()
5621 rq->ticks_without_resched = 0; in cpu_resched_latency()
5625 rq->ticks_without_resched++; in cpu_resched_latency()
5626 resched_latency = now - rq->last_seen_need_resched_ns; in cpu_resched_latency()
5653 * This function gets called by the timer code, with HZ frequency.
5658 int cpu = smp_processor_id(); in scheduler_tick() local
5659 struct rq *rq = cpu_rq(cpu); in scheduler_tick()
5660 struct task_struct *curr = rq->curr; in scheduler_tick()
5665 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) in scheduler_tick()
5675 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
5685 resched_latency_warn(cpu, resched_latency); in scheduler_tick()
5689 if (curr->flags & PF_WQ_WORKER) in scheduler_tick()
5693 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
5701 int cpu; member
5705 /* Values for ->state, see diagram below. */
5711 * State diagram for ->state:
5720 * +--TICK_SCHED_REMOTE_OFFLINING
5739 int cpu = twork->cpu; in sched_tick_remote() local
5740 struct rq *rq = cpu_rq(cpu); in sched_tick_remote()
5744 * Handle the tick only if it appears the remote CPU is running in full in sched_tick_remote()
5747 * statistics and checks timeslices in a time-independent way, regardless in sched_tick_remote()
5750 if (tick_nohz_tick_stopped_cpu(cpu)) { in sched_tick_remote()
5752 struct task_struct *curr = rq->curr; in sched_tick_remote()
5754 if (cpu_online(cpu)) { in sched_tick_remote()
5762 u64 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
5765 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
5777 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); in sched_tick_remote()
5783 static void sched_tick_start(int cpu) in sched_tick_start() argument
5788 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) in sched_tick_start()
5793 twork = per_cpu_ptr(tick_work_cpu, cpu); in sched_tick_start()
5794 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); in sched_tick_start()
5797 twork->cpu = cpu; in sched_tick_start()
5798 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); in sched_tick_start()
5799 queue_delayed_work(system_unbound_wq, &twork->work, HZ); in sched_tick_start()
5804 static void sched_tick_stop(int cpu) in sched_tick_stop() argument
5809 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) in sched_tick_stop()
5814 twork = per_cpu_ptr(tick_work_cpu, cpu); in sched_tick_stop()
5815 /* There cannot be competing actions, but don't rely on stop-machine. */ in sched_tick_stop()
5816 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); in sched_tick_stop()
5830 static inline void sched_tick_start(int cpu) { } in sched_tick_start() argument
5831 static inline void sched_tick_stop(int cpu) { } in sched_tick_stop() argument
5845 current->preempt_disable_ip = ip; in preempt_latency_start()
5866 PREEMPT_MASK - 10); in preempt_count_add()
5913 return p->preempt_disable_ip; in get_preempt_disable_ip()
5931 prev->comm, prev->pid, preempt_count()); in __schedule_bug()
5948 * Various schedule()-time debugging checks and statistics:
5961 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { in schedule_debug()
5962 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", in schedule_debug()
5963 prev->comm, prev->pid, prev->non_block_count); in schedule_debug()
5978 schedstat_inc(this_rq()->sched_count); in schedule_debug()
5988 * that when we release the rq->lock the task is in the same in put_prev_task_balance()
5989 * state as before we took rq->lock. in put_prev_task_balance()
5994 for_class_range(class, prev->sched_class, &idle_sched_class) { in put_prev_task_balance()
5995 if (class->balance(rq, prev, rf)) in put_prev_task_balance()
6004 * Pick up the highest-prio task:
6018 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && in __pick_next_task()
6019 rq->nr_running == rq->cfs.h_nr_running)) { in __pick_next_task()
6032 * This is the fast path; it cannot be a DL server pick; in __pick_next_task()
6033 * therefore even if @p == @prev, ->dl_server must be NULL. in __pick_next_task()
6035 if (p->dl_server) in __pick_next_task()
6036 p->dl_server = NULL; in __pick_next_task()
6046 * Must be done before ->pick_next_task() because that can (re)set in __pick_next_task()
6047 * ->dl_server. in __pick_next_task()
6049 if (prev->dl_server) in __pick_next_task()
6050 prev->dl_server = NULL; in __pick_next_task()
6053 p = class->pick_next_task(rq); in __pick_next_task()
6064 return (task_rq(t)->idle == t); in is_task_rq_idle()
6069 return is_task_rq_idle(a) || (a->core_cookie == cookie); in cookie_equals()
6077 return a->core_cookie == b->core_cookie; in cookie_match()
6086 p = class->pick_task(rq); in pick_task()
6104 bool core_clock_updated = (rq == rq->core); in pick_next_task()
6106 int i, cpu, occ = 0; in pick_next_task() local
6113 cpu = cpu_of(rq); in pick_next_task()
6115 /* Stopper task is switching into idle, no need core-wide selection. */ in pick_next_task()
6116 if (cpu_is_offline(cpu)) { in pick_next_task()
6120 * another cpu during offline. in pick_next_task()
6122 rq->core_pick = NULL; in pick_next_task()
6131 * rq->core_pick can be NULL if no selection was made for a CPU because in pick_next_task()
6132 * it was either offline or went offline during a sibling's core-wide in pick_next_task()
6133 * selection. In this case, do a core-wide selection. in pick_next_task()
6135 if (rq->core->core_pick_seq == rq->core->core_task_seq && in pick_next_task()
6136 rq->core->core_pick_seq != rq->core_sched_seq && in pick_next_task()
6137 rq->core_pick) { in pick_next_task()
6138 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); in pick_next_task()
6140 next = rq->core_pick; in pick_next_task()
6146 rq->core_pick = NULL; in pick_next_task()
6152 smt_mask = cpu_smt_mask(cpu); in pick_next_task()
6153 need_sync = !!rq->core->core_cookie; in pick_next_task()
6156 rq->core->core_cookie = 0UL; in pick_next_task()
6157 if (rq->core->core_forceidle_count) { in pick_next_task()
6159 update_rq_clock(rq->core); in pick_next_task()
6164 rq->core->core_forceidle_start = 0; in pick_next_task()
6165 rq->core->core_forceidle_count = 0; in pick_next_task()
6166 rq->core->core_forceidle_occupation = 0; in pick_next_task()
6172 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq in pick_next_task()
6181 rq->core->core_task_seq++; in pick_next_task()
6184 * Optimize for common case where this CPU has no cookies in pick_next_task()
6189 if (!next->core_cookie) { in pick_next_task()
6190 rq->core_pick = NULL; in pick_next_task()
6205 * Tie-break prio towards the current CPU in pick_next_task()
6207 for_each_cpu_wrap(i, smt_mask, cpu) { in pick_next_task()
6211 * Current cpu always has its clock updated on entrance to in pick_next_task()
6212 * pick_next_task(). If the current cpu is not the core, in pick_next_task()
6215 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) in pick_next_task()
6218 p = rq_i->core_pick = pick_task(rq_i); in pick_next_task()
6223 cookie = rq->core->core_cookie = max->core_cookie; in pick_next_task()
6231 p = rq_i->core_pick; in pick_next_task()
6241 rq_i->core_pick = p; in pick_next_task()
6243 if (p == rq_i->idle) { in pick_next_task()
6244 if (rq_i->nr_running) { in pick_next_task()
6245 rq->core->core_forceidle_count++; in pick_next_task()
6247 rq->core->core_forceidle_seq++; in pick_next_task()
6254 if (schedstat_enabled() && rq->core->core_forceidle_count) { in pick_next_task()
6255 rq->core->core_forceidle_start = rq_clock(rq->core); in pick_next_task()
6256 rq->core->core_forceidle_occupation = occ; in pick_next_task()
6259 rq->core->core_pick_seq = rq->core->core_task_seq; in pick_next_task()
6260 next = rq->core_pick; in pick_next_task()
6261 rq->core_sched_seq = rq->core->core_pick_seq; in pick_next_task()
6263 /* Something should have been selected for current CPU */ in pick_next_task()
6269 * NOTE: L1TF -- at this point we're no longer running the old task and in pick_next_task()
6271 * their task. This ensures there is no inter-sibling overlap between in pick_next_task()
6272 * non-matching user state. in pick_next_task()
6281 * picked for it. That's Ok - it will pick tasks for itself, in pick_next_task()
6284 if (!rq_i->core_pick) in pick_next_task()
6288 * Update for new !FI->FI transitions, or if continuing to be in !FI: in pick_next_task()
6295 if (!(fi_before && rq->core->core_forceidle_count)) in pick_next_task()
6296 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); in pick_next_task()
6298 rq_i->core_pick->core_occupation = occ; in pick_next_task()
6300 if (i == cpu) { in pick_next_task()
6301 rq_i->core_pick = NULL; in pick_next_task()
6306 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); in pick_next_task()
6308 if (rq_i->curr == rq_i->core_pick) { in pick_next_task()
6309 rq_i->core_pick = NULL; in pick_next_task()
6319 if (rq->core->core_forceidle_count && next == rq->idle) in pick_next_task()
6335 cookie = dst->core->core_cookie; in try_steal_cookie()
6339 if (dst->curr != dst->idle) in try_steal_cookie()
6347 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6353 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6358 * check whether the runqueue of the destination CPU is in try_steal_cookie()
6380 static bool steal_cookie_task(int cpu, struct sched_domain *sd) in steal_cookie_task() argument
6384 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) { in steal_cookie_task()
6385 if (i == cpu) in steal_cookie_task()
6391 if (try_steal_cookie(cpu, i)) in steal_cookie_task()
6401 int cpu = cpu_of(rq); in sched_core_balance() local
6407 for_each_domain(cpu, sd) { in sched_core_balance()
6411 if (steal_cookie_task(cpu, sd)) in sched_core_balance()
6424 if (!rq->core->core_cookie) in queue_core_balance()
6427 if (!rq->nr_running) /* not forced idle */ in queue_core_balance()
6430 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); in queue_core_balance()
6434 sched_core_lock(*_T->lock, &_T->flags),
6435 sched_core_unlock(*_T->lock, &_T->flags),
6438 static void sched_core_cpu_starting(unsigned int cpu) in sched_core_cpu_starting() argument
6440 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in sched_core_cpu_starting()
6441 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; in sched_core_cpu_starting()
6444 guard(core_lock)(&cpu); in sched_core_cpu_starting()
6446 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_starting()
6454 if (t == cpu) in sched_core_cpu_starting()
6457 if (rq->core == rq) { in sched_core_cpu_starting()
6470 if (t == cpu) in sched_core_cpu_starting()
6471 rq->core = core_rq; in sched_core_cpu_starting()
6473 WARN_ON_ONCE(rq->core != core_rq); in sched_core_cpu_starting()
6477 static void sched_core_cpu_deactivate(unsigned int cpu) in sched_core_cpu_deactivate() argument
6479 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in sched_core_cpu_deactivate()
6480 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; in sched_core_cpu_deactivate()
6483 guard(core_lock)(&cpu); in sched_core_cpu_deactivate()
6487 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_deactivate()
6492 if (rq->core != rq) in sched_core_cpu_deactivate()
6497 if (t == cpu) in sched_core_cpu_deactivate()
6507 core_rq->core_task_seq = rq->core_task_seq; in sched_core_cpu_deactivate()
6508 core_rq->core_pick_seq = rq->core_pick_seq; in sched_core_cpu_deactivate()
6509 core_rq->core_cookie = rq->core_cookie; in sched_core_cpu_deactivate()
6510 core_rq->core_forceidle_count = rq->core_forceidle_count; in sched_core_cpu_deactivate()
6511 core_rq->core_forceidle_seq = rq->core_forceidle_seq; in sched_core_cpu_deactivate()
6512 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; in sched_core_cpu_deactivate()
6519 core_rq->core_forceidle_start = 0; in sched_core_cpu_deactivate()
6524 rq->core = core_rq; in sched_core_cpu_deactivate()
6528 static inline void sched_core_cpu_dying(unsigned int cpu) in sched_core_cpu_dying() argument
6530 struct rq *rq = cpu_rq(cpu); in sched_core_cpu_dying()
6532 if (rq->core != rq) in sched_core_cpu_dying()
6533 rq->core = rq; in sched_core_cpu_dying()
6538 static inline void sched_core_cpu_starting(unsigned int cpu) {} in sched_core_cpu_starting() argument
6539 static inline void sched_core_cpu_deactivate(unsigned int cpu) {} in sched_core_cpu_deactivate() argument
6540 static inline void sched_core_cpu_dying(unsigned int cpu) {} in sched_core_cpu_dying() argument
6578 * To drive preemption between tasks, the scheduler sets the flag in timer
6582 * task to the run-queue and that's it.
6584 * Now, if the new task added to the run-queue preempts the current
6588 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6590 * - in syscall or exception context, at the next outmost
6594 * - in IRQ context, return from interrupt-handler to
6597 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6600 * - cond_resched() call
6601 * - explicit schedule() call
6602 * - return from syscall or exception to user-space
6603 * - return from interrupt-handler to user-space
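/*
 * Illustrative sketch (not from this file): a voluntary preemption point of
 * the kind listed above -- a long kernel-side loop calls cond_resched() so
 * that, even with CONFIG_PREEMPTION unset, a higher-priority task does not
 * have to wait for the whole walk to finish.  process_one() and process_all()
 * are hypothetical names.
 */
#include <linux/sched.h>

void process_one(unsigned int i);	/* hypothetical per-item work */

static void process_all(unsigned int nr_items)
{
	unsigned int i;

	for (i = 0; i < nr_items; i++) {
		process_one(i);
		cond_resched();		/* explicit rescheduling point */
	}
}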
6614 int cpu; in __schedule() local
6616 cpu = smp_processor_id(); in __schedule()
6617 rq = cpu_rq(cpu); in __schedule()
6618 prev = rq->curr; in __schedule()
6629 * Make sure that signal_pending_state()->signal_pending() below in __schedule()
6636 * LOCK rq->lock LOCK p->pi_state in __schedule()
6638 * if (signal_pending_state()) if (p->state & @state) in __schedule()
6641 * after coming from user-space, before storing to rq->curr. in __schedule()
6647 rq->clock_update_flags <<= 1; in __schedule()
6649 rq->clock_update_flags = RQCF_UPDATED; in __schedule()
6651 switch_count = &prev->nivcsw; in __schedule()
6654 * We must load prev->state once (task_struct::state is volatile), such in __schedule()
6657 prev_state = READ_ONCE(prev->__state); in __schedule()
6660 WRITE_ONCE(prev->__state, TASK_RUNNING); in __schedule()
6662 prev->sched_contributes_to_load = in __schedule()
6667 if (prev->sched_contributes_to_load) in __schedule()
6668 rq->nr_uninterruptible++; in __schedule()
6672 * prev_state = prev->state; if (p->on_rq && ...) in __schedule()
6674 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); in __schedule()
6675 * p->state = TASK_WAKING in __schedule()
6679 * After this, schedule() must not care about p->state any more. in __schedule()
6683 if (prev->in_iowait) { in __schedule()
6684 atomic_inc(&rq->nr_iowait); in __schedule()
6688 switch_count = &prev->nvcsw; in __schedule()
6695 rq->last_seen_need_resched_ns = 0; in __schedule()
6699 rq->nr_switches++; in __schedule()
6701 * RCU users of rcu_dereference(rq->curr) may not see in __schedule()
6704 RCU_INIT_POINTER(rq->curr, next); in __schedule()
6708 * rq->curr, before returning to user-space. in __schedule()
6712 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. in __schedule()
6714 * - finish_lock_switch() for weakly-ordered in __schedule()
6716 * - switch_to() for arm64 (weakly-ordered, spin_unlock in __schedule()
6741 current->flags |= PF_NOFREEZE; in do_task_dead()
6746 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ in do_task_dead()
6758 * will use a blocking primitive -- which would lead to recursion. in sched_submit_work()
6762 task_flags = tsk->flags; in sched_submit_work()
6765 * wants to wake up a task to maintain concurrency. in sched_submit_work()
6777 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); in sched_submit_work()
6783 blk_flush_plug(tsk->plug, true); in sched_submit_work()
6790 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { in sched_update_worker()
6791 if (tsk->flags & PF_WQ_WORKER) in sched_update_worker()
6812 lockdep_assert(!tsk->sched_rt_mutex); in schedule()
6824 * state (have scheduled out non-voluntarily) by making sure that all
6827 * (schedule out non-voluntarily).
6841 WARN_ON_ONCE(current->__state); in schedule_idle()
6867 * schedule_preempt_disabled - called with preemption disabled
6917 * This is the entry point to schedule() from in-kernel preemption
6923 * If there is a non-zero preempt_count or interrupts are disabled, in preempt_schedule()
6955 * preempt_schedule_notrace - preempt_schedule called by tracing
7059 return try_to_wake_up(curr->private, mode, wake_flags); in default_wake_function()
7066 p->sched_class = &dl_sched_class; in __setscheduler_prio()
7068 p->sched_class = &rt_sched_class; in __setscheduler_prio()
7070 p->sched_class = &fair_sched_class; in __setscheduler_prio()
7072 p->prio = prio; in __setscheduler_prio()
7079 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7087 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1)); in rt_mutex_pre_schedule()
7093 lockdep_assert(current->sched_rt_mutex); in rt_mutex_schedule()
7100 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0)); in rt_mutex_post_schedule()
7106 prio = min(prio, pi_task->prio); in __rt_effective_prio()
7119 * rt_mutex_setprio - set the current priority of a task
7124 * not touch ->normal_prio like __setscheduler().
7137 /* XXX used to be waiter->prio, not waiter->task->prio */ in rt_mutex_setprio()
7138 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7143 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7149 * Set under pi_lock && rq->lock, such that the value can be used under in rt_mutex_setprio()
7154 * ensure a task is de-boosted (pi_task is set to NULL) before the in rt_mutex_setprio()
7156 * points to a blocked task -- which guarantees the task is present. in rt_mutex_setprio()
7158 p->pi_top_task = pi_task; in rt_mutex_setprio()
7163 if (prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7171 * the timer wheel base->lock on the CPU and another CPU wants in rt_mutex_setprio()
7172 * to access the timer (probably to cancel it). We can safely in rt_mutex_setprio()
7173 * ignore the boosting request, as the idle CPU runs this code in rt_mutex_setprio()
7178 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7179 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7180 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7185 oldprio = p->prio; in rt_mutex_setprio()
7190 prev_class = p->sched_class; in rt_mutex_setprio()
7200 * 1. -rt task is running and holds mutex A in rt_mutex_setprio()
7201 * --> -dl task blocks on mutex A in rt_mutex_setprio()
7203 * 2. -dl task is running and holds mutex A in rt_mutex_setprio()
7204 * --> -dl task blocks on mutex A and could preempt the in rt_mutex_setprio()
7208 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7209 (pi_task && dl_prio(pi_task->prio) && in rt_mutex_setprio()
7210 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7211 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7214 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7218 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7223 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7225 p->rt.timeout = 0; in rt_mutex_setprio()
7263 * the task might be in the middle of scheduling on another CPU. in set_user_nice()
7272 * allow the 'normal' nice value to be set - but as expected in set_user_nice()
7277 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
7288 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
7290 old_prio = p->prio; in set_user_nice()
7291 p->prio = effective_prio(p); in set_user_nice()
7300 * lowered its priority, then reschedule its CPU: in set_user_nice()
7302 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
7307 * is_nice_reduction - check if nice value is an actual reduction
7316 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ in is_nice_reduction()
7323 * can_nice - check if a task can reduce its nice value
7335 * sys_nice - change the priority of the current process.
7350 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); in SYSCALL_DEFINE1()
7355 return -EPERM; in SYSCALL_DEFINE1()
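/*
 * Illustrative sketch (not from this file): the userspace view of the
 * sys_nice()/set_user_nice() path above.  nice(2) adds an increment to the
 * caller's nice value, clamped to [-20, 19]; raising priority (a negative
 * increment) needs CAP_SYS_NICE or a suitable RLIMIT_NICE.  drop_priority()
 * is a hypothetical name.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int drop_priority(void)
{
	int nv;

	errno = 0;
	nv = nice(10);			/* returns the resulting nice value */
	if (nv == -1 && errno)		/* -1 is also a legal nice value */
		return -1;

	printf("now running at nice %d\n", nv);
	return 0;
}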
7368 * task_prio - return the priority value of a given task.
7375 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
7376 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
7377 * deadline -101 -1 0
7381 return p->prio - MAX_RT_PRIO; in task_prio()
7385 * idle_cpu - is a given CPU idle currently?
7386 * @cpu: the processor in question.
7388 * Return: 1 if the CPU is currently idle. 0 otherwise.
7390 int idle_cpu(int cpu) in idle_cpu() argument
7392 struct rq *rq = cpu_rq(cpu); in idle_cpu()
7394 if (rq->curr != rq->idle) in idle_cpu()
7397 if (rq->nr_running) in idle_cpu()
7401 if (rq->ttwu_pending) in idle_cpu()
7409 * available_idle_cpu - is a given CPU idle for enqueuing work.
7410 * @cpu: the CPU in question.
7412 * Return: 1 if the CPU is currently idle. 0 otherwise.
7414 int available_idle_cpu(int cpu) in available_idle_cpu() argument
7416 if (!idle_cpu(cpu)) in available_idle_cpu()
7419 if (vcpu_is_preempted(cpu)) in available_idle_cpu()
7426 * idle_task - return the idle task for a given CPU.
7427 * @cpu: the processor in question.
7429 * Return: The idle task for the CPU @cpu.
7431 struct task_struct *idle_task(int cpu) in idle_task() argument
7433 return cpu_rq(cpu)->idle; in idle_task()
7437 int sched_core_idle_cpu(int cpu) in sched_core_idle_cpu() argument
7439 struct rq *rq = cpu_rq(cpu); in sched_core_idle_cpu()
7441 if (sched_core_enabled(rq) && rq->curr == rq->idle) in sched_core_idle_cpu()
7444 return idle_cpu(cpu); in sched_core_idle_cpu()
7451 * This function computes an effective utilization for the given CPU, to be
7462 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
7463 * which excludes things like IRQ and steal-time. These latter are then accrued
7470 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, in effective_cpu_util() argument
7475 struct rq *rq = cpu_rq(cpu); in effective_cpu_util()
7477 scale = arch_scale_cpu_capacity(cpu); in effective_cpu_util()
7480 * Early check to see if IRQ/steal time saturates the CPU, can be in effective_cpu_util()
7481 * because of inaccuracies in how we track these -- see in effective_cpu_util()
7496 * - the computed DL bandwidth needed with the IRQ pressure which in effective_cpu_util()
7498 * - The minimum performance requirement for CFS and/or RT. in effective_cpu_util()
7506 if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt)) in effective_cpu_util()
7514 * to obtain the CPU's actual utilization. in effective_cpu_util()
7534 *              max - irq in effective_cpu_util()
7535 *   U' = irq + --------- * U in effective_cpu_util()
7536 *                 max
7544 unsigned long sched_cpu_util(int cpu) in sched_cpu_util() argument
7546 return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL); in sched_cpu_util()
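/*
 * Worked example (not from this file): with a CPU capacity (max) of 1024,
 * irq pressure of 256 and a combined CFS+RT+DL utilization U of 512, the
 * formula above gives U' = 256 + (1024 - 256) * 512 / 1024 = 640.  A
 * hypothetical helper doing the same integer arithmetic:
 */
static inline unsigned long scale_util_for_irq(unsigned long util,
					       unsigned long irq,
					       unsigned long max)
{
	/* U' = irq + (max - irq) / max * U, in integer math */
	return irq + (max - irq) * util / max;
}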
7551 * find_process_by_pid - find a process with a matching PID value.
7577 * sched_setparam() passes in -1 for its policy, to let the functions in DEFINE_CLASS()
7580 #define SETPARAM_POLICY -1 in DEFINE_CLASS()
7585 int policy = attr->sched_policy; in DEFINE_CLASS()
7588 policy = p->policy; in DEFINE_CLASS()
7590 p->policy = policy; in DEFINE_CLASS()
7595 p->static_prio = NICE_TO_PRIO(attr->sched_nice); in DEFINE_CLASS()
7598 * __sched_setscheduler() ensures attr->sched_priority == 0 when in DEFINE_CLASS()
7602 p->rt_priority = attr->sched_priority; in DEFINE_CLASS()
7603 p->normal_prio = normal_prio(p); in DEFINE_CLASS()
7616 return (uid_eq(cred->euid, pcred->euid) || in check_same_owner()
7617 uid_eq(cred->euid, pcred->uid)); in check_same_owner()
7623 * event on permitted non-privileged operations:
7630 if (attr->sched_nice < task_nice(p) && in user_check_sched_setscheduler()
7631 !is_nice_reduction(p, attr->sched_nice)) in user_check_sched_setscheduler()
7639 if (policy != p->policy && !rlim_rtprio) in user_check_sched_setscheduler()
7643 if (attr->sched_priority > p->rt_priority && in user_check_sched_setscheduler()
7644 attr->sched_priority > rlim_rtprio) in user_check_sched_setscheduler()
7671 if (p->sched_reset_on_fork && !reset_on_fork) in user_check_sched_setscheduler()
7678 return -EPERM; in user_check_sched_setscheduler()
7687 int oldpolicy = -1, policy = attr->sched_policy; in __sched_setscheduler()
7702 reset_on_fork = p->sched_reset_on_fork; in __sched_setscheduler()
7703 policy = oldpolicy = p->policy; in __sched_setscheduler()
7705 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); in __sched_setscheduler()
7708 return -EINVAL; in __sched_setscheduler()
7711 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) in __sched_setscheduler()
7712 return -EINVAL; in __sched_setscheduler()
7716 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, in __sched_setscheduler()
7719 if (attr->sched_priority > MAX_RT_PRIO-1) in __sched_setscheduler()
7720 return -EINVAL; in __sched_setscheduler()
7722 (rt_policy(policy) != (attr->sched_priority != 0))) in __sched_setscheduler()
7723 return -EINVAL; in __sched_setscheduler()
7730 if (attr->sched_flags & SCHED_FLAG_SUGOV) in __sched_setscheduler()
7731 return -EINVAL; in __sched_setscheduler()
7739 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { in __sched_setscheduler()
7749 if (dl_policy(policy) || dl_policy(p->policy)) { in __sched_setscheduler()
7755 * Make sure no PI-waiters arrive (or leave) while we are in __sched_setscheduler()
7758 * To be able to change p->policy safely, the appropriate in __sched_setscheduler()
7767 if (p == rq->stop) { in __sched_setscheduler()
7768 retval = -EINVAL; in __sched_setscheduler()
7776 if (unlikely(policy == p->policy)) { in __sched_setscheduler()
7777 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) in __sched_setscheduler()
7779 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) in __sched_setscheduler()
7783 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) in __sched_setscheduler()
7786 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
7799 task_group(p)->rt_bandwidth.rt_runtime == 0 && in __sched_setscheduler()
7801 retval = -EPERM; in __sched_setscheduler()
7807 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { in __sched_setscheduler()
7808 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
7815 if (!cpumask_subset(span, p->cpus_ptr) || in __sched_setscheduler()
7816 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
7817 retval = -EPERM; in __sched_setscheduler()
7824 /* Re-check policy now with rq lock held: */ in __sched_setscheduler()
7825 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { in __sched_setscheduler()
7826 policy = oldpolicy = -1; in __sched_setscheduler()
7839 retval = -EBUSY; in __sched_setscheduler()
7843 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
7844 oldprio = p->prio; in __sched_setscheduler()
7846 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice); in __sched_setscheduler()
7867 prev_class = p->sched_class; in __sched_setscheduler()
7869 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { in __sched_setscheduler()
7880 if (oldprio < p->prio) in __sched_setscheduler()
7919 .sched_priority = param->sched_priority, in _sched_setscheduler()
7920 .sched_nice = PRIO_TO_NICE(p->static_prio), in _sched_setscheduler()
7933 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
7962 …* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from ke…
7988 * workloads. You cannot take two correctly working static prio workloads
8031 return -EINVAL; in do_sched_setscheduler()
8033 return -EFAULT; in do_sched_setscheduler()
8037 return -ESRCH; in do_sched_setscheduler()
8053 ret = get_user(size, &uattr->size); in sched_copy_attr()
8065 if (ret == -E2BIG) in sched_copy_attr()
8070 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && in sched_copy_attr()
8072 return -EINVAL; in sched_copy_attr()
8076 * to be strict and return an error on out-of-bounds values? in sched_copy_attr()
8078 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); in sched_copy_attr()
8083 put_user(sizeof(*attr), &uattr->size); in sched_copy_attr()
8084 return -E2BIG; in sched_copy_attr()
8092 attr->sched_priority = p->rt_priority; in get_params()
8094 attr->sched_nice = task_nice(p); in get_params()
8098 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
8108 return -EINVAL; in SYSCALL_DEFINE3()
8114 * sys_sched_setparam - set/change the RT priority of a thread
8126 * sys_sched_setattr - same as above, but with extended sched_attr
8138 return -EINVAL; in SYSCALL_DEFINE3()
8145 return -EINVAL; in SYSCALL_DEFINE3()
8151 return -ESRCH; in SYSCALL_DEFINE3()
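/*
 * Illustrative sketch (not from this file): userspace use of the
 * sched_setattr() syscall above to switch the calling thread to
 * SCHED_DEADLINE (no glibc wrapper exists, so syscall(2) is used; needs
 * CAP_SYS_NICE/root).  struct sched_attr is declared locally with only the
 * original fields -- sched_copy_attr() above accepts user buffers of a
 * different size by design.  The 10/30/100 ms parameters are arbitrary
 * example values; SYS_sched_setattr is assumed to be exposed by the libc
 * headers.
 */
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int become_deadline_task(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	    = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000;	/*  10 ms */
	attr.sched_deadline =  30 * 1000 * 1000;	/*  30 ms */
	attr.sched_period   = 100 * 1000 * 1000;	/* 100 ms */

	return syscall(SYS_sched_setattr, 0, &attr, 0);	/* 0 == current thread */
}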
8160 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
8172 return -EINVAL; in SYSCALL_DEFINE1()
8177 return -ESRCH; in SYSCALL_DEFINE1()
8181 retval = p->policy; in SYSCALL_DEFINE1()
8182 if (p->sched_reset_on_fork) in SYSCALL_DEFINE1()
8189 * sys_sched_getparam - get the RT priority of a thread
8203 return -EINVAL; in SYSCALL_DEFINE2()
8208 return -ESRCH; in SYSCALL_DEFINE2()
8215 lp.sched_priority = p->rt_priority; in SYSCALL_DEFINE2()
8219 * This one might sleep, we cannot do it with a spinlock held ... in SYSCALL_DEFINE2()
8221 return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; in SYSCALL_DEFINE2()
8226 * than what user-space knows about) to user-space.
8228 * Note that all cases are valid: user-space buffer can be larger or
8229 * smaller than the kernel-space buffer. The usual case is that both
8240 return -EFAULT; in sched_attr_copy_to_user()
8245 * If usize == ksize then we just copy everything to user-space and all is good. in sched_attr_copy_to_user()
8247 * If usize < ksize then we only copy as much as user-space has space for, in sched_attr_copy_to_user()
8250 * If usize > ksize then user-space is using a newer version of the ABI, in sched_attr_copy_to_user()
8251 * which part the kernel doesn't know about. Just ignore it - tooling can in sched_attr_copy_to_user()
8252 * detect the kernel's knowledge of attributes from the attr->size value in sched_attr_copy_to_user()
8255 kattr->size = min(usize, ksize); in sched_attr_copy_to_user()
8257 if (copy_to_user(uattr, kattr, kattr->size)) in sched_attr_copy_to_user()
8258 return -EFAULT; in sched_attr_copy_to_user()
8264 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
8279 return -EINVAL; in SYSCALL_DEFINE4()
8284 return -ESRCH; in SYSCALL_DEFINE4()
8290 kattr.sched_policy = p->policy; in SYSCALL_DEFINE4()
8291 if (p->sched_reset_on_fork) in SYSCALL_DEFINE4()
8302 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; in SYSCALL_DEFINE4()
8303 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; in SYSCALL_DEFINE4()
8322 * if admission test is enabled, we only admit -deadline in dl_task_check_affinity()
8327 if (!cpumask_subset(task_rq(p)->rd->span, mask)) in dl_task_check_affinity()
8328 return -EBUSY; in dl_task_check_affinity()
8341 return -ENOMEM; in __sched_setaffinity()
8344 retval = -ENOMEM; in __sched_setaffinity()
8349 cpumask_and(new_mask, ctx->new_mask, cpus_allowed); in __sched_setaffinity()
8351 ctx->new_mask = new_mask; in __sched_setaffinity()
8352 ctx->flags |= SCA_CHECK; in __sched_setaffinity()
8378 if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) { in __sched_setaffinity()
8380 ctx->user_mask); in __sched_setaffinity()
8386 retval = -EINVAL; in __sched_setaffinity()
8404 return -ESRCH; in sched_setaffinity()
8406 if (p->flags & PF_NO_SETAFFINITY) in sched_setaffinity()
8407 return -EINVAL; in sched_setaffinity()
8411 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) in sched_setaffinity()
8412 return -EPERM; in sched_setaffinity()
8420 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and in sched_setaffinity()
8427 return -ENOMEM; in sched_setaffinity()
8450 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; in get_user_cpu_mask()
8454 * sys_sched_setaffinity - set the CPU affinity of a process
8457 * @user_mask_ptr: user-space pointer to the new CPU mask
8468 return -ENOMEM; in SYSCALL_DEFINE3()
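/*
 * Illustrative sketch (not from this file): the userspace side of
 * sys_sched_setaffinity() above -- pin the calling thread to CPU 2 using the
 * glibc wrapper and the CPU_* macros (pid 0 means "current thread").
 * pin_to_cpu2() is a hypothetical name.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pin_to_cpu2(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(2, &set);

	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}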
8485 return -ESRCH; in sched_getaffinity()
8491 guard(raw_spinlock_irqsave)(&p->pi_lock); in sched_getaffinity()
8492 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); in sched_getaffinity()
8498 * sys_sched_getaffinity - get the CPU affinity of a process
8501 * @user_mask_ptr: user-space pointer to hold the current CPU mask
8503 * Return: size of CPU mask copied to user_mask_ptr on success. An
8513 return -EINVAL; in SYSCALL_DEFINE3()
8514 if (len & (sizeof(unsigned long)-1)) in SYSCALL_DEFINE3()
8515 return -EINVAL; in SYSCALL_DEFINE3()
8518 return -ENOMEM; in SYSCALL_DEFINE3()
8525 ret = -EFAULT; in SYSCALL_DEFINE3()
8541 schedstat_inc(rq->yld_count); in do_sched_yield()
8542 current->sched_class->yield_task(rq); in do_sched_yield()
8552 * sys_sched_yield - yield the current processor to other threads.
8554 * This function yields the current CPU to other tasks. If there are no
8555 * other threads running on this CPU then this function will return.
8573 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick in __cond_resched()
8574 * whether the current CPU is in an RCU read-side critical section, in __cond_resched()
8576 * in kernel context. In contrast, in non-preemptible kernels, in __cond_resched()
8577 * RCU readers leave no in-memory hints, which means that CPU-bound in __cond_resched()
8625 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
8628 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
8689 #include <linux/entry-common.h>
8701 * cond_resched <- __cond_resched
8702 * might_resched <- RET0
8703 * preempt_schedule <- NOP
8704 * preempt_schedule_notrace <- NOP
8705 * irqentry_exit_cond_resched <- NOP
8708 * cond_resched <- __cond_resched
8709 * might_resched <- __cond_resched
8710 * preempt_schedule <- NOP
8711 * preempt_schedule_notrace <- NOP
8712 * irqentry_exit_cond_resched <- NOP
8715 * cond_resched <- RET0
8716 * might_resched <- RET0
8717 * preempt_schedule <- preempt_schedule
8718 * preempt_schedule_notrace <- preempt_schedule_notrace
8719 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
8723 preempt_dynamic_undefined = -1,
8742 return -EINVAL; in sched_dynamic_mode()
8761 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in in __sched_dynamic_update()
8894 * yield - yield the current processor to other threads.
8923 * yield_to - yield the current processor to another thread in
8935 * -ESRCH if there's no task to yield to.
8952 if (rq->nr_running == 1 && p_rq->nr_running == 1) in yield_to()
8953 return -ESRCH; in yield_to()
8959 if (!curr->sched_class->yield_to_task) in yield_to()
8962 if (curr->sched_class != p->sched_class) in yield_to()
8968 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
8970 schedstat_inc(rq->yld_count); in yield_to()
8972 * Make p's CPU reschedule; pick_next_entity in yield_to()
8989 int old_iowait = current->in_iowait; in io_schedule_prepare()
8991 current->in_iowait = 1; in io_schedule_prepare()
8992 blk_flush_plug(current->plug, true); in io_schedule_prepare()
8998 current->in_iowait = token; in io_schedule_finish()
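/*
 * Illustrative sketch (not from this file): bracketing an arbitrary blocking
 * operation with io_schedule_prepare()/io_schedule_finish() so the sleep is
 * accounted as IO wait -- the same pattern mutex_lock_io() uses.
 * lock_for_io() is a hypothetical wrapper.
 */
#include <linux/mutex.h>
#include <linux/sched.h>

static void lock_for_io(struct mutex *lock)
{
	int tok = io_schedule_prepare();	/* sets current->in_iowait, flushes the plug */

	mutex_lock(lock);			/* any sleep here counts as IO wait */
	io_schedule_finish(tok);		/* restores the previous in_iowait */
}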
9002 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
9029 * sys_sched_get_priority_max - return maximum RT priority.
9038 int ret = -EINVAL; in SYSCALL_DEFINE1()
9043 ret = MAX_RT_PRIO-1; in SYSCALL_DEFINE1()
9056 * sys_sched_get_priority_min - return minimum RT priority.
9065 int ret = -EINVAL; in SYSCALL_DEFINE1()
9087 return -EINVAL; in sched_rr_get_interval()
9092 return -ESRCH; in sched_rr_get_interval()
9100 if (p->sched_class->get_rr_interval) in sched_rr_get_interval()
9101 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
9110 * sys_sched_rr_get_interval - return the default timeslice of a process.
9115 * into the user-space timespec buffer. A value of '0' means infinity.
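/*
 * Illustrative sketch (not from this file): userspace query of the timeslice
 * returned by sys_sched_rr_get_interval() above, via the POSIX wrapper
 * (pid 0 means the calling thread).  print_rr_timeslice() is a hypothetical
 * name.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

static void print_rr_timeslice(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	else
		perror("sched_rr_get_interval");
}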
9153 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
9163 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
9165 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n", in sched_show_task()
9179 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
9207 * reset the NMI-timeout, listing all files on a slow in show_state_filter()
9210 * another CPU might be blocked waiting for us to process in show_state_filter()
9232 * init_idle - set up an idle thread for a given CPU
9234 * @cpu: CPU the idle task belongs to
9239 void __init init_idle(struct task_struct *idle, int cpu) in init_idle() argument
9243 .new_mask = cpumask_of(cpu), in init_idle()
9247 struct rq *rq = cpu_rq(cpu); in init_idle()
9252 raw_spin_lock_irqsave(&idle->pi_lock, flags); in init_idle()
9255 idle->__state = TASK_RUNNING; in init_idle()
9256 idle->se.exec_start = sched_clock(); in init_idle()
9259 * look like a proper per-CPU kthread. in init_idle()
9261 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; in init_idle()
9262 kthread_set_per_cpu(idle, cpu); in init_idle()
9275 * holding rq->lock, the CPU isn't yet set to this CPU so the in init_idle()
9279 * use task_rq_lock() here and obtain the other rq->lock. in init_idle()
9284 __set_task_cpu(idle, cpu); in init_idle()
9287 rq->idle = idle; in init_idle()
9288 rcu_assign_pointer(rq->curr, idle); in init_idle()
9289 idle->on_rq = TASK_ON_RQ_QUEUED; in init_idle()
9291 idle->on_cpu = 1; in init_idle()
9294 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); in init_idle()
9297 init_idle_preempt_count(idle, cpu); in init_idle()
9302 idle->sched_class = &idle_sched_class; in init_idle()
9303 ftrace_graph_init_idle_task(idle, cpu); in init_idle()
9304 vtime_init_idle(idle, cpu); in init_idle()
9306 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); in init_idle()
9331 * to a new cpuset; we don't want to change their CPU in task_can_attach()
9338 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
9339 ret = -EINVAL; in task_can_attach()
9356 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
9357 return -EINVAL; in migrate_task_to()
9384 p->numa_preferred_nid = nid; in sched_setnuma()
9396 * Ensure that the idle task is using init_mm right before its CPU goes
9401 struct mm_struct *mm = current->active_mm; in idle_task_exit()
9404 BUG_ON(current != this_rq()->idle); in idle_task_exit()
9419 int cpu; in __balance_push_cpu_stop() local
9421 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
9427 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
9428 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
9432 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
9442 * Ensure we only run per-cpu kthreads once the CPU goes !active.
9449 struct task_struct *push_task = rq->curr; in balance_push()
9456 rq->balance_callback = &balance_push_callback; in balance_push()
9460 * CPU. in balance_push()
9462 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
9466 * Both the cpu-hotplug and stop task are in this case and are in balance_push()
9473 * If this is the idle task on the outgoing CPU try to wake in balance_push()
9476 * accurate here because the waiter is pinned on this CPU in balance_push()
9483 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && in balance_push()
9484 rcuwait_active(&rq->hotplug_wait)) { in balance_push()
9486 rcuwait_wake_up(&rq->hotplug_wait); in balance_push()
9494 * Temporarily drop rq->lock such that we can wake-up the stop task. in balance_push()
9499 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, in balance_push()
9510 static void balance_push_set(int cpu, bool on) in balance_push_set() argument
9512 struct rq *rq = cpu_rq(cpu); in balance_push_set()
9517 WARN_ON_ONCE(rq->balance_callback); in balance_push_set()
9518 rq->balance_callback = &balance_push_callback; in balance_push_set()
9519 } else if (rq->balance_callback == &balance_push_callback) { in balance_push_set()
9520 rq->balance_callback = NULL; in balance_push_set()
9526 * Invoked from a CPUs hotplug control thread after the CPU has been marked
9527 * inactive. All tasks which are not per CPU kernel threads are either
9528 * pushed off this CPU now via balance_push() or placed on a different CPU
9529 * during wakeup. Wait until the CPU is quiescent.
9535 rcuwait_wait_event(&rq->hotplug_wait, in balance_hotplug_wait()
9536 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), in balance_hotplug_wait()
9546 static inline void balance_push_set(int cpu, bool on) in balance_push_set() argument
9558 if (!rq->online) { in set_rq_online()
9561 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
9562 rq->online = 1; in set_rq_online()
9565 if (class->rq_online) in set_rq_online()
9566 class->rq_online(rq); in set_rq_online()
9573 if (rq->online) { in set_rq_offline()
9578 if (class->rq_offline) in set_rq_offline()
9579 class->rq_offline(rq); in set_rq_offline()
9582 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
9583 rq->online = 0; in set_rq_offline()
9610 if (--num_cpus_frozen) in cpuset_cpu_active()
9613 * This is the last CPU online operation. So fall through and in cpuset_cpu_active()
9622 static int cpuset_cpu_inactive(unsigned int cpu) in cpuset_cpu_inactive() argument
9625 int ret = dl_bw_check_overflow(cpu); in cpuset_cpu_inactive()
9637 int sched_cpu_activate(unsigned int cpu) in sched_cpu_activate() argument
9639 struct rq *rq = cpu_rq(cpu); in sched_cpu_activate()
9646 balance_push_set(cpu, false); in sched_cpu_activate()
9652 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) in sched_cpu_activate()
9655 set_cpu_active(cpu, true); in sched_cpu_activate()
9658 sched_update_numa(cpu, true); in sched_cpu_activate()
9659 sched_domains_numa_masks_set(cpu); in sched_cpu_activate()
9673 if (rq->rd) { in sched_cpu_activate()
9674 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_activate()
9682 int sched_cpu_deactivate(unsigned int cpu) in sched_cpu_deactivate() argument
9684 struct rq *rq = cpu_rq(cpu); in sched_cpu_deactivate()
9689 * Remove CPU from nohz.idle_cpus_mask to prevent participating in in sched_cpu_deactivate()
9694 set_cpu_active(cpu, false); in sched_cpu_deactivate()
9697 * From this point forward, this CPU will refuse to run any task that in sched_cpu_deactivate()
9702 balance_push_set(cpu, true); in sched_cpu_deactivate()
9706 * preempt-disabled and RCU users of this state to go away such that in sched_cpu_deactivate()
9709 * Specifically, we rely on ttwu to no longer target this CPU, see in sched_cpu_deactivate()
9717 if (rq->rd) { in sched_cpu_deactivate()
9718 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_deactivate()
9727 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) in sched_cpu_deactivate()
9730 sched_core_cpu_deactivate(cpu); in sched_cpu_deactivate()
9736 sched_update_numa(cpu, false); in sched_cpu_deactivate()
9737 ret = cpuset_cpu_inactive(cpu); in sched_cpu_deactivate()
9739 balance_push_set(cpu, false); in sched_cpu_deactivate()
9740 set_cpu_active(cpu, true); in sched_cpu_deactivate()
9741 sched_update_numa(cpu, true); in sched_cpu_deactivate()
9744 sched_domains_numa_masks_clear(cpu); in sched_cpu_deactivate()
9748 static void sched_rq_cpu_starting(unsigned int cpu) in sched_rq_cpu_starting() argument
9750 struct rq *rq = cpu_rq(cpu); in sched_rq_cpu_starting()
9752 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
9756 int sched_cpu_starting(unsigned int cpu) in sched_cpu_starting() argument
9758 sched_core_cpu_starting(cpu); in sched_cpu_starting()
9759 sched_rq_cpu_starting(cpu); in sched_cpu_starting()
9760 sched_tick_start(cpu); in sched_cpu_starting()
9768 * CPU down completely. At this point all per CPU kthreads except the
9770 * either parked or have been unbound from the outgoing CPU. Ensure that
9773 * If after this point a bound task is being woken on this CPU then the
9777 int sched_cpu_wait_empty(unsigned int cpu) in sched_cpu_wait_empty() argument
9784 * Since this CPU is going 'away' for a while, fold any nr_active delta we
9785 * might have. Called from the CPU stopper task after ensuring that the
9786 * stopper is the last running task on the CPU, so nr_active count is
9790 * Also see the comment "Global load-average calculations".
9803 int cpu = cpu_of(rq); in dump_rq_tasks() local
9807 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); in dump_rq_tasks()
9809 if (task_cpu(p) != cpu) in dump_rq_tasks()
9815 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
9819 int sched_cpu_dying(unsigned int cpu) in sched_cpu_dying() argument
9821 struct rq *rq = cpu_rq(cpu); in sched_cpu_dying()
9825 sched_tick_stop(cpu); in sched_cpu_dying()
9828 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { in sched_cpu_dying()
9829 WARN(true, "Dying CPU not properly vacated!"); in sched_cpu_dying()
9837 sched_core_cpu_dying(cpu); in sched_cpu_dying()
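/*
 * Hedged sketch (plain C, not kernel code): the CPU-offline ordering that the
 * comments above describe, reduced to a printable table. The exact hotplug
 * states are not shown here; the step names are functions that appear in this
 * file and the descriptions paraphrase their comments.
 */
#include <stdio.h>

int main(void)
{
	static const char *const steps[][2] = {
		{ "sched_cpu_deactivate()", "mark the CPU !active, start pushing tasks via balance_push()" },
		{ "sched_cpu_wait_empty()", "balance_hotplug_wait() until the hotplug thread is the only runnable task" },
		{ "sched_cpu_dying()",      "stop the tick; WARN if the runqueue was not properly vacated" },
	};

	for (unsigned i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		printf("%u. %-24s %s\n", i + 1, steps[i][0], steps[i][1]);
	return 0;
}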
9848 * CPU masks are stable and all blatant races in the code below cannot in sched_init_smp()
9855 /* Move init over to a non-isolated CPU */ in sched_init_smp()
9858 current->flags &= ~PF_NO_SETAFFINITY; in sched_init_smp()
9968 raw_spin_lock_init(&rq->__lock); in sched_init()
9969 rq->nr_running = 0; in sched_init()
9970 rq->calc_load_active = 0; in sched_init()
9971 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
9972 init_cfs_rq(&rq->cfs); in sched_init()
9973 init_rt_rq(&rq->rt); in sched_init()
9974 init_dl_rq(&rq->dl); in sched_init()
9976 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
9977 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
9979 * How much CPU bandwidth does root_task_group get? in sched_init()
9981 * In case of task-groups formed through the cgroup filesystem, it in sched_init()
9982 * gets 100% of the CPU resources in the system. This overall in sched_init()
9983 * system CPU resource is divided among the tasks of in sched_init()
9984 * root_task_group and its child task-groups in a fair manner, in sched_init()
9985 * based on each entity's (task or task-group's) weight in sched_init()
9986 * (se->load.weight). in sched_init()
9990 * then A0's share of the CPU resource is: in sched_init()
9995 * directly in rq->cfs (i.e. root_task_group->se[] = NULL). in sched_init()
9997 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
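/*
 * Hedged illustration (user-space sketch) of the fair division described in
 * the comment above: each entity at a given level of the hierarchy receives
 * weight_i / sum(weights) of that level's CPU time. The entities and weights
 * below are made up for the example (ten default-weight tasks plus two
 * default-weight child groups).
 */
#include <stdio.h>

int main(void)
{
	static const char *const name[] = { "10 tasks (combined)", "group A0", "group A1" };
	static const double weight[]    = { 10 * 1024.0, 1024.0, 1024.0 };
	double sum = 0.0;

	for (unsigned i = 0; i < 3; i++)
		sum += weight[i];
	for (unsigned i = 0; i < 3; i++)
		printf("%-20s -> %5.2f%% of this level's CPU time\n",
		       name[i], 100.0 * weight[i] / sum);
	return 0;
}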
10000 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
10002 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
10005 rq->sd = NULL; in sched_init()
10006 rq->rd = NULL; in sched_init()
10007 rq->cpu_capacity = SCHED_CAPACITY_SCALE; in sched_init()
10008 rq->balance_callback = &balance_push_callback; in sched_init()
10009 rq->active_balance = 0; in sched_init()
10010 rq->next_balance = jiffies; in sched_init()
10011 rq->push_cpu = 0; in sched_init()
10012 rq->cpu = i; in sched_init()
10013 rq->online = 0; in sched_init()
10014 rq->idle_stamp = 0; in sched_init()
10015 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
10016 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
10018 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
10022 rq->last_blocked_load_update_tick = jiffies; in sched_init()
10023 atomic_set(&rq->nohz_flags, 0); in sched_init()
10025 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); in sched_init()
10028 rcuwait_init(&rq->hotplug_wait); in sched_init()
10032 atomic_set(&rq->nr_iowait, 0); in sched_init()
10035 rq->core = rq; in sched_init()
10036 rq->core_pick = NULL; in sched_init()
10037 rq->core_enabled = 0; in sched_init()
10038 rq->core_tree = RB_ROOT; in sched_init()
10039 rq->core_forceidle_count = 0; in sched_init()
10040 rq->core_forceidle_occupation = 0; in sched_init()
10041 rq->core_forceidle_start = 0; in sched_init()
10043 rq->core_cookie = 0UL; in sched_init()
10045 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); in sched_init()
10058 * is dressed up as a per-CPU kthread and thus needs to play the part in sched_init()
10059 * if we want to avoid special-casing it in code that deals with per-CPU in sched_init()
10095 * Blocking primitives will set (and therefore destroy) current->state, in __might_sleep()
10099 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, in __might_sleep()
10102 (void *)current->task_state_change, in __might_sleep()
10103 (void *)current->task_state_change); in __might_sleep()
10141 !is_idle_task(current) && !current->non_block_count) || in __might_resched()
10156 in_atomic(), irqs_disabled(), current->non_block_count, in __might_resched()
10157 current->pid, current->comm); in __might_resched()
10201 current->pid, current->comm); in __cant_sleep()
10233 current->pid, current->comm); in __cant_migrate()
10256 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
10259 p->se.exec_start = 0; in normalize_rt_tasks()
10260 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
10261 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
10262 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
10286 * stopped - every CPU needs to be quiescent, and no scheduling
10293 * curr_task - return the current task for a given CPU.
10294 * @cpu: the processor in question.
10298 * Return: The current task for @cpu.
10300 struct task_struct *curr_task(int cpu) in curr_task() argument
10302 return cpu_curr(cpu); in curr_task()
10318 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
10320 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
10346 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_unregister_group()
10356 return ERR_PTR(-ENOMEM); in sched_create_group()
10370 return ERR_PTR(-ENOMEM); in sched_create_group()
10378 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
10383 tg->parent = parent; in sched_online_group()
10384 INIT_LIST_HEAD(&tg->children); in sched_online_group()
10385 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
10401 call_rcu(&tg->rcu, sched_unregister_group_rcu); in sched_destroy_group()
10422 list_del_rcu(&tg->list); in sched_release_group()
10423 list_del_rcu(&tg->siblings); in sched_release_group()
10445 tsk->sched_task_group = group; in sched_change_group()
10448 if (tsk->sched_class->task_change_group) in sched_change_group()
10449 tsk->sched_class->task_change_group(tsk); in sched_change_group()
10459 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10477 if (group == tsk->sched_task_group) in sched_move_task()
10523 return ERR_PTR(-ENOMEM); in cpu_cgroup_css_alloc()
10525 return &tg->css; in cpu_cgroup_css_alloc()
10532 struct task_group *parent = css_tg(css->parent); in cpu_cgroup_css_online()
10572 return -EINVAL; in cpu_cgroup_can_attach()
10601 uc_parent = css_tg(css)->parent in cpu_util_update_eff()
10602 ? css_tg(css)->parent->uclamp : NULL; in cpu_util_update_eff()
10606 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; in cpu_util_update_eff()
10618 uc_se = css_tg(css)->uclamp; in cpu_util_update_eff()
10668 req.ret = -ERANGE; in capacity_from_percent()
10696 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
10697 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
10703 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
10735 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
10743 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
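/*
 * Hedged illustration (user-space sketch) of the percent <-> utilization-clamp
 * mapping that capacity_from_percent() and cpu_uclamp_print() handle above.
 * It assumes a capacity scale of 1024 and ignores the extra two decimal digits
 * of precision the kernel keeps in tg->uclamp_pct.
 */
#include <stdio.h>

#define CAP_SCALE 1024ULL

static unsigned long long percent_to_util(unsigned int pct)
{
	return (pct * CAP_SCALE + 50) / 100;	/* round to nearest */
}

static unsigned int util_to_percent(unsigned long long util)
{
	return (unsigned int)((util * 100 + CAP_SCALE / 2) / CAP_SCALE);
}

int main(void)
{
	for (unsigned int pct = 0; pct <= 100; pct += 25)
		printf("uclamp %3u%% -> util %4llu -> %3u%%\n",
		       pct, percent_to_util(pct), util_to_percent(percent_to_util(pct)));
	return 0;
}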
10775 return (u64) scale_load_down(tg->shares); in cpu_shares_read_u64()
10792 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
10795 return -EINVAL; in tg_set_cfs_bandwidth()
10803 return -EINVAL; in tg_set_cfs_bandwidth()
10811 return -EINVAL; in tg_set_cfs_bandwidth()
10817 return -EINVAL; in tg_set_cfs_bandwidth()
10821 return -EINVAL; in tg_set_cfs_bandwidth()
10824 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
10835 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; in tg_set_cfs_bandwidth()
10837 * If we need to toggle cfs_bandwidth_used, off->on must occur in tg_set_cfs_bandwidth()
10838 * before making related changes, and on->off must occur afterwards in tg_set_cfs_bandwidth()
10843 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { in tg_set_cfs_bandwidth()
10844 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
10845 cfs_b->quota = quota; in tg_set_cfs_bandwidth()
10846 cfs_b->burst = burst; in tg_set_cfs_bandwidth()
10851 * Restart the period timer (if active) to handle new in tg_set_cfs_bandwidth()
10859 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
10860 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
10863 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
10864 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
10866 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
10880 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
10881 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_quota()
10887 return -EINVAL; in tg_set_cfs_quota()
10896 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
10897 return -1; in tg_get_cfs_quota()
10899 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
10910 return -EINVAL; in tg_set_cfs_period()
10913 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
10914 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_period()
10923 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
10934 return -EINVAL; in tg_set_cfs_burst()
10937 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_burst()
10938 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_burst()
10947 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
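/*
 * Hedged illustration (user-space sketch, not kernel code): how the quota and
 * period knobs handled above map to an effective CPU cap. A quota of -1
 * models RUNTIME_INF, as returned by tg_get_cfs_quota() above, and means
 * "unlimited"; otherwise quota_us / period_us bounds the group's CPU use.
 */
#include <stdio.h>

static double cfs_cap(long long quota_us, long long period_us)
{
	if (quota_us < 0)			/* modelled after the -1 returned above */
		return -1.0;			/* no cap */
	return (double)quota_us / (double)period_us;
}

int main(void)
{
	printf("quota=50000us  period=100000us -> %.2f CPUs worth of runtime\n",
	       cfs_cap(50000, 100000));
	printf("quota=200000us period=100000us -> %.2f CPUs worth of runtime\n",
	       cfs_cap(200000, 100000));
	if (cfs_cap(-1, 100000) < 0)
		puts("quota=-1 (RUNTIME_INF)          -> no cap");
	return 0;
}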
11003 if (tg == d->tg) { in normalize_cfs_quota()
11004 period = d->period; in normalize_cfs_quota()
11005 quota = d->quota; in normalize_cfs_quota()
11012 if (quota == RUNTIME_INF || quota == -1) in normalize_cfs_quota()
11021 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
11022 s64 quota = 0, parent_quota = -1; in tg_cfs_schedulable_down()
11024 if (!tg->parent) { in tg_cfs_schedulable_down()
11027 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
11030 parent_quota = parent_b->hierarchical_quota; in tg_cfs_schedulable_down()
11034 * always take the non-RUNTIME_INF min. On cgroup1, only in tg_cfs_schedulable_down()
11048 return -EINVAL; in tg_cfs_schedulable_down()
11051 cfs_b->hierarchical_quota = quota; in tg_cfs_schedulable_down()
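/*
 * Hedged illustration (user-space sketch) of the hierarchy rule applied in
 * tg_cfs_schedulable_down() above for the cgroup2 behaviour: an unlimited
 * quota (modelled as -1 here, like RUNTIME_INF) defers to the parent, and
 * otherwise the effective quota is the non-unlimited minimum of the group's
 * own quota and its parent's effective quota.
 */
#include <stdio.h>

#define QUOTA_INF (-1LL)

static long long effective_quota(long long own, long long parent_eff)
{
	if (own == QUOTA_INF)
		return parent_eff;
	if (parent_eff == QUOTA_INF)
		return own;
	return own < parent_eff ? own : parent_eff;
}

int main(void)
{
	/* root: unlimited; parent capped at 200ms per period; child asks for 500ms */
	long long root = QUOTA_INF;
	long long parent = effective_quota(200000, root);
	long long child = effective_quota(500000, parent);

	printf("parent effective quota: %lld us\n", parent);
	printf("child  effective quota: %lld us\n", child);
	return 0;
}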
11076 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
11078 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); in cpu_cfs_stat_show()
11079 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); in cpu_cfs_stat_show()
11080 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); in cpu_cfs_stat_show()
11088 stats = __schedstats_from_se(tg->se[i]); in cpu_cfs_stat_show()
11089 ws += schedstat_val(stats->wait_sum); in cpu_cfs_stat_show()
11095 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); in cpu_cfs_stat_show()
11096 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); in cpu_cfs_stat_show()
11107 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
11154 return css_tg(css)->idle; in cpu_idle_read_s64()
11237 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
11240 throttled_usec = cfs_b->throttled_time; in cpu_extra_stat_show()
11242 burst_usec = cfs_b->burst_time; in cpu_extra_stat_show()
11250 cfs_b->nr_periods, cfs_b->nr_throttled, in cpu_extra_stat_show()
11251 throttled_usec, cfs_b->nr_burst, burst_usec); in cpu_extra_stat_show()
11280 u64 weight = scale_load_down(tg->shares); in cpu_weight_read_u64()
11292 * value used by scheduler and the round-trip conversions preserve in cpu_weight_write_u64()
11296 return -ERANGE; in cpu_weight_write_u64()
11306 unsigned long weight = scale_load_down(css_tg(css)->shares); in cpu_weight_nice_read_s64()
11312 delta = abs(sched_prio_to_weight[prio] - weight); in cpu_weight_nice_read_s64()
11318 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); in cpu_weight_nice_read_s64()
11328 return -ERANGE; in cpu_weight_nice_write_s64()
11330 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; in cpu_weight_nice_write_s64()
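/*
 * Hedged illustration (user-space sketch) of the round trip referred to in
 * the comment above cpu_weight_write_u64(): cgroup-v2 cpu.weight is assumed
 * to span 1..10000 with a default of 100, and is rescaled to the scheduler's
 * share unit where the default weight is 1024. Rounding to nearest keeps the
 * conversion lossless across that range.
 */
#include <stdio.h>

#define CGRP_WEIGHT_DFL 100ULL
#define NICE_0_SHARES   1024ULL

static unsigned long long weight_to_shares(unsigned long long w)
{
	return (w * NICE_0_SHARES + CGRP_WEIGHT_DFL / 2) / CGRP_WEIGHT_DFL;
}

static unsigned long long shares_to_weight(unsigned long long s)
{
	return (s * CGRP_WEIGHT_DFL + NICE_0_SHARES / 2) / NICE_0_SHARES;
}

int main(void)
{
	for (unsigned long long w = 1; w <= 10000; w *= 10)
		printf("cpu.weight %5llu -> shares %6llu -> cpu.weight %5llu\n",
		       w, weight_to_shares(w), shares_to_weight(weight_to_shares(w)));
	return 0;
}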
11356 return -EINVAL; in cpu_period_quota_parse()
11365 return -EINVAL; in cpu_period_quota_parse()
11466 void dump_cpu_task(int cpu) in dump_cpu_task() argument
11468 if (cpu == smp_processor_id() && in_hardirq()) { in dump_cpu_task()
11478 if (trigger_single_cpu_backtrace(cpu)) in dump_cpu_task()
11481 pr_info("Task dump for CPU %d:\n", cpu); in dump_cpu_task()
11482 sched_show_task(cpu_curr(cpu)); in dump_cpu_task()
11487 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
11488 * nice 1, it will get ~10% less CPU time than another CPU-bound task
11492 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
11493 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25.)
11498 /* -20 */ 88761, 71755, 56483, 46273, 36291,
11499 /* -15 */ 29154, 23254, 18705, 14949, 11916,
11500 /* -10 */ 9548, 7620, 6100, 4904, 3906,
11501 /* -5 */ 3121, 2501, 1991, 1586, 1277,
11516 /* -20 */ 48388, 59856, 76040, 92818, 118348,
11517 /* -15 */ 147320, 184698, 229616, 287308, 360437,
11518 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
11519 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
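/*
 * Hedged illustration (user-space sketch) of the ~10% rule described above,
 * using two adjacent entries from sched_prio_to_weight: with weights 3121
 * (nice -5) and 2501 (nice -4), two CPU-bound tasks split the CPU roughly
 * 55.5% / 44.5%, i.e. stepping down one nice level costs about 10% of the CPU.
 */
#include <stdio.h>

int main(void)
{
	double w_nice_m5 = 3121.0;	/* sched_prio_to_weight[15], nice -5 */
	double w_nice_m4 = 2501.0;	/* sched_prio_to_weight[16], nice -4 */
	double sum = w_nice_m5 + w_nice_m4;

	printf("nice -5 task: %.1f%% of the CPU\n", 100.0 * w_nice_m5 / sum);
	printf("nice -4 task: %.1f%% of the CPU\n", 100.0 * w_nice_m4 / sum);
	return 0;
}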
11534 * @cid_lock: Guarantee forward-progress of cid allocation.
11536 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
11537 * is only used when contention is detected by the lock-free allocation so
11543 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
11545 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
11554 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
11560 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
11577 * per-mm/cpu cid value.
11579 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
11580 * task->mm != mm for the rest of the discussion. There are two scheduler state
11583 * (TSA) Store to rq->curr with transition from (N) to (Y)
11585 * (TSB) Store to rq->curr with transition from (Y) to (N)
11587 * On the remote-clear side, there is one transition we care about:
11592 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
11606 * Context switch CS-1                     Remote-clear
11607 *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
11609 *   - switch_mm_cid()
11610 *     - memory barrier (see switch_mm_cid()
11614 *   - mm_cid_get (next)
11615 *     - READ_ONCE(*pcpu_cid)                - rcu_dereference(src_rq->curr)
11622 * still an active task on the cpu. Remote-clear will therefore not transition
11643 t->migrate_from_cpu = task_cpu(t); in sched_mm_cid_migrate_from()
11651 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_fetch_cid()
11656 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
11658 last_mm_cid = t->last_mm_cid; in __sched_mm_cid_migrate_from_fetch_cid()
11662 * to be moved to the destination cpu. in __sched_mm_cid_migrate_from_fetch_cid()
11664 if (last_mm_cid == -1) in __sched_mm_cid_migrate_from_fetch_cid()
11665 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
11666 src_cid = READ_ONCE(src_pcpu_cid->cid); in __sched_mm_cid_migrate_from_fetch_cid()
11668 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
11672 * are not the last task to be migrated from this cpu for this mm, so in __sched_mm_cid_migrate_from_fetch_cid()
11673 * there is no need to move src_cid to the destination cpu. in __sched_mm_cid_migrate_from_fetch_cid()
11676 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_fetch_cid()
11677 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_fetch_cid()
11678 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_fetch_cid()
11679 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
11692 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_try_steal_cid()
11695 if (src_cid == -1) in __sched_mm_cid_migrate_from_try_steal_cid()
11696 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
11699 * Attempt to clear the source cpu cid to move it to the destination in __sched_mm_cid_migrate_from_try_steal_cid()
11700 * cpu. in __sched_mm_cid_migrate_from_try_steal_cid()
11703 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) in __sched_mm_cid_migrate_from_try_steal_cid()
11704 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
11707 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
11708 * rq->curr->mm matches the scheduler barrier in context_switch() in __sched_mm_cid_migrate_from_try_steal_cid()
11709 * between store to rq->curr and load of prev and next task's in __sched_mm_cid_migrate_from_try_steal_cid()
11710 * per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
11712 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
11713 * rq->curr->mm_cid_active matches the barrier in in __sched_mm_cid_migrate_from_try_steal_cid()
11715 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in __sched_mm_cid_migrate_from_try_steal_cid()
11716 * load of per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
11721 * the lazy-put flag, this task will be responsible for transitioning in __sched_mm_cid_migrate_from_try_steal_cid()
11722 * from lazy-put flag set to MM_CID_UNSET. in __sched_mm_cid_migrate_from_try_steal_cid()
11725 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_try_steal_cid()
11726 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_try_steal_cid()
11729 * no point in moving this cid to the destination cpu. in __sched_mm_cid_migrate_from_try_steal_cid()
11731 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_try_steal_cid()
11732 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
11739 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in __sched_mm_cid_migrate_from_try_steal_cid()
11740 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
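/*
 * Hedged user-space analogue (C11 atomics, not kernel code) of the two-step
 * lazy-put protocol used above: a cached id slot is first flagged LAZY with a
 * cmpxchg; only the thread that set the flag may complete the transition to
 * UNSET, and it backs off if an active owner shows up in between. The slot
 * value, flag bit and owner_active variable are illustrative stand-ins only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SLOT_UNSET	-1
#define SLOT_LAZY_FLAG	0x40000000

static _Atomic int slot = 3;		/* stand-in for pcpu_cid->cid */
static _Atomic bool owner_active = false;

static bool remote_clear(void)
{
	int cid = atomic_load(&slot);

	if (cid == SLOT_UNSET || (cid & SLOT_LAZY_FLAG))
		return false;
	/* step 1: mark lazy-put; fail if someone else changed the slot */
	if (!atomic_compare_exchange_strong(&slot, &cid, cid | SLOT_LAZY_FLAG))
		return false;
	/* re-check: if an owner is active, leave the lazy flag for it to handle */
	if (atomic_load(&owner_active))
		return false;
	/* step 2: only the lazy-put setter may finish the clear */
	int lazy = cid | SLOT_LAZY_FLAG;
	return atomic_compare_exchange_strong(&slot, &lazy, SLOT_UNSET);
}

int main(void)
{
	bool cleared = remote_clear();

	printf("cleared: %d, slot now %d\n", cleared, atomic_load(&slot));
	return 0;
}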
11745 * Migration to dst cpu. Called with dst_rq lock held.
11752 struct mm_struct *mm = t->mm; in sched_mm_cid_migrate_to()
11760 src_cpu = t->migrate_from_cpu; in sched_mm_cid_migrate_to()
11761 if (src_cpu == -1) { in sched_mm_cid_migrate_to()
11762 t->last_mm_cid = -1; in sched_mm_cid_migrate_to()
11775 * greater than or equal to the number of allowed CPUs, because user-space in sched_mm_cid_migrate_to()
11779 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); in sched_mm_cid_migrate_to()
11780 dst_cid = READ_ONCE(dst_pcpu_cid->cid); in sched_mm_cid_migrate_to()
11782 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed) in sched_mm_cid_migrate_to()
11784 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); in sched_mm_cid_migrate_to()
11787 if (src_cid == -1) in sched_mm_cid_migrate_to()
11791 if (src_cid == -1) in sched_mm_cid_migrate_to()
11797 /* Move src_cid to dst cpu. */ in sched_mm_cid_migrate_to()
11799 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); in sched_mm_cid_migrate_to()
11803 int cpu) in sched_mm_cid_remote_clear() argument
11805 struct rq *rq = cpu_rq(cpu); in sched_mm_cid_remote_clear()
11809 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear()
11814 * Clear the cpu cid if it is set to keep cid allocation compact. If in sched_mm_cid_remote_clear()
11815 * there happens to be other tasks left on the source cpu using this in sched_mm_cid_remote_clear()
11820 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) in sched_mm_cid_remote_clear()
11824 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
11825 * rq->curr->mm matches the scheduler barrier in context_switch() in sched_mm_cid_remote_clear()
11826 * between store to rq->curr and load of prev and next task's in sched_mm_cid_remote_clear()
11827 * per-mm/cpu cid. in sched_mm_cid_remote_clear()
11829 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
11830 * rq->curr->mm_cid_active matches the barrier in in sched_mm_cid_remote_clear()
11832 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in sched_mm_cid_remote_clear()
11833 * load of per-mm/cpu cid. in sched_mm_cid_remote_clear()
11838 * the lazy-put flag, that task will be responsible for transitioning in sched_mm_cid_remote_clear()
11839 * from lazy-put flag set to MM_CID_UNSET. in sched_mm_cid_remote_clear()
11842 t = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear()
11843 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) in sched_mm_cid_remote_clear()
11853 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in sched_mm_cid_remote_clear()
11858 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu) in sched_mm_cid_remote_clear_old() argument
11860 struct rq *rq = cpu_rq(cpu); in sched_mm_cid_remote_clear_old()
11866 * rq->clock load is racy on 32-bit but one spurious clear once in a in sched_mm_cid_remote_clear_old()
11869 rq_clock = READ_ONCE(rq->clock); in sched_mm_cid_remote_clear_old()
11870 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
11878 curr = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear_old()
11879 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { in sched_mm_cid_remote_clear_old()
11880 WRITE_ONCE(pcpu_cid->time, rq_clock); in sched_mm_cid_remote_clear_old()
11885 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS) in sched_mm_cid_remote_clear_old()
11887 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
11890 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu, in sched_mm_cid_remote_clear_weight() argument
11896 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
11897 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear_weight()
11900 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
11909 int weight, cpu; in task_mm_cid_work() local
11913 work->next = work; /* Prevent double-add */ in task_mm_cid_work()
11914 if (t->flags & PF_EXITING) in task_mm_cid_work()
11916 mm = t->mm; in task_mm_cid_work()
11919 old_scan = READ_ONCE(mm->mm_cid_next_scan); in task_mm_cid_work()
11924 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); in task_mm_cid_work()
11932 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) in task_mm_cid_work()
11936 for_each_possible_cpu(cpu) in task_mm_cid_work()
11937 sched_mm_cid_remote_clear_old(mm, cpu); in task_mm_cid_work()
11943 for_each_possible_cpu(cpu) in task_mm_cid_work()
11944 sched_mm_cid_remote_clear_weight(mm, cpu, weight); in task_mm_cid_work()
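/*
 * Hedged user-space analogue of the "claim the scan" pattern used in
 * task_mm_cid_work() above: many ticks may notice that the scan deadline has
 * passed, but only the caller whose compare-and-swap advances next_scan
 * actually performs the expensive per-CPU sweep. Time units and the period
 * below are made up for the sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SCAN_PERIOD 100ULL	/* arbitrary time units */

static _Atomic unsigned long long next_scan = 100;

static bool try_claim_scan(unsigned long long now)
{
	unsigned long long old = atomic_load(&next_scan);

	if (now < old)
		return false;	/* not due yet */
	/* only one caller wins the right to do this period's sweep */
	return atomic_compare_exchange_strong(&next_scan, &old, now + SCAN_PERIOD);
}

int main(void)
{
	unsigned long long now = 150;

	printf("first caller does the sweep:  %d\n", try_claim_scan(now));
	printf("second caller skips it:       %d\n", try_claim_scan(now));
	return 0;
}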
11949 struct mm_struct *mm = t->mm; in init_sched_mm_cid()
11953 mm_users = atomic_read(&mm->mm_users); in init_sched_mm_cid()
11955 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); in init_sched_mm_cid()
11957 t->cid_work.next = &t->cid_work; /* Protect against double add */ in init_sched_mm_cid()
11958 init_task_work(&t->cid_work, task_mm_cid_work); in init_sched_mm_cid()
11963 struct callback_head *work = &curr->cid_work; in task_tick_mm_cid()
11966 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || in task_tick_mm_cid()
11967 work->next != work) in task_tick_mm_cid()
11969 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) in task_tick_mm_cid()
11976 struct mm_struct *mm = t->mm; in sched_mm_cid_exit_signals()
11986 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_exit_signals()
11988 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_exit_signals()
11993 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_exit_signals()
11998 struct mm_struct *mm = t->mm; in sched_mm_cid_before_execve()
12008 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_before_execve()
12010 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_before_execve()
12015 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_before_execve()
12020 struct mm_struct *mm = t->mm; in sched_mm_cid_after_execve()
12030 WRITE_ONCE(t->mm_cid_active, 1); in sched_mm_cid_after_execve()
12032 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_after_execve()
12036 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm); in sched_mm_cid_after_execve()
12043 WARN_ON_ONCE(!t->mm || t->mm_cid != -1); in sched_mm_cid_fork()
12044 t->mm_cid_active = 1; in sched_mm_cid_fork()