1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
72 # include <linux/entry-common.h>
96 #include "../../io_uring/io-wq.h"
160 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
161 return -2; in __task_prio()
163 if (p->dl_server) in __task_prio()
164 return -1; /* deadline */ in __task_prio()
166 if (rt_or_dl_prio(p->prio)) in __task_prio()
167 return p->prio; /* [-1, 99] */ in __task_prio()
169 if (p->sched_class == &idle_sched_class) in __task_prio()
192 if (-pa < -pb) in prio_less()
195 if (-pb < -pa) in prio_less()
198 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */ in prio_less()
201 a_dl = &a->dl; in prio_less()
204 * __task_prio() can return -1 (for DL) even for those. In that in prio_less()
207 if (a->dl_server) in prio_less()
208 a_dl = a->dl_server; in prio_less()
210 b_dl = &b->dl; in prio_less()
211 if (b->dl_server) in prio_less()
212 b_dl = b->dl_server; in prio_less()
214 return !dl_time_before(a_dl->deadline, b_dl->deadline); in prio_less()
231 if (a->core_cookie < b->core_cookie) in __sched_core_less()
234 if (a->core_cookie > b->core_cookie) in __sched_core_less()
238 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) in __sched_core_less()
256 if (cookie < p->core_cookie) in rb_sched_core_cmp()
257 return -1; in rb_sched_core_cmp()
259 if (cookie > p->core_cookie) in rb_sched_core_cmp()
267 if (p->se.sched_delayed) in sched_core_enqueue()
270 rq->core->core_task_seq++; in sched_core_enqueue()
272 if (!p->core_cookie) in sched_core_enqueue()
275 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
280 if (p->se.sched_delayed) in sched_core_dequeue()
283 rq->core->core_task_seq++; in sched_core_dequeue()
286 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
287 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
293 * and re-examine whether the core is still in forced idle state. in sched_core_dequeue()
295 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
296 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
302 if (p->sched_class->task_is_throttled) in sched_task_is_throttled()
303 return p->sched_class->task_is_throttled(p, cpu); in sched_task_is_throttled()
310 struct rb_node *node = &p->core_node; in sched_core_next()
319 if (p->core_cookie != cookie) in sched_core_next()
328 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
336 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
341 if (!sched_task_is_throttled(p, rq->cpu)) in sched_core_find()
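The core-scheduling rb-tree above is ordered first by core_cookie and then by priority, which is why sched_core_find() can use rb_find_first() to grab the left-most (highest-priority) task for a cookie. A minimal sketch of that rb_add()/rb_find_first() pairing with a made-up item type (the demo_* names are illustrative, not kernel API):

#include <linux/rbtree.h>

struct demo_item {
	unsigned long	cookie;
	int		prio;		/* lower value == higher priority */
	struct rb_node	rb;
};

/* insertion order: cookie first, then priority within a cookie */
static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	struct demo_item *ia = rb_entry(a, struct demo_item, rb);
	struct demo_item *ib = rb_entry(b, struct demo_item, rb);

	if (ia->cookie != ib->cookie)
		return ia->cookie < ib->cookie;
	return ia->prio < ib->prio;
}

/* lookup comparator: match on cookie only, like rb_sched_core_cmp() */
static int demo_cmp(const void *key, const struct rb_node *n)
{
	unsigned long cookie = (unsigned long)key;
	const struct demo_item *i = rb_entry(n, struct demo_item, rb);

	if (cookie < i->cookie)
		return -1;
	if (cookie > i->cookie)
		return 1;
	return 0;
}

/* insert: rb_add(&item->rb, &tree, demo_less);
 * find:   rb_find_first((void *)cookie, &tree, demo_cmp) returns the
 *         left-most node with that cookie, or NULL. */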
371 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); in sched_core_lock()
380 raw_spin_unlock(&cpu_rq(t)->__lock); in sched_core_unlock()
401 cpu_rq(t)->core_enabled = enabled; in __sched_core_flip()
403 cpu_rq(cpu)->core->core_forceidle_start = 0; in __sched_core_flip()
414 cpu_rq(cpu)->core_enabled = enabled; in __sched_core_flip()
424 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); in sched_core_assert_empty()
479 if (!atomic_add_unless(&sched_core_count, -1, 1)) in sched_core_put()
506 * p->pi_lock
507 * rq->lock
508 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
510 * rq1->lock
511 * rq2->lock where: rq1 < rq2
515 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
516 * local CPU's rq->lock, it optionally removes the task from the runqueue and
520 * Task enqueue is also under rq->lock, possibly taken from another CPU.
526 * complicated to avoid having to take two rq->locks.
530 * System-calls and anything external will use task_rq_lock() which acquires
531 * both p->pi_lock and rq->lock. As a consequence the state they change is
534 * - sched_setaffinity()/
535 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
536 * - set_user_nice(): p->se.load, p->*prio
537 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
538 * p->se.load, p->rt_priority,
539 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
540 * - sched_setnuma(): p->numa_preferred_nid
541 * - sched_move_task(): p->sched_task_group
542 * - uclamp_update_active() p->uclamp*
544 * p->state <- TASK_*:
548 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
551 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
554 * rq->lock. Non-zero indicates the task is runnable, the special
556 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
558 * Additionally it is possible to be ->on_rq but still be considered not
559 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
563 * p->on_cpu <- { 0, 1 }:
566 * set before p is scheduled-in and cleared after p is scheduled-out, both
567 * under rq->lock. Non-zero indicates the task is running on its CPU.
570 * CPU to have ->on_cpu = 1 at the same time. ]
574 * - Don't call set_task_cpu() on a blocked task:
579 * - for try_to_wake_up(), called under p->pi_lock:
581 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
583 * - for migration called under rq->lock:
589 * - for migration called under double_rq_lock():
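The comment block above fixes a global order: p->pi_lock outside rq->lock, and when two runqueue locks are needed, the "smaller" one first. A minimal userspace analogue of that second rule (pthread mutexes standing in for rq->lock; not kernel code), showing how a fixed acquisition order prevents ABBA deadlock:

#include <pthread.h>
#include <stdint.h>

struct runqueue_like {
	pthread_mutex_t lock;
	/* ... per-queue state ... */
};

static void double_lock_ordered(struct runqueue_like *a, struct runqueue_like *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* impose one global order */
		struct runqueue_like *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);		/* "rq1->lock" */
	pthread_mutex_lock(&b->lock);		/* "rq2->lock where: rq1 < rq2" */
}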
605 raw_spin_lock_nested(&rq->__lock, subclass); in raw_spin_rq_lock_nested()
631 ret = raw_spin_trylock(&rq->__lock); in raw_spin_rq_trylock()
654 * double_rq_lock - safely lock two runqueues
672 * __task_rq_lock - lock the rq @p resides on.
675 __acquires(rq->lock) in __task_rq_lock()
679 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
696 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
699 __acquires(p->pi_lock) in task_rq_lock()
700 __acquires(rq->lock) in task_rq_lock()
705 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
711 * ACQUIRE (rq->lock) in task_rq_lock()
712 * [S] ->on_rq = MIGRATING [L] rq = task_rq() in task_rq_lock()
713 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); in task_rq_lock()
714 * [S] ->cpu = new_cpu [L] task_rq() in task_rq_lock()
715 * [L] ->on_rq in task_rq_lock()
716 * RELEASE (rq->lock) in task_rq_lock()
719 * the old rq->lock will fully serialize against the stores. in task_rq_lock()
730 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
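task_rq_lock() above is the canonical "lock, re-validate, retry" loop: the task's runqueue can change right up until that runqueue's lock is held, so the rq is re-checked after locking. A userspace sketch of the same shape (the queue/item names are made up):

#include <pthread.h>
#include <stdatomic.h>

struct queue {
	pthread_mutex_t lock;
};

struct item {
	pthread_mutex_t		pi_lock;	/* stabilizes ->home against movers */
	_Atomic(struct queue *)	home;		/* may be changed by migration */
};

/* Returns with it->pi_lock and the (correct) home queue's lock held. */
static struct queue *item_queue_lock(struct item *it)
{
	struct queue *q;

	pthread_mutex_lock(&it->pi_lock);
	for (;;) {
		q = atomic_load(&it->home);
		pthread_mutex_lock(&q->lock);
		if (q == atomic_load(&it->home))
			return q;			/* still the right queue */
		pthread_mutex_unlock(&q->lock);		/* it moved: retry */
	}
}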
738 * RQ-clock updating methods:
751 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
758 * When this happens, we stop ->clock_task and only update the in update_rq_clock_task()
760 * update will consume the rest. This ensures ->clock_task is in update_rq_clock_task()
763 * It does however cause some slight misattribution of {soft,}IRQ in update_rq_clock_task()
765 * the current rq->clock timestamp, except that would require using in update_rq_clock_task()
771 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
772 delta -= irq_delta; in update_rq_clock_task()
773 delayacct_irq(rq->curr, irq_delta); in update_rq_clock_task()
781 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
786 rq->prev_steal_time_rq = prev_steal; in update_rq_clock_task()
787 delta -= steal; in update_rq_clock_task()
791 rq->clock_task += delta; in update_rq_clock_task()
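In other words, only the part of the wall-clock delta not spent in IRQ context or stolen by the hypervisor advances rq->clock_task. A throwaway numeric illustration (values invented):

#include <stdio.h>

int main(void)
{
	unsigned long long delta     = 1000000;	/* 1 ms of rq->clock progress, in ns */
	unsigned long long irq_delta =  200000;	/* time attributed to hard/soft IRQ */
	unsigned long long steal     =  100000;	/* time stolen by the hypervisor */

	delta -= irq_delta;
	delta -= steal;
	printf("clock_task advances by %llu ns\n", delta);	/* 700000 */
	return 0;
}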
807 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
811 WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
812 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
817 delta = clock - rq->clock; in update_rq_clock()
820 rq->clock += delta; in update_rq_clock()
827 * Use HR-timers to deliver accurate preemption points.
832 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
833 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
837 * High-resolution timer tick.
849 rq->donor->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
859 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
860 ktime_t time = rq->hrtick_time; in __hrtick_restart()
881 * called with rq->lock held and IRQs disabled
885 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
893 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); in hrtick_start()
898 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
905 * called with rq->lock held and IRQs disabled
914 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
923 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); in hrtick_rq_init()
925 hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
959 return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG); in set_nr_and_not_polling()
971 typeof(ti->flags) val = READ_ONCE(ti->flags); in set_nr_if_polling()
978 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)); in set_nr_if_polling()
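Both helpers above rely on atomically reading the old flags value: set_nr_and_not_polling() sets the resched bit unconditionally and reports whether the remote CPU was *not* polling (so an IPI is still required), while set_nr_if_polling() only sets it when the target is polling. A userspace sketch of the first idiom with invented flag bits:

#include <stdatomic.h>
#include <stdbool.h>

#define FLAG_NEED_RESCHED	(1u << 0)
#define FLAG_POLLING		(1u << 1)

static bool set_need_resched_and_not_polling(_Atomic unsigned int *flags)
{
	unsigned int old = atomic_fetch_or(flags, FLAG_NEED_RESCHED);

	return !(old & FLAG_POLLING);	/* true => caller must send an IPI */
}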
1000 struct wake_q_node *node = &task->wake_q; in __wake_q_add()
1003 * Atomically grab the task, if ->wake_q is !nil already it means in __wake_q_add()
1011 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
1017 *head->lastp = node; in __wake_q_add()
1018 head->lastp = &node->next; in __wake_q_add()
1023 * wake_q_add() - queue a wakeup for 'later' waking.
1031 * This function must be used as-if it were wake_up_process(); IOW the task
1041 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1049 * This function must be used as-if it were wake_up_process(); IOW the task
1052 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1065 struct wake_q_node *node = head->first; in wake_up_q()
1071 node = node->next; in wake_up_q()
1073 WRITE_ONCE(task->wake_q.next, NULL); in wake_up_q()
1074 /* Task can safely be re-inserted now. */ in wake_up_q()
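Rough sketch of the intended wake_q usage pattern: wakeups are queued while a lock is held and only issued after it is dropped, so wake_up_process() never runs under the caller's lock (the lock, list and demo_* names below are illustrative, not kernel structures):

#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_waiter {
	struct task_struct	*task;
	struct list_head	entry;
};

static void demo_wake_all(spinlock_t *lock, struct list_head *waiters)
{
	struct demo_waiter *w, *tmp;
	DEFINE_WAKE_Q(wake_q);

	spin_lock(lock);
	list_for_each_entry_safe(w, tmp, waiters, entry) {
		list_del_init(&w->entry);
		wake_q_add(&wake_q, w->task);	/* defer the actual wakeup */
	}
	spin_unlock(lock);

	wake_up_q(&wake_q);	/* wakeups happen without holding 'lock' */
}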
1086 * resched_curr - mark rq's current task 'to be rescheduled now'.
1089 * might also involve a cross-CPU call to trigger the scheduler on
1094 struct task_struct *curr = rq->curr; in __resched_curr()
1107 if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED)) in __resched_curr()
1173 * from an idle CPU. This is good for power-savings.
1181 int i, cpu = smp_processor_id(), default_cpu = -1; in get_nohz_timer_target()
1205 if (default_cpu == -1) in get_nohz_timer_target()
1229 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling in wake_up_idle_cpu()
1233 * re-evaluate the next tick. Provided some re-ordering of tick in wake_up_idle_cpu()
1237 * - On most architectures, a simple fetch_or on ti::flags with a in wake_up_idle_cpu()
1240 * - x86 needs to perform a last need_resched() check between in wake_up_idle_cpu()
1250 if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED)) in wake_up_idle_cpu()
1259 * We just need the target to call irq_exit() and re-evaluate in wake_up_full_nohz_cpu()
1299 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
1300 if (rq->idle_balance) { in nohz_csd_func()
1301 rq->nohz_idle_balance = flags; in nohz_csd_func()
1311 if (rq->nr_running != 1) in __need_bw_check()
1314 if (p->sched_class != &fair_sched_class) in __need_bw_check()
1328 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
1335 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
1336 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
1346 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
1358 if (rq->cfs.h_nr_queued > 1) in sched_can_stop_tick()
1366 * E.g. going from 2->1 without going through pick_next_task(). in sched_can_stop_tick()
1368 if (__need_bw_check(rq, rq->curr)) { in sched_can_stop_tick()
1369 if (cfs_task_bw_constrained(rq->curr)) in sched_can_stop_tick()
1398 list_for_each_entry_rcu(child, &parent->children, siblings) { in walk_tg_tree_from()
1410 parent = parent->parent; in walk_tg_tree_from()
1425 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1440 if (update_load && p->sched_class->reweight_task) in set_load_weight()
1441 p->sched_class->reweight_task(task_rq(p), p, &lw); in set_load_weight()
1443 p->se.load = lw; in set_load_weight()
1450 * The (slow-path) user-space triggers utilization clamp value updates which
1451 * can require updates on (fast-path) scheduler's data structures used to
1453 * While the per-CPU rq lock protects fast-path update operations, user-space
1471 * used. In battery powered devices, particularly, running at the maximum
1475 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1511 * idle (which drops the max-clamp) by retaining the last known in uclamp_idle_value()
1512 * max-clamp. in uclamp_idle_value()
1515 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
1525 /* Reset max-clamp retention only on idle exit */ in uclamp_idle_reset()
1526 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
1536 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1537 int bucket_id = UCLAMP_BUCKETS - 1; in uclamp_rq_max_value()
1543 for ( ; bucket_id >= 0; bucket_id--) { in uclamp_rq_max_value()
1549 /* No tasks -- default clamp values */ in uclamp_rq_max_value()
1558 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1560 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1563 if (uc_se->user_defined) in __uclamp_update_util_min_rt_default()
1575 /* Protect updates to p->uclamp_* */ in uclamp_update_util_min_rt_default()
1584 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1597 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1598 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1610 * - the task specific clamp value, when explicitly requested from userspace
1611 * - the task group effective clamp value, for tasks not either in the root
1613 * - the system default clamp value, defined by the sysadmin
1632 /* Task currently refcounted: use back-annotated (effective) value */ in uclamp_eff_value()
1633 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1634 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
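So the effective clamp is the task's request restricted to its group's range (and, for the root group, to the system default). An illustrative-only computation with invented values on the usual 0..1024 scale:

#include <stdio.h>

static unsigned int clamp_demo(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int task_min = 300, task_max = 900;	/* task-specific request */
	unsigned int tg_min   = 0,   tg_max   = 700;	/* task-group limits */

	printf("effective uclamp: min=%u max=%u\n",
	       clamp_demo(task_min, tg_min, tg_max),	/* 300 */
	       clamp_demo(task_max, tg_min, tg_max));	/* 700 */
	return 0;
}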
1646 * Tasks can have a task-specific value requested from user-space, track
1654 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1655 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1661 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1663 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_inc_id()
1664 bucket->tasks++; in uclamp_rq_inc_id()
1665 uc_se->active = true; in uclamp_rq_inc_id()
1667 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1673 if (bucket->tasks == 1 || uc_se->value > bucket->value) in uclamp_rq_inc_id()
1674 bucket->value = uc_se->value; in uclamp_rq_inc_id()
1676 if (uc_se->value > uclamp_rq_get(rq, clamp_id)) in uclamp_rq_inc_id()
1677 uclamp_rq_set(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1692 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1693 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1704 * In this case the uc_se->active flag should be false since no uclamp in uclamp_rq_dec_id()
1715 * // Must not decrement bucket->tasks here in uclamp_rq_dec_id()
1719 * bucket[uc_se->bucket_id]. in uclamp_rq_dec_id()
1723 if (unlikely(!uc_se->active)) in uclamp_rq_dec_id()
1726 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_dec_id()
1728 WARN_ON_ONCE(!bucket->tasks); in uclamp_rq_dec_id()
1729 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1730 bucket->tasks--; in uclamp_rq_dec_id()
1732 uc_se->active = false; in uclamp_rq_dec_id()
1740 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1748 WARN_ON_ONCE(bucket->value > rq_clamp); in uclamp_rq_dec_id()
1749 if (bucket->value >= rq_clamp) { in uclamp_rq_dec_id()
1750 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1768 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1771 if (p->se.sched_delayed) in uclamp_rq_inc()
1778 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1779 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1795 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1798 if (p->se.sched_delayed) in uclamp_rq_dec()
1808 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1818 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_rq_reinc_id()
1819 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_reinc_id()
1873 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1875 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
1934 result = -EINVAL; in sysctl_sched_uclamp_handler()
1979 * We don't need to hold task_rq_lock() when updating p->uclamp_* here in uclamp_fork()
1983 p->uclamp[clamp_id].active = false; in uclamp_fork()
1985 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
1989 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
2002 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
2010 rq->uclamp_flags = UCLAMP_FLAG_IDLE; in init_uclamp_rq()
2060 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2061 state = READ_ONCE(p->__state); in get_wchan()
2063 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2065 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2075 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2077 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear in enqueue_task()
2078 * ->sched_delayed. in enqueue_task()
2108 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail' in dequeue_task()
2109 * and mark the task ->sched_delayed. in dequeue_task()
2112 return p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2124 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); in activate_task()
2125 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in activate_task()
2132 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); in deactivate_task()
2133 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in deactivate_task()
2150 * task_curr - is this task currently executing on a CPU?
2161 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2167 if (prev_class != p->sched_class && p->sched_class->switching_to) in check_class_changing()
2168 p->sched_class->switching_to(rq, p); in check_class_changing()
2172 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2182 if (prev_class != p->sched_class) { in check_class_changed()
2183 if (prev_class->switched_from) in check_class_changed()
2184 prev_class->switched_from(rq, p); in check_class_changed()
2186 p->sched_class->switched_to(rq, p); in check_class_changed()
2187 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2188 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2193 struct task_struct *donor = rq->donor; in wakeup_preempt()
2195 if (p->sched_class == donor->sched_class) in wakeup_preempt()
2196 donor->sched_class->wakeup_preempt(rq, p, flags); in wakeup_preempt()
2197 else if (sched_class_above(p->sched_class, donor->sched_class)) in wakeup_preempt()
2204 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr)) in wakeup_preempt()
2211 if (READ_ONCE(p->__state) & state) in __task_state_match()
2214 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2215 return -1; in __task_state_match()
2227 guard(raw_spinlock_irq)(&p->pi_lock); in task_state_match()
2232 * wait_task_inactive - wait for a thread to unschedule.
2257 * any task-queue locks at all. We'll only try to get in wait_task_inactive()
2265 * still, just relax and busy-wait without holding in wait_task_inactive()
2292 * When matching on p->saved_state, consider this task in wait_task_inactive()
2297 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2325 * yield - it could be a while. in wait_task_inactive()
2354 .new_mask = cpumask_of(rq->cpu), in migrate_disable_switch()
2358 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2361 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2374 if (p->migration_disabled) { in migrate_disable()
2377 * Warn about overflow half-way through the range. in migrate_disable()
2379 WARN_ON_ONCE((s16)p->migration_disabled < 0); in migrate_disable()
2381 p->migration_disabled++; in migrate_disable()
2386 this_rq()->nr_pinned++; in migrate_disable()
2387 p->migration_disabled = 1; in migrate_disable()
2395 .new_mask = &p->cpus_mask, in migrate_enable()
2404 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) in migrate_enable()
2408 if (p->migration_disabled > 1) { in migrate_enable()
2409 p->migration_disabled--; in migrate_enable()
2418 if (p->cpus_ptr != &p->cpus_mask) in migrate_enable()
2426 p->migration_disabled = 0; in migrate_enable()
2427 this_rq()->nr_pinned--; in migrate_enable()
2433 return rq->nr_pinned; in rq_has_pinned_tasks()
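Sketch of how the migrate_disable()/migrate_enable() pair is meant to be used: the section in between may be preempted, but the task cannot be moved to another CPU, so smp_processor_id() and per-CPU pointers stay stable (demo_counter is an invented per-CPU variable):

#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, demo_counter);

static int demo_read_this_cpu(void)
{
	int cpu, val;

	migrate_disable();		/* task stays on this CPU ...          */
	cpu = smp_processor_id();	/* ... so this is legal and stable ... */
	val = per_cpu(demo_counter, cpu);
	migrate_enable();		/* ... until here                      */

	return val;
}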
2437 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2451 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2481 * move_queued_task - move a queued task to new rq.
2544 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2551 struct set_affinity_pending *pending = arg->pending; in migration_cpu_stop()
2552 struct task_struct *p = arg->task; in migration_cpu_stop()
2569 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2573 * If we were passed a pending, then ->stop_pending was set, thus in migration_cpu_stop()
2574 * p->migration_pending must have remained stable. in migration_cpu_stop()
2576 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2580 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because in migration_cpu_stop()
2581 * we're holding p->pi_lock. in migration_cpu_stop()
2588 p->migration_pending = NULL; in migration_cpu_stop()
2591 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2597 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2599 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2621 * ->pi_lock, so the allowed mask is stable - if it got in migration_cpu_stop()
2624 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2625 p->migration_pending = NULL; in migration_cpu_stop()
2631 * When migrate_enable() hits a rq mis-match we can't reliably in migration_cpu_stop()
2635 WARN_ON_ONCE(!pending->stop_pending); in migration_cpu_stop()
2639 &pending->arg, &pending->stop_work); in migration_cpu_stop()
2645 pending->stop_pending = false; in migration_cpu_stop()
2649 complete_all(&pending->done); in migration_cpu_stop()
2659 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2666 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2670 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2672 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2673 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2687 rq->push_busy = false; in push_cpu_stop()
2689 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2701 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { in set_cpus_allowed_common()
2702 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
2706 cpumask_copy(&p->cpus_mask, ctx->new_mask); in set_cpus_allowed_common()
2707 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
2712 if (ctx->flags & SCA_USER) in set_cpus_allowed_common()
2713 swap(p->user_cpus_ptr, ctx->user_mask); in set_cpus_allowed_common()
2724 * supposed to change these variables while holding both rq->lock and in __do_set_cpus_allowed()
2725 * p->pi_lock. in __do_set_cpus_allowed()
2728 * accesses these variables under p->pi_lock and only does so after in __do_set_cpus_allowed()
2729 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() in __do_set_cpus_allowed()
2734 if (ctx->flags & SCA_MIGRATE_DISABLE) in __do_set_cpus_allowed()
2735 WARN_ON_ONCE(!p->on_cpu); in __do_set_cpus_allowed()
2737 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2745 * holding rq->lock. in __do_set_cpus_allowed()
2753 p->sched_class->set_cpus_allowed(p, ctx); in __do_set_cpus_allowed()
2754 mm_set_cpus_allowed(p->mm, ctx->new_mask); in __do_set_cpus_allowed()
2781 * Because this is called with p->pi_lock held, it is not possible in do_set_cpus_allowed()
2795 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's in dup_user_cpus_ptr()
2798 dst->user_cpus_ptr = NULL; in dup_user_cpus_ptr()
2805 if (data_race(!src->user_cpus_ptr)) in dup_user_cpus_ptr()
2810 return -ENOMEM; in dup_user_cpus_ptr()
2818 raw_spin_lock_irqsave(&src->pi_lock, flags); in dup_user_cpus_ptr()
2819 if (src->user_cpus_ptr) { in dup_user_cpus_ptr()
2820 swap(dst->user_cpus_ptr, user_mask); in dup_user_cpus_ptr()
2821 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); in dup_user_cpus_ptr()
2823 raw_spin_unlock_irqrestore(&src->pi_lock, flags); in dup_user_cpus_ptr()
2835 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2853 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2856 * Initial conditions: P0->cpus_mask = [0, 1]
2865 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2878 * `--> <woken on migration completion>
2880 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2882 * task p are serialized by p->pi_lock, which we can leverage: the one that
2883 * should come into effect at the end of the Migrate-Disable region is the last
2884 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2889 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2893 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2899 * Migrate-Disable. Consider:
2901 * Initial conditions: P0->cpus_mask = [0, 1]
2919 * p->migration_pending done with p->pi_lock held.
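The mechanism described above boils down to: install (or reuse) a single pending request, hand the actual move to someone else, and have every interested caller sleep on the same completion. A stripped-down sketch of that waiting shape with made-up names (the real code additionally refcounts the on-stack pending and uses the stopper thread rather than a kthread):

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

struct demo_pending {
	struct completion	done;
	int			dest_cpu;
};

static int demo_mover(void *arg)
{
	struct demo_pending *pending = arg;

	/* ... perform the actual move to pending->dest_cpu ... */
	complete_all(&pending->done);		/* release every waiter at once */
	return 0;
}

static void demo_request_and_wait(int dest_cpu)
{
	struct demo_pending pending = { .dest_cpu = dest_cpu };
	struct task_struct *tsk;

	init_completion(&pending.done);

	tsk = kthread_run(demo_mover, &pending, "demo_mover");
	if (IS_ERR(tsk))
		return;

	wait_for_completion(&pending.done);	/* pending stays valid until here */
}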
2923 __releases(rq->lock) in affine_move_task()
2924 __releases(p->pi_lock) in affine_move_task()
2930 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
2934 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2935 rq->push_busy = true; in affine_move_task()
2943 pending = p->migration_pending; in affine_move_task()
2944 if (pending && !pending->stop_pending) { in affine_move_task()
2945 p->migration_pending = NULL; in affine_move_task()
2952 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in affine_move_task()
2953 p, &rq->push_work); in affine_move_task()
2958 complete_all(&pending->done); in affine_move_task()
2964 /* serialized by p->pi_lock */ in affine_move_task()
2965 if (!p->migration_pending) { in affine_move_task()
2975 p->migration_pending = &my_pending; in affine_move_task()
2977 pending = p->migration_pending; in affine_move_task()
2978 refcount_inc(&pending->refs); in affine_move_task()
2985 * Serialized by p->pi_lock, so this is safe. in affine_move_task()
2987 pending->arg.dest_cpu = dest_cpu; in affine_move_task()
2990 pending = p->migration_pending; in affine_move_task()
2992 * - !MIGRATE_ENABLE: in affine_move_task()
2995 * - MIGRATE_ENABLE: in affine_move_task()
3005 return -EINVAL; in affine_move_task()
3008 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
3012 * and have the stopper function handle it all race-free. in affine_move_task()
3014 stop_pending = pending->stop_pending; in affine_move_task()
3016 pending->stop_pending = true; in affine_move_task()
3019 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
3025 &pending->arg, &pending->stop_work); in affine_move_task()
3037 if (!pending->stop_pending) { in affine_move_task()
3038 p->migration_pending = NULL; in affine_move_task()
3045 complete_all(&pending->done); in affine_move_task()
3048 wait_for_completion(&pending->done); in affine_move_task()
3050 if (refcount_dec_and_test(&pending->refs)) in affine_move_task()
3051 wake_up_var(&pending->refs); /* No UaF, just an address */ in affine_move_task()
3066 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3072 __releases(rq->lock) in __set_cpus_allowed_ptr_locked()
3073 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3077 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3086 * however, during cpu-hot-unplug, even these might get pushed in __set_cpus_allowed_ptr_locked()
3092 * set_cpus_allowed_common() and actually reset p->cpus_ptr. in __set_cpus_allowed_ptr_locked()
3097 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { in __set_cpus_allowed_ptr_locked()
3098 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3103 * Must re-check here, to close a race against __kthread_bind(), in __set_cpus_allowed_ptr_locked()
3106 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3107 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3111 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { in __set_cpus_allowed_ptr_locked()
3112 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { in __set_cpus_allowed_ptr_locked()
3113 if (ctx->flags & SCA_USER) in __set_cpus_allowed_ptr_locked()
3114 swap(p->user_cpus_ptr, ctx->user_mask); in __set_cpus_allowed_ptr_locked()
3120 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3121 ret = -EBUSY; in __set_cpus_allowed_ptr_locked()
3131 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); in __set_cpus_allowed_ptr_locked()
3133 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3139 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); in __set_cpus_allowed_ptr_locked()
3166 if (p->user_cpus_ptr && in __set_cpus_allowed_ptr()
3167 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && in __set_cpus_allowed_ptr()
3168 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) in __set_cpus_allowed_ptr()
3169 ctx->new_mask = rq->scratch_mask; in __set_cpus_allowed_ptr()
3192 * -EINVAL.
3214 err = -EPERM; in restrict_cpus_allowed_ptr()
3219 err = -EINVAL; in restrict_cpus_allowed_ptr()
3232 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3265 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3300 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3306 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3309 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, in set_task_cpu()
3311 * time relying on p->on_rq. in set_task_cpu()
3314 p->sched_class == &fair_sched_class && in set_task_cpu()
3315 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3319 * The caller should hold either p->pi_lock or rq->lock, when changing in set_task_cpu()
3320 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. in set_task_cpu()
3328 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3341 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3342 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3343 p->se.nr_migrations++; in set_task_cpu()
3377 p->wake_cpu = cpu; in __migrate_swap_task()
3391 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) in migrate_swap_stop()
3392 return -EAGAIN; in migrate_swap_stop()
3394 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
3395 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
3397 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); in migrate_swap_stop()
3400 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
3401 return -EAGAIN; in migrate_swap_stop()
3403 if (task_cpu(arg->src_task) != arg->src_cpu) in migrate_swap_stop()
3404 return -EAGAIN; in migrate_swap_stop()
3406 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) in migrate_swap_stop()
3407 return -EAGAIN; in migrate_swap_stop()
3409 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) in migrate_swap_stop()
3410 return -EAGAIN; in migrate_swap_stop()
3412 __migrate_swap_task(arg->src_task, arg->dst_cpu); in migrate_swap_stop()
3413 __migrate_swap_task(arg->dst_task, arg->src_cpu); in migrate_swap_stop()
3425 int ret = -EINVAL; in migrate_swap()
3439 * will be re-checked with proper locks held further down the line. in migrate_swap()
3444 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) in migrate_swap()
3447 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) in migrate_swap()
3459 * kick_process - kick a running thread to enter/exit the kernel
3460 * @p: the to-be-kicked thread
3463 * kernel-mode, without any delay. (to get signals handled.)
3482 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3486 * - cpu_active must be a subset of cpu_online
3488 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3493 * - on CPU-down we clear cpu_active() to mask the sched domains and
3512 * will return -1. There is no CPU on the node, and we should in select_fallback_rq()
3515 if (nid != -1) { in select_fallback_rq()
3527 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3545 * hold p->pi_lock and again violate locking order. in select_fallback_rq()
3565 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3567 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
3575 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3580 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3582 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) { in select_task_rq()
3583 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags); in select_task_rq()
3586 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3591 * to rely on ttwu() to place the task on a valid ->cpus_ptr in select_task_rq()
3596 * [ this allows ->select_task() to simply return task_cpu(p) and in select_task_rq()
3608 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; in sched_set_stop_task()
3609 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
3617 * much confusion -- but then, stop work should not in sched_set_stop_task()
3622 stop->sched_class = &stop_sched_class; in sched_set_stop_task()
3625 * The PI code calls rt_mutex_setprio() with ->pi_lock held to in sched_set_stop_task()
3631 * The stop task itself will never be part of the PI-chain, it in sched_set_stop_task()
3632 * never blocks, therefore that ->pi_lock recursion is safe. in sched_set_stop_task()
3633 * Tell lockdep about this by placing the stop->pi_lock in its in sched_set_stop_task()
3636 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); in sched_set_stop_task()
3639 cpu_rq(cpu)->stop = stop; in sched_set_stop_task()
3646 old_stop->sched_class = &rt_sched_class; in sched_set_stop_task()
3672 if (cpu == rq->cpu) { in ttwu_stat()
3673 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
3674 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3678 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3681 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3683 __schedstat_inc(sd->ttwu_wake_remote); in ttwu_stat()
3690 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3693 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
3694 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3697 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3705 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3717 if (p->sched_contributes_to_load) in ttwu_do_activate()
3718 rq->nr_uninterruptible--; in ttwu_do_activate()
3727 if (p->in_iowait) { in ttwu_do_activate()
3729 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3738 if (p->sched_class->task_woken) { in ttwu_do_activate()
3741 * drop the rq->lock, hereafter rq is only used for statistics. in ttwu_do_activate()
3744 p->sched_class->task_woken(rq, p); in ttwu_do_activate()
3748 if (rq->idle_stamp) { in ttwu_do_activate()
3749 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_activate()
3750 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_activate()
3752 update_avg(&rq->avg_idle, delta); in ttwu_do_activate()
3754 if (rq->avg_idle > max) in ttwu_do_activate()
3755 rq->avg_idle = max; in ttwu_do_activate()
3757 rq->idle_stamp = 0; in ttwu_do_activate()
3776 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3779 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3780 * then schedule() must still happen and p->state can be changed to
3796 if (p->se.sched_delayed) in ttwu_runnable()
3828 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3829 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3834 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3839 * idle_cpu() does not observe a false-negative -- if it does, in sched_ttwu_pending()
3847 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
3859 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { in call_function_single_prep_ipi()
3877 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3879 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
3880 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3888 if (is_idle_task(rcu_dereference(rq->curr))) { in wake_up_if_idle()
3890 if (is_idle_task(rq->curr)) in wake_up_if_idle()
3915 * Whether CPUs share cache resources, which means LLC on non-cluster
3940 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
3956 * the task activation to the idle (or soon-to-be-idle) CPU as in ttwu_queue_cond()
3960 * Note that we can only get here with (wakee) p->on_rq=0, in ttwu_queue_cond()
3961 * p->on_cpu can be whatever, we've done the dequeue, so in ttwu_queue_cond()
3962 * the wakee has been accounted out of ->nr_running. in ttwu_queue_cond()
3964 if (!cpu_rq(cpu)->nr_running) in ttwu_queue_cond()
4049 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4055 * Notes on Program-Order guarantees on SMP systems.
4059 * The basic program-order guarantee on SMP systems is that when a task [t]
4060 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4065 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4066 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4067 * rq(c1)->lock (if not at the same time, then in that order).
4068 * C) LOCK of the rq(c1)->lock scheduling in task
4077 * LOCK rq(0)->lock
4078 * sched-out X
4079 * sched-in Y
4080 * UNLOCK rq(0)->lock
4082 * LOCK rq(0)->lock // orders against CPU0
4084 * UNLOCK rq(0)->lock
4086 * LOCK rq(1)->lock
4088 * UNLOCK rq(1)->lock
4090 * LOCK rq(1)->lock // orders against CPU2
4091 * sched-out Z
4092 * sched-in X
4093 * UNLOCK rq(1)->lock
4096 * BLOCKING -- aka. SLEEP + WAKEUP
4102 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4103 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4109 * LOCK rq(0)->lock LOCK X->pi_lock
4111 * sched-out X
4112 * smp_store_release(X->on_cpu, 0);
4114 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4115 * X->state = WAKING
4118 * LOCK rq(2)->lock
4120 * X->state = RUNNING
4121 * UNLOCK rq(2)->lock
4123 * LOCK rq(2)->lock // orders against CPU1
4124 * sched-out Z
4125 * sched-in X
4126 * UNLOCK rq(2)->lock
4128 * UNLOCK X->pi_lock
4129 * UNLOCK rq(0)->lock
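The pairing that makes the diagram above work is a release store of ->on_cpu in finish_task() against an acquire (spin-)load in try_to_wake_up(). A userspace analogue with C11 atomics (illustrative only):

#include <stdatomic.h>

struct demo_task {
	_Atomic int	on_cpu;
	int		scratch;	/* state written while "on the CPU" */
};

static void demo_finish_task(struct demo_task *t)
{
	t->scratch = 42;		/* last write on the old CPU */
	atomic_store_explicit(&t->on_cpu, 0, memory_order_release);
}

static int demo_try_to_wake_up(struct demo_task *t)
{
	/* stand-in for smp_cond_load_acquire(&t->on_cpu, !VAL) */
	while (atomic_load_explicit(&t->on_cpu, memory_order_acquire))
		;
	return t->scratch;		/* guaranteed to observe 42 */
}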
4138 * try_to_wake_up - wake up a thread
4145 * If (@state & @p->state) @p->state = TASK_RUNNING.
4151 * It issues a full memory barrier before accessing @p->state, see the comment
4154 * Uses p->pi_lock to serialize against concurrent wake-ups.
4156 * Relies on p->pi_lock stabilizing:
4157 * - p->sched_class
4158 * - p->cpus_ptr
4159 * - p->sched_task_group
4162 * Tries really hard to only take one task_rq(p)->lock for performance.
4163 * Takes rq->lock in:
4164 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4165 * - ttwu_queue() -- new rq, for enqueue of the task;
4166 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4171 * Return: %true if @p->state changes (an actual wakeup was done),
4183 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) in try_to_wake_up()
4185 * case the whole 'p->on_rq && ttwu_runnable()' case below in try_to_wake_up()
4193 * - we rely on Program-Order guarantees for all the ordering, in try_to_wake_up()
4194 * - we're serialized against set_special_state() by virtue of in try_to_wake_up()
4195 * it disabling IRQs (this allows not taking ->pi_lock). in try_to_wake_up()
4197 WARN_ON_ONCE(p->se.sched_delayed); in try_to_wake_up()
4209 * reordered with p->state check below. This pairs with smp_store_mb() in try_to_wake_up()
4212 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in try_to_wake_up()
4220 * Ensure we load p->on_rq _after_ p->state, otherwise it would in try_to_wake_up()
4221 * be possible to, falsely, observe p->on_rq == 0 and get stuck in try_to_wake_up()
4225 * STORE p->on_rq = 1 LOAD p->state in try_to_wake_up()
4226 * UNLOCK rq->lock in try_to_wake_up()
4229 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4231 * UNLOCK rq->lock in try_to_wake_up()
4234 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq in try_to_wake_up()
4236 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4242 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4247 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be in try_to_wake_up()
4248 * possible to, falsely, observe p->on_cpu == 0. in try_to_wake_up()
4250 * One must be running (->on_cpu == 1) in order to remove oneself in try_to_wake_up()
4254 * STORE p->on_cpu = 1 LOAD p->on_rq in try_to_wake_up()
4255 * UNLOCK rq->lock in try_to_wake_up()
4258 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4260 * STORE p->on_rq = 0 LOAD p->on_cpu in try_to_wake_up()
4262 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4265 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure in try_to_wake_up()
4267 * care about it's own p->state. See the comment in __schedule(). in try_to_wake_up()
4272 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq in try_to_wake_up()
4273 * == 0), which means we need to do an enqueue, change p->state to in try_to_wake_up()
4274 * TASK_WAKING such that we can unlock p->pi_lock before doing the in try_to_wake_up()
4277 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4282 * which potentially sends an IPI instead of spinning on p->on_cpu to in try_to_wake_up()
4286 * Ensure we load task_cpu(p) after p->on_cpu: in try_to_wake_up()
4289 * STORE p->cpu = @cpu in try_to_wake_up()
4291 * LOCK rq->lock in try_to_wake_up()
4292 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) in try_to_wake_up()
4293 * STORE p->on_cpu = 1 LOAD p->cpu in try_to_wake_up()
4298 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4311 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4313 cpu = select_task_rq(p, p->wake_cpu, &wake_flags); in try_to_wake_up()
4315 if (p->in_iowait) { in try_to_wake_up()
4317 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
4339 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4342 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when in __task_needs_rq_lock()
4350 * Ensure we load p->on_rq after p->__state, otherwise it would be in __task_needs_rq_lock()
4351 * possible to, falsely, observe p->on_rq == 0. in __task_needs_rq_lock()
4356 if (p->on_rq) in __task_needs_rq_lock()
4365 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4372 * task_call_func - Invoke a function on task in fixed state
4392 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4399 * - blocked and we're holding off wakeups (pi->lock) in task_call_func()
4400 * - woken, and we're holding off enqueue (rq->lock) in task_call_func()
4401 * - queued, and we're holding off schedule (rq->lock) in task_call_func()
4402 * - running, and we're holding off de-schedule (rq->lock) in task_call_func()
4404 * The called function (@func) can use: task_curr(), p->on_rq and in task_call_func()
4405 * p->__state to differentiate between these states. in task_call_func()
4412 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
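Usage sketch for task_call_func(): the callback runs while @p is pinned in one of the states listed above, so it may inspect fields like p->on_rq without taking further locks (the demo_* callback and its argument are made up):

#include <linux/sched.h>

static int demo_get_on_rq(struct task_struct *p, void *arg)
{
	int *out = arg;

	*out = p->on_rq;	/* stable for the duration of the callback */
	return 0;
}

static int demo_query(struct task_struct *p)
{
	int on_rq = 0;

	task_call_func(p, demo_get_on_rq, &on_rq);
	return on_rq;
}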
4417 * cpu_curr_snapshot - Return a snapshot of the currently running task
4449 * wake_up_process - Wake up a specific process
4479 p->on_rq = 0; in __sched_fork()
4481 p->se.on_rq = 0; in __sched_fork()
4482 p->se.exec_start = 0; in __sched_fork()
4483 p->se.sum_exec_runtime = 0; in __sched_fork()
4484 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4485 p->se.nr_migrations = 0; in __sched_fork()
4486 p->se.vruntime = 0; in __sched_fork()
4487 p->se.vlag = 0; in __sched_fork()
4488 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4491 WARN_ON_ONCE(p->se.sched_delayed); in __sched_fork()
4494 p->se.cfs_rq = NULL; in __sched_fork()
4499 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4502 init_dl_entity(&p->dl); in __sched_fork()
4504 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4505 p->rt.timeout = 0; in __sched_fork()
4506 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4507 p->rt.on_rq = 0; in __sched_fork()
4508 p->rt.on_list = 0; in __sched_fork()
4511 init_scx_entity(&p->scx); in __sched_fork()
4515 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4519 p->capture_control = NULL; in __sched_fork()
4523 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4524 p->migration_pending = NULL; in __sched_fork()
4558 pgdat->nbp_threshold = 0; in reset_memory_tiering()
4559 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); in reset_memory_tiering()
4560 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); in reset_memory_tiering()
4572 return -EPERM; in sysctl_numa_balancing()
4641 return -EPERM; in sysctl_schedstats()
4712 * fork()/clone()-time setup:
4722 p->__state = TASK_NEW; in sched_fork()
4727 p->prio = current->normal_prio; in sched_fork()
4734 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4736 p->policy = SCHED_NORMAL; in sched_fork()
4737 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4738 p->rt_priority = 0; in sched_fork()
4739 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4740 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4742 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4744 p->se.custom_slice = 0; in sched_fork()
4745 p->se.slice = sysctl_sched_base_slice; in sched_fork()
4751 p->sched_reset_on_fork = 0; in sched_fork()
4754 if (dl_prio(p->prio)) in sched_fork()
4755 return -EAGAIN; in sched_fork()
4759 if (rt_prio(p->prio)) { in sched_fork()
4760 p->sched_class = &rt_sched_class; in sched_fork()
4762 } else if (task_should_scx(p->policy)) { in sched_fork()
4763 p->sched_class = &ext_sched_class; in sched_fork()
4766 p->sched_class = &fair_sched_class; in sched_fork()
4769 init_entity_runnable_average(&p->se); in sched_fork()
4774 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4777 p->on_cpu = 0; in sched_fork()
4781 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4782 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4792 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly in sched_cgroup_fork()
4795 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4799 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], in sched_cgroup_fork()
4802 p->sched_task_group = tg; in sched_cgroup_fork()
4811 if (p->sched_class->task_fork) in sched_cgroup_fork()
4812 p->sched_class->task_fork(p); in sched_cgroup_fork()
4813 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4846 * wake_up_new_task - wake up a newly created task for the first time.
4858 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4859 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4863 * - cpus_ptr can change in the fork path in wake_up_new_task()
4864 * - any previously selected CPU might disappear through hotplug in wake_up_new_task()
4867 * as we're not fully set-up yet. in wake_up_new_task()
4869 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4881 if (p->sched_class->task_woken) { in wake_up_new_task()
4883 * Nothing relies on rq->lock after this, so it's fine to in wake_up_new_task()
4887 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4911 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4919 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); in preempt_notifier_register()
4924 * preempt_notifier_unregister - no longer interested in preemption notifications
4931 hlist_del(¬ifier->link); in preempt_notifier_unregister()
4939 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_in_preempt_notifiers()
4940 notifier->ops->sched_in(notifier, raw_smp_processor_id()); in __fire_sched_in_preempt_notifiers()
4955 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_out_preempt_notifiers()
4956 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
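Sketch of the consumer side of the preempt notifier API fired above (requires CONFIG_PREEMPT_NOTIFIERS; embedding the notifier in a per-context structure mirrors how KVM uses it, but all demo_* names are invented):

#include <linux/preempt.h>
#include <linux/sched.h>

struct demo_ctx {
	struct preempt_notifier	notifier;
	/* ... state to save/restore around preemption ... */
};

static void demo_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct demo_ctx *ctx = container_of(pn, struct demo_ctx, notifier);

	/* current was scheduled back in on @cpu: reload ctx state */
	(void)ctx;
}

static void demo_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	struct demo_ctx *ctx = container_of(pn, struct demo_ctx, notifier);

	/* current is being preempted in favour of @next: stash ctx state */
	(void)ctx;
}

static struct preempt_ops demo_preempt_ops = {
	.sched_in	= demo_sched_in,
	.sched_out	= demo_sched_out,
};

static void demo_attach(struct demo_ctx *ctx)
{
	preempt_notifier_inc();		/* enable the static key, cf. kvm_init() */
	preempt_notifier_init(&ctx->notifier, &demo_preempt_ops);
	preempt_notifier_register(&ctx->notifier);	/* affects current only */
}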
4988 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and in prepare_task()
4991 WRITE_ONCE(next->on_cpu, 1); in prepare_task()
5000 * p->on_cpu is cleared, the task can be moved to a different CPU. We in finish_task()
5004 * In particular, the load of prev->state in finish_task_switch() must in finish_task()
5009 smp_store_release(&prev->on_cpu, 0); in finish_task()
5023 func = (void (*)(struct rq *))head->func; in do_balance_callbacks()
5024 next = head->next; in do_balance_callbacks()
5025 head->next = NULL; in do_balance_callbacks()
5039 * that queued it (only later, when it's safe to drop rq->lock again),
5043 * a single test, namely: rq->balance_callback == NULL.
5053 struct balance_callback *head = rq->balance_callback; in __splice_balance_callbacks()
5062 * in the same rq->lock section. in __splice_balance_callbacks()
5070 rq->balance_callback = NULL; in __splice_balance_callbacks()
5110 * of the scheduler it's an obvious special-case), so we in prepare_lock_switch()
5114 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); in prepare_lock_switch()
5117 rq_lockp(rq)->owner = next; in prepare_lock_switch()
5125 * fix up the runqueue lock - which gets 'carried over' from in finish_lock_switch()
5128 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
5148 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_out()
5156 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_in()
5162 * prepare_task_switch - prepare to switch tasks
5189 * finish_task_switch - clean up after a task-switch
5195 * and do any other architecture-specific cleanup actions.
5208 __releases(rq->lock) in finish_task_switch()
5211 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
5221 * raw_spin_lock_irq(&rq->lock) // 2 in finish_task_switch()
5227 current->comm, current->pid, preempt_count())) in finish_task_switch()
5230 rq->prev_mm = NULL; in finish_task_switch()
5234 * If a task dies, then it sets TASK_DEAD in tsk->state and calls in finish_task_switch()
5238 * We must observe prev->state before clearing prev->on_cpu (in in finish_task_switch()
5240 * running on another CPU and we could race with its RUNNING -> DEAD in finish_task_switch()
5243 prev_state = READ_ONCE(prev->__state); in finish_task_switch()
5265 * schedule between user->kernel->user threads without passing though in finish_task_switch()
5267 * rq->curr, before returning to userspace, so provide them here: in finish_task_switch()
5269 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly in finish_task_switch()
5271 * - a sync_core for SYNC_CORE. in finish_task_switch()
5279 if (prev->sched_class->task_dead) in finish_task_switch()
5280 prev->sched_class->task_dead(prev); in finish_task_switch()
5292 * schedule_tail - first thing a freshly forked thread must call.
5296 __releases(rq->lock) in schedule_tail()
5302 * finish_task_switch() will drop rq->lock() and lower preempt_count in schedule_tail()
5316 if (current->set_child_tid) in schedule_tail()
5317 put_user(task_pid_vnr(current), current->set_child_tid); in schedule_tail()
5323 * context_switch - switch to the new MM and the new thread's register state.
5339 * kernel -> kernel lazy + transfer active in context_switch()
5340 * user -> kernel lazy + mmgrab_lazy_tlb() active in context_switch()
5342 * kernel -> user switch + mmdrop_lazy_tlb() active in context_switch()
5343 * user -> user switch in context_switch()
5348 if (!next->mm) { // to kernel in context_switch()
5349 enter_lazy_tlb(prev->active_mm, next); in context_switch()
5351 next->active_mm = prev->active_mm; in context_switch()
5352 if (prev->mm) // from user in context_switch()
5353 mmgrab_lazy_tlb(prev->active_mm); in context_switch()
5355 prev->active_mm = NULL; in context_switch()
5357 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
5360 * rq->curr / membarrier_switch_mm() and returning to userspace. in context_switch()
5363 * case 'prev->active_mm == next->mm' through in context_switch()
5366 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
5367 lru_gen_use_mm(next->mm); in context_switch()
5369 if (!prev->mm) { // from kernel in context_switch()
5371 rq->prev_mm = prev->active_mm; in context_switch()
5372 prev->active_mm = NULL; in context_switch()
5399 sum += cpu_rq(i)->nr_running; in nr_running()
5408 * preemption, thus the result might have a time-of-check-to-time-of-use
5411 * - from a non-preemptible section (of course)
5413 * - from a thread that is bound to a single CPU
5415 * - in a loop with very short iterations (e.g. a polling loop)
5419 return raw_rq()->nr_running == 1; in single_task_running()
5425 return cpu_rq(cpu)->nr_switches; in nr_context_switches_cpu()
5434 sum += cpu_rq(i)->nr_switches; in nr_context_switches()
5442 * for a CPU that has IO-wait which might not even end up running the task when
5448 return atomic_read(&cpu_rq(cpu)->nr_iowait); in nr_iowait_cpu()
5452 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5454 * The idea behind IO-wait account is to account the idle time that we could
5456 * storage performance, we'd have a proportional reduction in IO-wait time.
5459 * idle time as IO-wait, because if the storage were faster, it could've been
5466 * CPU will have IO-wait accounted, while the other has regular idle. Even
5470 * This means, that when looking globally, the current IO-wait accounting on
5476 * blocked on. This means the per CPU IO-wait number is meaningless.
5494 * sched_exec - execve() is a valuable balancing opportunity, because at
5503 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in sched_exec()
5504 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5526 * and its field curr->exec_start; when called from task_sched_runtime(),
5533 struct sched_entity *curr = p->se.cfs_rq->curr; in prefetch_curr_exec_start()
5535 struct sched_entity *curr = task_rq(p)->cfs.curr; in prefetch_curr_exec_start()
5538 prefetch(&curr->exec_start); in prefetch_curr_exec_start()
5554 * 64-bit doesn't need locks to atomically read a 64-bit value. in task_sched_runtime()
5556 * Reading ->on_cpu is racy, but this is OK. in task_sched_runtime()
5561 * If we see ->on_cpu without ->on_rq, the task is leaving, and has in task_sched_runtime()
5564 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5565 return p->se.sum_exec_runtime; in task_sched_runtime()
5570 * Must be ->curr _and_ ->on_rq. If dequeued, we would in task_sched_runtime()
5577 p->sched_class->update_curr(rq); in task_sched_runtime()
5579 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5600 if (!rq->last_seen_need_resched_ns) { in cpu_resched_latency()
5601 rq->last_seen_need_resched_ns = now; in cpu_resched_latency()
5602 rq->ticks_without_resched = 0; in cpu_resched_latency()
5606 rq->ticks_without_resched++; in cpu_resched_latency()
5607 resched_latency = now - rq->last_seen_need_resched_ns; in cpu_resched_latency()
5650 donor = rq->donor; in sched_tick()
5661 donor->sched_class->task_tick(rq, donor, 0); in sched_tick()
5676 if (donor->flags & PF_WQ_WORKER) in sched_tick()
5681 rq->idle_balance = idle_cpu(cpu); in sched_tick()
5694 /* Values for ->state, see diagram below. */
5700 * State diagram for ->state:
5709 * +--TICK_SCHED_REMOTE_OFFLINING
5728 int cpu = twork->cpu; in sched_tick_remote()
5736 * statistics and checks timeslices in a time-independent way, regardless in sched_tick_remote()
5741 struct task_struct *curr = rq->curr; in sched_tick_remote()
5749 WARN_ON_ONCE(rq->curr != rq->donor); in sched_tick_remote()
5757 u64 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
5760 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
5772 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); in sched_tick_remote()
5789 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); in sched_tick_start()
5792 twork->cpu = cpu; in sched_tick_start()
5793 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); in sched_tick_start()
5794 queue_delayed_work(system_unbound_wq, &twork->work, HZ); in sched_tick_start()
5810 /* There cannot be competing actions, but don't rely on stop-machine. */ in sched_tick_stop()
5811 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); in sched_tick_stop()
5840 current->preempt_disable_ip = ip; in preempt_latency_start()
5861 PREEMPT_MASK - 10); in preempt_count_add()
5908 return p->preempt_disable_ip; in get_preempt_disable_ip()
5926 prev->comm, prev->pid, preempt_count()); in __schedule_bug()
5943 * Various schedule()-time debugging checks and statistics:
5956 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { in schedule_debug()
5957 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", in schedule_debug()
5958 prev->comm, prev->pid, prev->non_block_count); in schedule_debug()
5973 schedstat_inc(this_rq()->sched_count); in schedule_debug()
5979 const struct sched_class *start_class = prev->sched_class; in prev_balance()
5989 rq->scx.flags |= SCX_RQ_BAL_PENDING; in prev_balance()
5997 * that when we release the rq->lock the task is in the same in prev_balance()
5998 * state as before we took rq->lock. in prev_balance()
6004 if (class->balance && class->balance(rq, prev, rf)) in prev_balance()
6010 * Pick up the highest-prio task:
6018 rq->dl_server = NULL; in __pick_next_task()
6029 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && in __pick_next_task()
6030 rq->nr_running == rq->cfs.h_nr_queued)) { in __pick_next_task()
6049 if (class->pick_next_task) { in __pick_next_task()
6050 p = class->pick_next_task(rq, prev); in __pick_next_task()
6054 p = class->pick_task(rq); in __pick_next_task()
6068 return (task_rq(t)->idle == t); in is_task_rq_idle()
6073 return is_task_rq_idle(a) || (a->core_cookie == cookie); in cookie_equals()
6081 return a->core_cookie == b->core_cookie; in cookie_match()
6089 rq->dl_server = NULL; in pick_task()
6092 p = class->pick_task(rq); in pick_task()
6110 bool core_clock_updated = (rq == rq->core); in pick_next_task()
6121 /* Stopper task is switching into idle, no need for core-wide selection. */ in pick_next_task()
6128 rq->core_pick = NULL; in pick_next_task()
6129 rq->core_dl_server = NULL; in pick_next_task()
6138 * rq->core_pick can be NULL if no selection was made for a CPU because in pick_next_task()
6139 * it was either offline or went offline during a sibling's core-wide in pick_next_task()
6140 * selection. In this case, do a core-wide selection. in pick_next_task()
6142 if (rq->core->core_pick_seq == rq->core->core_task_seq && in pick_next_task()
6143 rq->core->core_pick_seq != rq->core_sched_seq && in pick_next_task()
6144 rq->core_pick) { in pick_next_task()
6145 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); in pick_next_task()
6147 next = rq->core_pick; in pick_next_task()
6148 rq->dl_server = rq->core_dl_server; in pick_next_task()
6149 rq->core_pick = NULL; in pick_next_task()
6150 rq->core_dl_server = NULL; in pick_next_task()
6157 need_sync = !!rq->core->core_cookie; in pick_next_task()
6160 rq->core->core_cookie = 0UL; in pick_next_task()
6161 if (rq->core->core_forceidle_count) { in pick_next_task()
6163 update_rq_clock(rq->core); in pick_next_task()
6168 rq->core->core_forceidle_start = 0; in pick_next_task()
6169 rq->core->core_forceidle_count = 0; in pick_next_task()
6170 rq->core->core_forceidle_occupation = 0; in pick_next_task()
6176 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq in pick_next_task()
6185 rq->core->core_task_seq++; in pick_next_task()
6193 if (!next->core_cookie) { in pick_next_task()
6194 rq->core_pick = NULL; in pick_next_task()
6195 rq->core_dl_server = NULL; in pick_next_task()
6210 * Tie-break prio towards the current CPU in pick_next_task()
6220 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) in pick_next_task()
6223 rq_i->core_pick = p = pick_task(rq_i); in pick_next_task()
6224 rq_i->core_dl_server = rq_i->dl_server; in pick_next_task()
6230 cookie = rq->core->core_cookie = max->core_cookie; in pick_next_task()
6238 p = rq_i->core_pick; in pick_next_task()
6248 rq_i->core_pick = p; in pick_next_task()
6249 rq_i->core_dl_server = NULL; in pick_next_task()
6251 if (p == rq_i->idle) { in pick_next_task()
6252 if (rq_i->nr_running) { in pick_next_task()
6253 rq->core->core_forceidle_count++; in pick_next_task()
6255 rq->core->core_forceidle_seq++; in pick_next_task()
6262 if (schedstat_enabled() && rq->core->core_forceidle_count) { in pick_next_task()
6263 rq->core->core_forceidle_start = rq_clock(rq->core); in pick_next_task()
6264 rq->core->core_forceidle_occupation = occ; in pick_next_task()
6267 rq->core->core_pick_seq = rq->core->core_task_seq; in pick_next_task()
6268 next = rq->core_pick; in pick_next_task()
6269 rq->core_sched_seq = rq->core->core_pick_seq; in pick_next_task()
6277 * NOTE: L1TF -- at this point we're no longer running the old task and in pick_next_task()
6279 * their task. This ensures there is no inter-sibling overlap between in pick_next_task()
6280 * non-matching user state. in pick_next_task()
6289 * picked for it. That's Ok - it will pick tasks for itself, in pick_next_task()
6292 if (!rq_i->core_pick) in pick_next_task()
6296 * Update for new !FI->FI transitions, or if continuing to be in !FI: in pick_next_task()
6303 if (!(fi_before && rq->core->core_forceidle_count)) in pick_next_task()
6304 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); in pick_next_task()
6306 rq_i->core_pick->core_occupation = occ; in pick_next_task()
6309 rq_i->core_pick = NULL; in pick_next_task()
6310 rq_i->core_dl_server = NULL; in pick_next_task()
6315 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); in pick_next_task()
6317 if (rq_i->curr == rq_i->core_pick) { in pick_next_task()
6318 rq_i->core_pick = NULL; in pick_next_task()
6319 rq_i->core_dl_server = NULL; in pick_next_task()
6328 if (rq->core->core_forceidle_count && next == rq->idle) in pick_next_task()
6344 cookie = dst->core->core_cookie; in try_steal_cookie()
6348 if (dst->curr != dst->idle) in try_steal_cookie()
6356 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6362 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6430 if (!rq->core->core_cookie) in queue_core_balance()
6433 if (!rq->nr_running) /* not forced idle */ in queue_core_balance()
6436 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); in queue_core_balance()
6440 sched_core_lock(*_T->lock, &_T->flags),
6441 sched_core_unlock(*_T->lock, &_T->flags),
6452 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_starting()
6463 if (rq->core == rq) { in sched_core_cpu_starting()
6477 rq->core = core_rq; in sched_core_cpu_starting()
6479 WARN_ON_ONCE(rq->core != core_rq); in sched_core_cpu_starting()
6493 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_deactivate()
6498 if (rq->core != rq) in sched_core_cpu_deactivate()
6513 core_rq->core_task_seq = rq->core_task_seq; in sched_core_cpu_deactivate()
6514 core_rq->core_pick_seq = rq->core_pick_seq; in sched_core_cpu_deactivate()
6515 core_rq->core_cookie = rq->core_cookie; in sched_core_cpu_deactivate()
6516 core_rq->core_forceidle_count = rq->core_forceidle_count; in sched_core_cpu_deactivate()
6517 core_rq->core_forceidle_seq = rq->core_forceidle_seq; in sched_core_cpu_deactivate()
6518 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; in sched_core_cpu_deactivate()
6525 core_rq->core_forceidle_start = 0; in sched_core_cpu_deactivate()
6530 rq->core = core_rq; in sched_core_cpu_deactivate()
6538 if (rq->core != rq) in sched_core_cpu_dying()
6539 rq->core = rq; in sched_core_cpu_dying()
6562 #define SM_IDLE (-1)
6579 WRITE_ONCE(p->__state, TASK_RUNNING); in try_to_block_task()
6583 p->sched_contributes_to_load = in try_to_block_task()
6593 * prev_state = prev->state; if (p->on_rq && ...) in try_to_block_task()
6595 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); in try_to_block_task()
6596 * p->state = TASK_WAKING in try_to_block_task()
6600 * After this, schedule() must not care about p->state any more. in try_to_block_task()
6620 * task to the run-queue and that's it.
6622 * Now, if the new task added to the run-queue preempts the current
6626 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6628 * - in syscall or exception context, at the next outermost
6632 * - in IRQ context, return from interrupt-handler to
6635 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6638 * - cond_resched() call
6639 * - explicit schedule() call
6640 * - return from syscall or exception to user-space
6641 * - return from interrupt-handler to user-space
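To illustrate the non-preemptible case above, here is an editor's sketch (not part of this file) of the usual explicit rescheduling point in a long kernel loop; struct my_item and process_item() are hypothetical, while cond_resched() and list_for_each_entry() are the real APIs.

#include <linux/list.h>
#include <linux/sched.h>

struct my_item {			/* hypothetical */
	struct list_head node;
	/* ... payload ... */
};

static void process_item(struct my_item *item);	/* hypothetical */

static void process_all(struct list_head *head)
{
	struct my_item *item;

	list_for_each_entry(item, head, node) {
		process_item(item);
		cond_resched();		/* explicit rescheduling point */
	}
}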
6664 prev = rq->curr; in __schedule()
6675 * Make sure that signal_pending_state()->signal_pending() below in __schedule()
6682 * LOCK rq->lock LOCK p->pi_state in __schedule()
6684 * if (signal_pending_state()) if (p->state & @state) in __schedule()
6687 * after coming from user-space, before storing to rq->curr; this in __schedule()
6695 rq->clock_update_flags <<= 1; in __schedule()
6697 rq->clock_update_flags = RQCF_UPDATED; in __schedule()
6699 switch_count = &prev->nivcsw; in __schedule()
6705 * We must load prev->state once (task_struct::state is volatile), such in __schedule()
6708 prev_state = READ_ONCE(prev->__state); in __schedule()
6711 if (!rq->nr_running && !scx_enabled()) { in __schedule()
6717 switch_count = &prev->nvcsw; in __schedule()
6725 rq->last_seen_need_resched_ns = 0; in __schedule()
6729 rq->nr_switches++; in __schedule()
6731 * RCU users of rcu_dereference(rq->curr) may not see in __schedule()
6734 RCU_INIT_POINTER(rq->curr, next); in __schedule()
6738 * rq->curr, before returning to user-space. in __schedule()
6742 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, in __schedule()
6743 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm() in __schedule()
6744 * on PowerPC and on RISC-V. in __schedule()
6745 * - finish_lock_switch() for weakly-ordered in __schedule()
6747 * - switch_to() for arm64 (weakly-ordered, spin_unlock in __schedule()
6753 * On RISC-V, this barrier pairing is also needed for the in __schedule()
6762 prev->se.sched_delayed); in __schedule()
6782 current->flags |= PF_NOFREEZE; in do_task_dead()
6787 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ in do_task_dead()
6799 * will use a blocking primitive -- which would lead to recursion. in sched_submit_work()
6803 task_flags = tsk->flags; in sched_submit_work()
6818 WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT); in sched_submit_work()
6824 blk_flush_plug(tsk->plug, true); in sched_submit_work()
6831 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) { in sched_update_worker()
6832 if (tsk->flags & PF_BLOCK_TS) in sched_update_worker()
6834 if (tsk->flags & PF_WQ_WORKER) in sched_update_worker()
6836 else if (tsk->flags & PF_IO_WORKER) in sched_update_worker()
6855 lockdep_assert(!tsk->sched_rt_mutex); in schedule()
6867 * state (have scheduled out non-voluntarily) by making sure that all
6870 * (schedule out non-voluntarily).
6884 WARN_ON_ONCE(current->__state); in schedule_idle()
6895 * or we have been woken up remotely but the IPI has not yet arrived, in schedule_user()
6910 * schedule_preempt_disabled - called with preemption disabled
6960 * This is the entry point to schedule() from in-kernel preemption
6966 * If there is a non-zero preempt_count or interrupts are disabled, in preempt_schedule()
6998 * preempt_schedule_notrace - preempt_schedule called by tracing
7102 return try_to_wake_up(curr->private, mode, wake_flags); in default_wake_function()
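A hedged usage sketch (not from this file): a plain wait-queue entry created with DECLARE_WAITQUEUE() points its ->func at default_wake_function(), so an ordinary wake_up() on the queue reaches try_to_wake_up() exactly as shown above; wait_for_flag() itself is hypothetical.

#include <linux/sched/signal.h>
#include <linux/wait.h>

static int wait_for_flag(wait_queue_head_t *wq, bool *flag)
{
	DECLARE_WAITQUEUE(wait, current);	/* ->func = default_wake_function */
	int ret = 0;

	add_wait_queue(wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(*flag))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);

	return ret;
}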
7126 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7134 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1)); in rt_mutex_pre_schedule()
7140 lockdep_assert(current->sched_rt_mutex); in rt_mutex_schedule()
7147 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0)); in rt_mutex_post_schedule()
7151 * rt_mutex_setprio - set the current priority of a task
7156 * not touch ->normal_prio like __setscheduler().
7169 /* XXX used to be waiter->prio, not waiter->task->prio */ in rt_mutex_setprio()
7170 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7175 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7181 * Set under pi_lock && rq->lock, such that the value can be used under in rt_mutex_setprio()
7186 * ensure a task is de-boosted (pi_task is set to NULL) before the in rt_mutex_setprio()
7188 * points to a blocked task -- which guarantees the task is present. in rt_mutex_setprio()
7190 p->pi_top_task = pi_task; in rt_mutex_setprio()
7195 if (prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7199 * Idle task boosting is a no-no in general. There is one in rt_mutex_setprio()
7203 * the timer wheel base->lock on the CPU and another CPU wants in rt_mutex_setprio()
7210 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7211 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7212 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7217 oldprio = p->prio; in rt_mutex_setprio()
7222 prev_class = p->sched_class; in rt_mutex_setprio()
7223 next_class = __setscheduler_class(p->policy, prio); in rt_mutex_setprio()
7225 if (prev_class != next_class && p->se.sched_delayed) in rt_mutex_setprio()
7237 * 1. -rt task is running and holds mutex A in rt_mutex_setprio()
7238 * --> -dl task blocks on mutex A in rt_mutex_setprio()
7240 * 2. -dl task is running and holds mutex A in rt_mutex_setprio()
7241 * --> -dl task blocks on mutex A and could preempt the in rt_mutex_setprio()
7245 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7246 (pi_task && dl_prio(pi_task->prio) && in rt_mutex_setprio()
7247 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7248 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7251 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7255 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7260 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7262 p->rt.timeout = 0; in rt_mutex_setprio()
7265 p->sched_class = next_class; in rt_mutex_setprio()
7266 p->prio = prio; in rt_mutex_setprio()
7296 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick in __cond_resched()
7297 * whether the current CPU is in an RCU read-side critical section, in __cond_resched()
7299 * in kernel context. In contrast, in non-preemptible kernels, in __cond_resched()
7300 * RCU readers leave no in-memory hints, which means that CPU-bound in __cond_resched()
7305 * A third case, preemptible, but non-PREEMPT_RCU provides for in __cond_resched()
7350 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7353 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7414 #include <linux/entry-common.h>
7426 * cond_resched <- __cond_resched
7427 * might_resched <- RET0
7428 * preempt_schedule <- NOP
7429 * preempt_schedule_notrace <- NOP
7430 * irqentry_exit_cond_resched <- NOP
7431 * dynamic_preempt_lazy <- false
7434 * cond_resched <- __cond_resched
7435 * might_resched <- __cond_resched
7436 * preempt_schedule <- NOP
7437 * preempt_schedule_notrace <- NOP
7438 * irqentry_exit_cond_resched <- NOP
7439 * dynamic_preempt_lazy <- false
7442 * cond_resched <- RET0
7443 * might_resched <- RET0
7444 * preempt_schedule <- preempt_schedule
7445 * preempt_schedule_notrace <- preempt_schedule_notrace
7446 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7447 * dynamic_preempt_lazy <- false
7450 * cond_resched <- RET0
7451 * might_resched <- RET0
7452 * preempt_schedule <- preempt_schedule
7453 * preempt_schedule_notrace <- preempt_schedule_notrace
7454 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7455 * dynamic_preempt_lazy <- true
7459 preempt_dynamic_undefined = -1,
7486 return -EINVAL; in sched_dynamic_mode()
7508 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in in __sched_dynamic_update()
7655 #define preempt_dynamic_mode -1
7708 int old_iowait = current->in_iowait; in io_schedule_prepare()
7710 current->in_iowait = 1; in io_schedule_prepare()
7711 blk_flush_plug(current->plug, true); in io_schedule_prepare()
7717 current->in_iowait = token; in io_schedule_finish()
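A hedged sketch of how these two helpers are meant to be paired, essentially the shape of io_schedule_timeout() itself; my_io_wait() is a hypothetical wrapper.

#include <linux/sched.h>

static long my_io_wait(long timeout)
{
	int token;

	set_current_state(TASK_UNINTERRUPTIBLE);
	token = io_schedule_prepare();		/* marks the sleep as IO-wait */
	timeout = schedule_timeout(timeout);
	io_schedule_finish(token);		/* restores ->in_iowait */

	return timeout;
}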
7721 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7755 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
7763 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
7765 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n", in sched_show_task()
7767 ppid, p->flags, read_task_thread_flags(p)); in sched_show_task()
7780 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
7808 * reset the NMI-timeout, listing all files on a slow in show_state_filter()
7832 * init_idle - set up an idle thread for a given CPU
7850 raw_spin_lock_irqsave(&idle->pi_lock, flags); in init_idle()
7853 idle->__state = TASK_RUNNING; in init_idle()
7854 idle->se.exec_start = sched_clock(); in init_idle()
7857 * look like a proper per-CPU kthread. in init_idle()
7859 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; in init_idle()
7871 * holding rq->lock, the CPU isn't yet set to this CPU so the in init_idle()
7875 * use task_rq_lock() here and obtain the other rq->lock. in init_idle()
7883 rq->idle = idle; in init_idle()
7885 rcu_assign_pointer(rq->curr, idle); in init_idle()
7886 idle->on_rq = TASK_ON_RQ_QUEUED; in init_idle()
7888 idle->on_cpu = 1; in init_idle()
7891 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); in init_idle()
7899 idle->sched_class = &idle_sched_class; in init_idle()
7903 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); in init_idle()
7935 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
7936 ret = -EINVAL; in task_can_attach()
7953 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
7954 return -EINVAL; in migrate_task_to()
7981 p->numa_preferred_nid = nid; in sched_setnuma()
8003 struct mm_struct *mm = current->active_mm; in sched_force_init_mm()
8008 current->active_mm = &init_mm; in sched_force_init_mm()
8025 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
8031 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
8036 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
8046 * Ensure we only run per-cpu kthreads once the CPU goes !active.
8053 struct task_struct *push_task = rq->curr; in balance_push()
8060 rq->balance_callback = &balance_push_callback; in balance_push()
8066 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
8070 * Both the cpu-hotplug and stop task are in this case and are in balance_push()
8087 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && in balance_push()
8088 rcuwait_active(&rq->hotplug_wait)) { in balance_push()
8090 rcuwait_wake_up(&rq->hotplug_wait); in balance_push()
8098 * Temporarily drop rq->lock such that we can wake-up the stop task. in balance_push()
8103 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, in balance_push()
8121 WARN_ON_ONCE(rq->balance_callback); in balance_push_set()
8122 rq->balance_callback = &balance_push_callback; in balance_push_set()
8123 } else if (rq->balance_callback == &balance_push_callback) { in balance_push_set()
8124 rq->balance_callback = NULL; in balance_push_set()
8139 rcuwait_wait_event(&rq->hotplug_wait, in balance_hotplug_wait()
8140 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), in balance_hotplug_wait()
8162 if (!rq->online) { in set_rq_online()
8165 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
8166 rq->online = 1; in set_rq_online()
8169 if (class->rq_online) in set_rq_online()
8170 class->rq_online(rq); in set_rq_online()
8177 if (rq->online) { in set_rq_offline()
8182 if (class->rq_offline) in set_rq_offline()
8183 class->rq_offline(rq); in set_rq_offline()
8186 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
8187 rq->online = 0; in set_rq_offline()
8196 if (rq->rd) { in sched_set_rq_online()
8197 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_online()
8208 if (rq->rd) { in sched_set_rq_offline()
8209 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_offline()
8238 if (--num_cpus_frozen) in cpuset_cpu_active()
8342 * preempt-disabled and RCU users of this state to go away such that in sched_cpu_deactivate()
8378 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
8414 * stable. We need to take the tear-down thread which is calling this into
8417 * Also see the comment "Global load-average calculations".
8434 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); in dump_rq_tasks()
8442 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
8455 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { in sched_cpu_dying()
8482 /* Move init over to a non-isolated CPU */ in sched_init_smp()
8485 current->flags &= ~PF_NO_SETAFFINITY; in sched_init_smp()
8600 raw_spin_lock_init(&rq->__lock); in sched_init()
8601 rq->nr_running = 0; in sched_init()
8602 rq->calc_load_active = 0; in sched_init()
8603 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
8604 init_cfs_rq(&rq->cfs); in sched_init()
8605 init_rt_rq(&rq->rt); in sched_init()
8606 init_dl_rq(&rq->dl); in sched_init()
8608 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
8609 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
8613 * In case of task-groups formed through the cgroup filesystem, it in sched_init()
8616 * root_task_group and its child task-groups in a fair manner, in sched_init()
8617 * based on each entity's (task or task-group's) weight in sched_init()
8618 * (se->load.weight). in sched_init()
8627 * directly in rq->cfs (i.e root_task_group->se[] = NULL). in sched_init()
8629 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
8638 rq->rt.rt_runtime = global_rt_runtime(); in sched_init()
8639 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
8642 rq->sd = NULL; in sched_init()
8643 rq->rd = NULL; in sched_init()
8644 rq->cpu_capacity = SCHED_CAPACITY_SCALE; in sched_init()
8645 rq->balance_callback = &balance_push_callback; in sched_init()
8646 rq->active_balance = 0; in sched_init()
8647 rq->next_balance = jiffies; in sched_init()
8648 rq->push_cpu = 0; in sched_init()
8649 rq->cpu = i; in sched_init()
8650 rq->online = 0; in sched_init()
8651 rq->idle_stamp = 0; in sched_init()
8652 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
8653 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
8655 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
8659 rq->last_blocked_load_update_tick = jiffies; in sched_init()
8660 atomic_set(&rq->nohz_flags, 0); in sched_init()
8662 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); in sched_init()
8665 rcuwait_init(&rq->hotplug_wait); in sched_init()
8669 atomic_set(&rq->nr_iowait, 0); in sched_init()
8673 rq->core = rq; in sched_init()
8674 rq->core_pick = NULL; in sched_init()
8675 rq->core_dl_server = NULL; in sched_init()
8676 rq->core_enabled = 0; in sched_init()
8677 rq->core_tree = RB_ROOT; in sched_init()
8678 rq->core_forceidle_count = 0; in sched_init()
8679 rq->core_forceidle_occupation = 0; in sched_init()
8680 rq->core_forceidle_start = 0; in sched_init()
8682 rq->core_cookie = 0UL; in sched_init()
8684 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); in sched_init()
8698 * is dressed up as a per-CPU kthread and thus needs to play the part in sched_init()
8699 * if we want to avoid special-casing it in code that deals with per-CPU in sched_init()
8737 * Blocking primitives will set (and therefore destroy) current->state, in __might_sleep()
8741 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, in __might_sleep()
8744 (void *)current->task_state_change, in __might_sleep()
8745 (void *)current->task_state_change); in __might_sleep()
8783 !is_idle_task(current) && !current->non_block_count) || in __might_resched()
8798 in_atomic(), irqs_disabled(), current->non_block_count, in __might_resched()
8799 current->pid, current->comm); in __might_resched()
8843 current->pid, current->comm); in __cant_sleep()
8875 current->pid, current->comm); in __cant_migrate()
8898 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
8901 p->se.exec_start = 0; in normalize_rt_tasks()
8902 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
8903 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
8904 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
8928 * stopped - every CPU needs to be quiescent, and no scheduling
8935 * curr_task - return the current task for a given CPU.
8960 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
8962 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
8988 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_unregister_group()
8998 return ERR_PTR(-ENOMEM); in sched_create_group()
9013 return ERR_PTR(-ENOMEM); in sched_create_group()
9021 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
9026 tg->parent = parent; in sched_online_group()
9027 INIT_LIST_HEAD(&tg->children); in sched_online_group()
9028 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
9044 call_rcu(&tg->rcu, sched_unregister_group_rcu); in sched_destroy_group()
9065 list_del_rcu(&tg->list); in sched_release_group()
9066 list_del_rcu(&tg->siblings); in sched_release_group()
9082 tsk->sched_task_group = tg; in sched_change_group()
9085 if (tsk->sched_class->task_change_group) in sched_change_group()
9086 tsk->sched_class->task_change_group(tsk); in sched_change_group()
9096 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9148 return ERR_PTR(-ENOMEM); in cpu_cgroup_css_alloc()
9150 return &tg->css; in cpu_cgroup_css_alloc()
9157 struct task_group *parent = css_tg(css->parent); in cpu_cgroup_css_online()
9209 return -EINVAL; in cpu_cgroup_can_attach()
9245 uc_parent = css_tg(css)->parent in cpu_util_update_eff()
9246 ? css_tg(css)->parent->uclamp : NULL; in cpu_util_update_eff()
9250 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; in cpu_util_update_eff()
9262 uc_se = css_tg(css)->uclamp; in cpu_util_update_eff()
9312 req.ret = -ERANGE; in capacity_from_percent()
9340 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
9341 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
9347 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
9379 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
9387 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
9409 return scale_load_down(tg->shares); in tg_weight()
9411 return sched_weight_from_cgroup(tg->scx_weight); in tg_weight()
9450 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
9453 return -EINVAL; in tg_set_cfs_bandwidth()
9461 return -EINVAL; in tg_set_cfs_bandwidth()
9469 return -EINVAL; in tg_set_cfs_bandwidth()
9475 return -EINVAL; in tg_set_cfs_bandwidth()
9479 return -EINVAL; in tg_set_cfs_bandwidth()
9482 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
9493 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; in tg_set_cfs_bandwidth()
9495 * If we need to toggle cfs_bandwidth_used, off->on must occur in tg_set_cfs_bandwidth()
9496 * before making related changes, and on->off must occur afterwards in tg_set_cfs_bandwidth()
9501 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { in tg_set_cfs_bandwidth()
9502 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
9503 cfs_b->quota = quota; in tg_set_cfs_bandwidth()
9504 cfs_b->burst = burst; in tg_set_cfs_bandwidth()
9517 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
9518 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
9521 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
9522 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
9524 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
9538 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
9539 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_quota()
9545 return -EINVAL; in tg_set_cfs_quota()
9554 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
9555 return -1; in tg_get_cfs_quota()
9557 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
9568 return -EINVAL; in tg_set_cfs_period()
9571 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
9572 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_period()
9581 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
9592 return -EINVAL; in tg_set_cfs_burst()
9595 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_burst()
9596 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_burst()
9605 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
9661 if (tg == d->tg) { in normalize_cfs_quota()
9662 period = d->period; in normalize_cfs_quota()
9663 quota = d->quota; in normalize_cfs_quota()
9670 if (quota == RUNTIME_INF || quota == -1) in normalize_cfs_quota()
9679 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
9680 s64 quota = 0, parent_quota = -1; in tg_cfs_schedulable_down()
9682 if (!tg->parent) { in tg_cfs_schedulable_down()
9685 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
9688 parent_quota = parent_b->hierarchical_quota; in tg_cfs_schedulable_down()
9692 * always take the non-RUNTIME_INF min. On cgroup1, only in tg_cfs_schedulable_down()
9706 return -EINVAL; in tg_cfs_schedulable_down()
9709 cfs_b->hierarchical_quota = quota; in tg_cfs_schedulable_down()
9734 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
9736 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); in cpu_cfs_stat_show()
9737 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); in cpu_cfs_stat_show()
9738 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); in cpu_cfs_stat_show()
9746 stats = __schedstats_from_se(tg->se[i]); in cpu_cfs_stat_show()
9747 ws += schedstat_val(stats->wait_sum); in cpu_cfs_stat_show()
9753 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); in cpu_cfs_stat_show()
9754 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); in cpu_cfs_stat_show()
9765 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
9811 return css_tg(css)->idle; in cpu_idle_read_s64()
9899 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
9902 throttled_usec = cfs_b->throttled_time; in cpu_extra_stat_show()
9904 burst_usec = cfs_b->burst_time; in cpu_extra_stat_show()
9912 cfs_b->nr_periods, cfs_b->nr_throttled, in cpu_extra_stat_show()
9913 throttled_usec, cfs_b->nr_burst, burst_usec); in cpu_extra_stat_show()
9952 return -ERANGE; in cpu_weight_write_u64()
9971 delta = abs(sched_prio_to_weight[prio] - weight); in cpu_weight_nice_read_s64()
9977 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); in cpu_weight_nice_read_s64()
9987 return -ERANGE; in cpu_weight_nice_write_s64()
9989 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; in cpu_weight_nice_write_s64()
10019 return -EINVAL; in cpu_period_quota_parse()
10028 return -EINVAL; in cpu_period_quota_parse()
10047 u64 burst = tg->cfs_bandwidth.burst; in cpu_max_write()
10150 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10151 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10155 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10161 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10162 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10163 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10164 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10172 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10175 * pre-calculated inverse to speed up arithmetics by turning divisions
10179 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10180 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10181 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10182 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
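A stand-alone worked example of the "~10% effect" described above (editor's sketch in plain userspace C, not kernel code); the nice-0 and nice-1 weights of 1024 and 820 are assumed from the rows of sched_prio_to_weight[] that are truncated in this excerpt.

#include <stdio.h>

int main(void)
{
	const double w_nice0 = 1024.0, w_nice1 = 820.0;	/* assumed table values */
	double share0 = 100.0 * w_nice0 / (w_nice0 + w_nice1);
	double share1 = 100.0 * w_nice1 / (w_nice0 + w_nice1);

	/* Prints roughly 55.5 and 44.5: about a 10-point gap in CPU share
	 * for two CPU-bound tasks one nice level apart. */
	printf("nice 0: %.1f%%  nice 1: %.1f%%\n", share0, share1);

	return 0;
}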
10197 * @cid_lock: Guarantee forward-progress of cid allocation.
10199 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10200 * is only used when contention is detected by the lock-free allocation so
10206 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10208 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10217 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10223 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10240 * per-mm/cpu cid value.
10242 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10243 * task->mm != mm for the rest of the discussion. There are two scheduler state
10246 * (TSA) Store to rq->curr with transition from (N) to (Y)
10248 * (TSB) Store to rq->curr with transition from (Y) to (N)
10250 * On the remote-clear side, there is one transition we care about:
10255 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10269 * Context switch CS-1 Remote-clear
10270 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10272 * - switch_mm_cid()
10273 * - memory barrier (see switch_mm_cid()
10277 * - mm_cid_get (next)
10278 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10285 * still an active task on the cpu. Remote-clear will therefore not transition
10306 t->migrate_from_cpu = task_cpu(t); in sched_mm_cid_migrate_from()
10314 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_fetch_cid()
10319 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10321 last_mm_cid = t->last_mm_cid; in __sched_mm_cid_migrate_from_fetch_cid()
10327 if (last_mm_cid == -1) in __sched_mm_cid_migrate_from_fetch_cid()
10328 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10329 src_cid = READ_ONCE(src_pcpu_cid->cid); in __sched_mm_cid_migrate_from_fetch_cid()
10331 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10339 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_fetch_cid()
10340 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_fetch_cid()
10341 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_fetch_cid()
10342 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10355 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_try_steal_cid()
10358 if (src_cid == -1) in __sched_mm_cid_migrate_from_try_steal_cid()
10359 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10366 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) in __sched_mm_cid_migrate_from_try_steal_cid()
10367 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10370 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10371 * rq->curr->mm matches the scheduler barrier in context_switch() in __sched_mm_cid_migrate_from_try_steal_cid()
10372 * between store to rq->curr and load of prev and next task's in __sched_mm_cid_migrate_from_try_steal_cid()
10373 * per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10375 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10376 * rq->curr->mm_cid_active matches the barrier in in __sched_mm_cid_migrate_from_try_steal_cid()
10378 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in __sched_mm_cid_migrate_from_try_steal_cid()
10379 * load of per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10384 * the lazy-put flag, this task will be responsible for transitioning in __sched_mm_cid_migrate_from_try_steal_cid()
10385 * from lazy-put flag set to MM_CID_UNSET. in __sched_mm_cid_migrate_from_try_steal_cid()
10388 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_try_steal_cid()
10389 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_try_steal_cid()
10394 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10395 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10402 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in __sched_mm_cid_migrate_from_try_steal_cid()
10403 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10404 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET); in __sched_mm_cid_migrate_from_try_steal_cid()
10416 struct mm_struct *mm = t->mm; in sched_mm_cid_migrate_to()
10425 src_cpu = t->migrate_from_cpu; in sched_mm_cid_migrate_to()
10426 if (src_cpu == -1) { in sched_mm_cid_migrate_to()
10427 t->last_mm_cid = -1; in sched_mm_cid_migrate_to()
10440 * greater than or equal to the number of allowed CPUs, because user-space in sched_mm_cid_migrate_to()
10444 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); in sched_mm_cid_migrate_to()
10445 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) || in sched_mm_cid_migrate_to()
10446 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid)); in sched_mm_cid_migrate_to()
10447 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed)) in sched_mm_cid_migrate_to()
10449 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); in sched_mm_cid_migrate_to()
10452 if (src_cid == -1) in sched_mm_cid_migrate_to()
10456 if (src_cid == -1) in sched_mm_cid_migrate_to()
10464 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); in sched_mm_cid_migrate_to()
10465 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid); in sched_mm_cid_migrate_to()
10475 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear()
10486 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) in sched_mm_cid_remote_clear()
10490 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10491 * rq->curr->mm matches the scheduler barrier in context_switch() in sched_mm_cid_remote_clear()
10492 * between store to rq->curr and load of prev and next task's in sched_mm_cid_remote_clear()
10493 * per-mm/cpu cid. in sched_mm_cid_remote_clear()
10495 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10496 * rq->curr->mm_cid_active matches the barrier in in sched_mm_cid_remote_clear()
10498 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in sched_mm_cid_remote_clear()
10499 * load of per-mm/cpu cid. in sched_mm_cid_remote_clear()
10504 * the lazy-put flag, that task will be responsible for transitioning in sched_mm_cid_remote_clear()
10505 * from lazy-put flag set to MM_CID_UNSET. in sched_mm_cid_remote_clear()
10508 t = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear()
10509 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) in sched_mm_cid_remote_clear()
10519 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in sched_mm_cid_remote_clear()
10532 * rq->clock load is racy on 32-bit but one spurious clear once in a in sched_mm_cid_remote_clear_old()
10535 rq_clock = READ_ONCE(rq->clock); in sched_mm_cid_remote_clear_old()
10536 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
10544 curr = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear_old()
10545 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { in sched_mm_cid_remote_clear_old()
10546 WRITE_ONCE(pcpu_cid->time, rq_clock); in sched_mm_cid_remote_clear_old()
10551 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS) in sched_mm_cid_remote_clear_old()
10562 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
10563 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear_weight()
10579 work->next = work; /* Prevent double-add */ in task_mm_cid_work()
10580 if (t->flags & PF_EXITING) in task_mm_cid_work()
10582 mm = t->mm; in task_mm_cid_work()
10585 old_scan = READ_ONCE(mm->mm_cid_next_scan); in task_mm_cid_work()
10590 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); in task_mm_cid_work()
10598 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) in task_mm_cid_work()
10615 struct mm_struct *mm = t->mm; in init_sched_mm_cid()
10619 mm_users = atomic_read(&mm->mm_users); in init_sched_mm_cid()
10621 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); in init_sched_mm_cid()
10623 t->cid_work.next = &t->cid_work; /* Protect against double add */ in init_sched_mm_cid()
10624 init_task_work(&t->cid_work, task_mm_cid_work); in init_sched_mm_cid()
10629 struct callback_head *work = &curr->cid_work; in task_tick_mm_cid()
10632 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || in task_tick_mm_cid()
10633 work->next != work) in task_tick_mm_cid()
10635 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) in task_tick_mm_cid()
10644 struct mm_struct *mm = t->mm; in sched_mm_cid_exit_signals()
10654 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_exit_signals()
10656 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_exit_signals()
10661 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_exit_signals()
10666 struct mm_struct *mm = t->mm; in sched_mm_cid_before_execve()
10676 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_before_execve()
10678 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_before_execve()
10683 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_before_execve()
10688 struct mm_struct *mm = t->mm; in sched_mm_cid_after_execve()
10698 WRITE_ONCE(t->mm_cid_active, 1); in sched_mm_cid_after_execve()
10700 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_after_execve()
10704 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm); in sched_mm_cid_after_execve()
10710 WARN_ON_ONCE(!t->mm || t->mm_cid != -1); in sched_mm_cid_fork()
10711 t->mm_cid_active = 1; in sched_mm_cid_fork()
10731 if (ctx->queued) in sched_deq_and_put_task()
10733 if (ctx->running) in sched_deq_and_put_task()
10739 struct rq *rq = task_rq(ctx->p); in sched_enq_and_set_task()
10743 if (ctx->queued) in sched_enq_and_set_task()
10744 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK); in sched_enq_and_set_task()
10745 if (ctx->running) in sched_enq_and_set_task()
10746 set_next_task(rq, ctx->p); in sched_enq_and_set_task()