Lines Matching +full:idle +full:- +full:state

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Generic entry points for the idle threads and
4 * implementation of the idle task scheduling class.
17 * sched_idle_set_state - Record idle state for the current CPU.
18 * @idle_state: State to record.
32 cpu_idle_force_poll--; in cpu_idle_poll_ctrl()
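cpu_idle_force_poll is a reference count: while it is non-zero, idle CPUs spin in cpu_idle_poll() instead of entering a cpuidle state. A minimal sketch of balanced use around a latency-critical window (the helper name is hypothetical; cpu_idle_poll_ctrl() is declared in <linux/cpu.h>):

#include <linux/cpu.h>

/* Hypothetical helper: force polling idle while latency matters. */
static void my_low_latency_window(void)
{
	cpu_idle_poll_ctrl(true);	/* cpu_idle_force_poll++ */

	/* ... latency-sensitive work ... */

	cpu_idle_poll_ctrl(false);	/* cpu_idle_force_poll-- */
}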
85 * default_idle_call - Default CPU idle routine.
104 * rcu_idle_enter() relies on lockdep IRQ state, so switch that in default_idle_call()
105 * last -- this is very similar to the entry code. in default_idle_call()
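The ordering constraint in that comment shows up on the enter side of default_idle_call(); a trimmed sketch based on kernels around v5.10 (the exact lockdep/tracing helpers vary by version):

	trace_cpu_idle(1, smp_processor_id());
	stop_critical_timings();

	/* Announce to tracing/lockdep that IRQs are about to be enabled... */
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(_THIS_IP_);
	/* ...then switch off RCU last: rcu_idle_enter() relies on that lockdep IRQ state. */
	rcu_idle_enter();
	lockdep_hardirqs_on(_THIS_IP_);

	arch_cpu_idle();	/* blocks until an interrupt, using raw_local_irq_enable() */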
135 return -EBUSY; in call_cpuidle_s2idle()
144 * The idle task must be scheduled, it is pointless to go to idle, just in call_cpuidle()
145 * record a zero idle residency and return. in call_cpuidle()
148 dev->last_residency_ns = 0; in call_cpuidle()
150 return -EBUSY; in call_cpuidle()
154 * Enter the idle state previously returned by the governor decision. in call_cpuidle()
156 * care of re-enabling the local interrupts in call_cpuidle()
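A condensed sketch of the two cases call_cpuidle() handles, simplified from kernel/sched/idle.c (not the verbatim function):

static int call_cpuidle_sketch(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev, int next_state)
{
	/* A reschedule is already pending: record zero idle residency and bail out. */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the state picked by the governor; the cpuidle callback is
	 * responsible for re-enabling local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}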
162 * cpuidle_idle_call - the main idle function
177 * Check if the idle task must be rescheduled. If it is the in cpuidle_idle_call()
178 * case, exit the function after re-enabling the local irq. in cpuidle_idle_call()
186 * The RCU framework needs to be told that we are entering an idle in cpuidle_idle_call()
199 * Suspend-to-idle ("s2idle") is a system state in which all user space in cpuidle_idle_call()
202 * the cpuidle governor and go straight for the deepest idle state in cpuidle_idle_call()
204 * timekeeping to prevent timer interrupts from kicking us out of idle in cpuidle_idle_call()
208 if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) { in cpuidle_idle_call()
219 max_latency_ns = dev->forced_idle_latency_limit_ns; in cpuidle_idle_call()
230 * Ask the cpuidle framework to choose a convenient idle state. in cpuidle_idle_call()
250 * It is up to the idle functions to reenable local interrupts in cpuidle_idle_call()
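Put together, the two paths described above look roughly like this (condensed; the s2idle fast path via call_cpuidle_s2idle() and all tracing are omitted):

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns = idle_should_enter_s2idle() ?
				     U64_MAX : dev->forced_idle_latency_limit_ns;

		/* Bypass the governor, keep the tick stopped, go as deep as allowed. */
		tick_nohz_idle_stop_tick();
		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/* Let the governor pick a state and say whether to stop the tick. */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/* Feed the outcome back to the governor. */
		cpuidle_reflect(dev, entered_state);
	}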
257 * Generic idle loop implementation
267 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr != in do_idle()
268 * rq->idle). This means that, if rq->idle has the polling bit set, in do_idle()
291 * detected in the wakeup from idle path that the tick in do_idle()
293 * idle as we know that the IPI is going to arrive right away. in do_idle()
308 * This is required because for polling idle loops we will not have had in do_idle()
309 * an IPI to fold the state for us. in do_idle()
323 * RCU relies on this call to be done outside of an RCU read-side in do_idle()
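The matched do_idle() lines come from a loop whose overall shape, in kernels around v5.10, is roughly the following (CPU-offline handling and livepatch hooks omitted):

static void do_idle_sketch(void)
{
	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		local_irq_disable();

		/* Poll if forced to, or if a tick-broadcast IPI is about to arrive anyway. */
		if (cpu_idle_force_poll || tick_check_broadcast_expired())
			cpu_idle_poll();
		else
			cpuidle_idle_call();

		arch_cpu_idle_exit();
	}

	/*
	 * Polling idle loops get no IPI to fold TIF_NEED_RESCHED for us,
	 * so propagate it into PREEMPT_NEED_RESCHED by hand.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/* Make clearing the polling bit visible before acting on pending work. */
	smp_mb__after_atomic();

	/* Must run outside an RCU read-side critical section (see the comment above). */
	flush_smp_call_function_from_idle();
	schedule_idle();
}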
348 WRITE_ONCE(it->done, 1); in idle_inject_timer_fn()
362 WARN_ON_ONCE(current->policy != SCHED_FIFO); in play_idle_precise()
363 WARN_ON_ONCE(current->nr_cpus_allowed != 1); in play_idle_precise()
364 WARN_ON_ONCE(!(current->flags & PF_KTHREAD)); in play_idle_precise()
365 WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY)); in play_idle_precise()
370 current->flags |= PF_IDLE; in play_idle_precise()
383 current->flags &= ~PF_IDLE; in play_idle_precise()
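The WARN_ON_ONCE() checks above spell out the contract: play_idle_precise() may only be called from a SCHED_FIFO kernel thread pinned to one CPU. A minimal sketch of an injection thread honouring that contract (thread function and duty cycle are hypothetical; play_idle() is the wrapper from <linux/cpu.h> that calls play_idle_precise() with no latency limit):

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical per-CPU injection thread: force ~10 ms of idle every 100 ms. */
static int idle_inject_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		play_idle(10 * USEC_PER_MSEC);	/* PF_IDLE is set for the duration */
		msleep(90);
	}
	return 0;
}

static struct task_struct *start_injection(unsigned int cpu)
{
	struct task_struct *tsk;

	/* kthread_create_on_cpu() binds the thread, giving PF_NO_SETAFFINITY. */
	tsk = kthread_create_on_cpu(idle_inject_thread_fn, NULL, cpu, "idle_inject/%u");
	if (IS_ERR(tsk))
		return tsk;

	sched_set_fifo(tsk);		/* satisfies the SCHED_FIFO check above */
	wake_up_process(tsk);
	return tsk;
}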
390 void cpu_startup_entry(enum cpuhp_state state) in cpu_startup_entry() argument
393 cpuhp_online_idle(state); in cpu_startup_entry()
399 * idle-task scheduling class.
406 return task_cpu(p); /* IDLE tasks are never migrated */ in select_task_rq_idle()
417 * Idle tasks are unconditionally rescheduled:
431 schedstat_inc(rq->sched_goidle); in set_next_task_idle()
436 struct task_struct *next = rq->idle; in pick_next_task_idle()
444 * It is not legal to sleep in the idle task - print a warning
450 raw_spin_unlock_irq(&rq->lock); in dequeue_task_idle()
451 printk(KERN_ERR "bad: scheduling from the idle thread!\n"); in dequeue_task_idle()
453 raw_spin_lock_irq(&rq->lock); in dequeue_task_idle()
484 * Simple, special scheduling class for the per-CPU idle tasks:
488 /* no enqueue/yield_task for idle tasks */
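For reference, these callbacks are wired into the scheduler core through the idle class definition, which in kernels around v5.10 looks roughly like this (member names change between versions):

const struct sched_class idle_sched_class
	__section("__idle_sched_class") = {
	/* no enqueue_task/yield_task for idle tasks */

	/* dequeue is not valid, it only prints the warning shown above: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};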