/linux/tools/verification/models/sched/

  sssw.dot
    5    {node [shape = doublecircle] "runnable"};
    6    {node [shape = circle] "runnable"};
    10   "__init_runnable" -> "runnable";
    11   "runnable" [label = "runnable", color = green3];
    12   "runnable" -> "runnable" [ label = "sched_set_state_runnable\nsched_wakeup\nsched_switch_in\nsched_switch_yield\nsched_switch_preempt\nsignal_deliver" ];
    13   "runnable" -> "sleepable" [ label = "sched_set_state_sleepable" ];
    14   "runnable" -> "sleeping" [ label = "sched_switch_blocking" ];
    16   "signal_wakeup" -> "runnable" [ labe [all...]
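
The excerpt above is part of a DOT model of a per-task scheduler state machine, with states such as "runnable", "sleepable" and "sleeping". As a rough illustration only, the C sketch below encodes just the edges visible in this excerpt; the elided transitions (including the label of the "signal_wakeup" edge) and the real RV-generated monitor code are not reproduced, and every identifier here is invented:

    /*
     * Toy encoding of the transitions shown in the sssw.dot excerpt above.
     * Edges not visible in the excerpt are deliberately left out.
     */
    #include <stdio.h>

    enum sssw_state { ST_RUNNABLE, ST_SLEEPABLE, ST_SLEEPING, ST_SIGNAL_WAKEUP };

    enum sssw_event {
            EV_SCHED_SET_STATE_RUNNABLE,
            EV_SCHED_SET_STATE_SLEEPABLE,
            EV_SCHED_WAKEUP,
            EV_SCHED_SWITCH_IN,
            EV_SCHED_SWITCH_YIELD,
            EV_SCHED_SWITCH_PREEMPT,
            EV_SCHED_SWITCH_BLOCKING,
            EV_SIGNAL_DELIVER,
    };

    /* Next state from "runnable"; -1 where the excerpt shows no edge. */
    static int sssw_step_from_runnable(enum sssw_event e)
    {
            switch (e) {
            case EV_SCHED_SET_STATE_SLEEPABLE:
                    return ST_SLEEPABLE;            /* model line 13 */
            case EV_SCHED_SWITCH_BLOCKING:
                    return ST_SLEEPING;             /* model line 14 */
            case EV_SCHED_SET_STATE_RUNNABLE:
            case EV_SCHED_WAKEUP:
            case EV_SCHED_SWITCH_IN:
            case EV_SCHED_SWITCH_YIELD:
            case EV_SCHED_SWITCH_PREEMPT:
            case EV_SIGNAL_DELIVER:
                    return ST_RUNNABLE;             /* self-loop, model line 12 */
            }
            return -1;
    }

    int main(void)
    {
            printf("%d\n", sssw_step_from_runnable(EV_SCHED_SWITCH_BLOCKING));
            return 0;
    }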

/linux/kernel/sched/

  pelt.c
    104   unsigned long load, unsigned long runnable, int running) in accumulate_sum() argument
    130   * runnable = running = 0; in accumulate_sum()
    144   if (runnable) in accumulate_sum()
    145   sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT; in accumulate_sum()
    153   * We can represent the historical contribution to runnable average as the
    154   * coefficients of a geometric series. To do this we sub-divide our runnable
    162   * Let u_i denote the fraction of p_i that the entity was runnable.
    182   unsigned long load, unsigned long runnable, int running) in ___update_load_sum() argument
    207   * running is a subset of runnable (weight) so running can't be set if in ___update_load_sum()
    208   * runnable i in ___update_load_sum() [all...]
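
The comment quoted at lines 153-162 describes the decaying average behind these runnable/running sums. Restating it with pelt.c's own conventions (history sub-divided into segments of roughly 1 ms, i.e. 1024 us, and a decay factor fixed by y^32 = 0.5), the tracked quantity is

    u = u_0 + u_1 y + u_2 y^2 + \cdots = \sum_{i \ge 0} u_i\, y^i, \qquad y^{32} = \tfrac{1}{2},

where u_i is the fraction of the i-th most recent segment during which the entity was runnable; accumulate_sum() above is the code that folds each new segment's contribution into runnable_sum.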

/linux/tools/sched_ext/

  scx_flatcg.bpf.c
    13    * Let's say all three have runnable tasks. The total share that each of these
    26    * and keep updating the eventual shares as the cgroups' runnable states change.
    416   static void update_active_weight_sums(struct cgroup *cgrp, bool runnable) in update_active_weight_sums() argument
    433   if (runnable) { in update_active_weight_sums()
    444   * If @cgrp is becoming runnable, its hweight should be refreshed after in update_active_weight_sums()
    450   if (!runnable) in update_active_weight_sums()
    476   if (runnable) { in update_active_weight_sums()
    503   if (runnable) in update_active_weight_sums()
    539   void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable) in BPF_STRUCT_OPS() argument
    942   .runnable [all...]
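
The scx_flatcg.bpf.c comment excerpted at lines 13-26 describes how each cgroup's weight share is compounded down the hierarchy (relative to its active siblings) into a single flattened value, with update_active_weight_sums() keeping the sums current as cgroups gain or lose runnable tasks. The following is an illustrative userspace model of that compounding only, not the BPF code; the struct and function names and the example weights are invented:

    /* Toy model: effective share = parent's share * weight / active siblings' weight sum. */
    #include <stdio.h>

    struct cg {
            const char *name;
            unsigned int weight;    /* cpu.weight-style value */
            int active;             /* has runnable tasks */
            struct cg *parent;
    };

    /* Sum of the weights of @c's active siblings, @c included. */
    static unsigned int active_sibling_sum(struct cg *c, struct cg **all, int n)
    {
            unsigned int sum = 0;

            for (int i = 0; i < n; i++)
                    if (all[i]->parent == c->parent && all[i]->active)
                            sum += all[i]->weight;
            return sum;
    }

    /* Compounded (hierarchical) share of the whole machine, in [0.0, 1.0]. */
    static double hshare(struct cg *c, struct cg **all, int n)
    {
            if (!c->parent)
                    return 1.0;
            return hshare(c->parent, all, n) *
                   (double)c->weight / active_sibling_sum(c, all, n);
    }

    int main(void)
    {
            struct cg root = { "root", 100, 1, NULL };
            struct cg a = { "A", 100, 1, &root };
            struct cg b = { "B", 200, 1, &root };
            struct cg c = { "C", 100, 1, &root };
            struct cg *all[] = { &root, &a, &b, &c };

            for (int i = 1; i < 4; i++)
                    printf("%s gets %.2f of the CPU\n", all[i]->name,
                           hshare(all[i], all, 4));
            return 0;
    }

With these made-up weights, B ends up with half the machine and A and C a quarter each; scx_flatcg appears to maintain analogous compounded weights on the BPF side so that the hierarchy can be scheduled as a single flattened layer.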

  scx_simple.bpf.c
    110   void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable) in BPF_STRUCT_OPS() argument

  scx_central.bpf.c
    246   void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable) in BPF_STRUCT_OPS() argument

/linux/Documentation/scheduler/

  schedutil.rst
    35   Using this we track 2 key metrics: 'running' and 'runnable'. 'Running'
    36   reflects the time an entity spends on the CPU, while 'runnable' reflects the
    40   while 'runnable' will increase to reflect the amount of contention.
    83   The result is that the above 'running' and 'runnable' metrics become invariant
    104  A further runqueue wide sum (of runnable tasks) is maintained of:
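
To make lines 35-40 concrete with made-up numbers: if two always-busy tasks share one CPU, each spends about half its time on the CPU and the other half waiting for it, so per task

    \mathrm{running}_i \approx 0.5, \qquad \mathrm{runnable}_i \approx 1.0 \quad (i = 1, 2),

and the runqueue-wide sums referred to at line 104 come out to roughly

    \sum_i \mathrm{running}_i \approx 1.0, \qquad \sum_i \mathrm{runnable}_i \approx 2.0,

i.e. 'running' saturates at one CPU's worth of time while 'runnable' keeps growing with the amount of contention.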

  sched-ext.rst
    20   a runnable task stalls, or on invoking the SysRq key sequence
    65   detection of any internal error including stalled runnable tasks aborts the
    211  from the global DSQ. If that doesn't yield a runnable task either,
    280  * If the previous task is an SCX task and still runnable, keep executing
    313  ops.runnable(); /* Task becomes ready to run */
    315  while (task is runnable) {
    324  while (task->scx.slice > 0 && task is runnable)
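
Lines 313-324 sketch the lifecycle a BPF scheduler observes: ops.runnable() when a task becomes ready, then running/stopping cycles while it stays runnable. A minimal sketch of the two callbacks named there, written in the style of the tools/sched_ext examples listed earlier, could look like the following; the header path is the one those examples use, the scheduler name and counters are invented, and every callback left out falls back to the built-in defaults:

    #include <scx/common.bpf.h>

    char _license[] SEC("license") = "GPL";

    static u64 nr_runnable_events;  /* tasks becoming ready to run */
    static u64 nr_preemptions;      /* tasks stopped while still runnable */

    /* ops.runnable(): @p has become ready to run. */
    void BPF_STRUCT_OPS(sketch_runnable, struct task_struct *p, u64 enq_flags)
    {
            __sync_fetch_and_add(&nr_runnable_events, 1);
    }

    /* ops.stopping(): @p is coming off the CPU; @runnable says whether it still wants to run. */
    void BPF_STRUCT_OPS(sketch_stopping, struct task_struct *p, bool runnable)
    {
            if (runnable)
                    __sync_fetch_and_add(&nr_preemptions, 1);
    }

    SEC(".struct_ops.link")
    struct sched_ext_ops sketch_ops = {
            .runnable       = (void *)sketch_runnable,
            .stopping       = (void *)sketch_stopping,
            .name           = "sketch",
    };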

  sched-design-CFS.rst
    48   up CPU time between runnable tasks as close to "ideal multitasking hardware" as
    75   CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
    174  Called when a task enters a runnable state.
    180  When a task is no longer runnable, this function is called to keep the
    192  This function checks if a task that entered the runnable state should
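
Lines 75 and 174-192 refer to the rbtree of runnable tasks keyed by p->se.vruntime. As a reminder of the standard CFS weighting (not quoted from this excerpt), vruntime advances as

    \Delta \mathrm{vruntime} = \Delta t_{\mathrm{exec}} \cdot \frac{\mathrm{NICE\_0\_LOAD}}{w_{\mathrm{task}}},

so a task with a larger weight (lower nice value) accumulates vruntime more slowly, stays further to the left of the tree, and is therefore picked more often, receiving a proportionally larger share of CPU time.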

  sched-eevdf.rst
    14   runnable tasks with the same priority. To do so, it assigns a virtual run

  sched-bwc.rst
    15   slices as threads in the cgroup become runnable. Once all quota has been
    202  decide which application is chosen to run, as they will both be runnable and
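
As a made-up bandwidth-control example for line 15: with a quota of 50 ms against a 100 ms period, the group is limited on average to

    \frac{\mathrm{quota}}{\mathrm{period}} = \frac{50\ \mathrm{ms}}{100\ \mathrm{ms}} = 0.5\ \text{CPUs of runtime per period},

and once the quota is exhausted its runnable threads are throttled until the next period refills it.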

/linux/tools/testing/selftests/sched_ext/

  maximal.bpf.c
    42   void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable) in BPF_STRUCT_OPS() argument
    144  .runnable = (void *) maximal_runnable,

  select_cpu_vtime.bpf.c
    67   bool runnable) in BPF_STRUCT_OPS() argument

/linux/Documentation/timers/

  no_hz.rst
    24   have only one runnable task (CONFIG_NO_HZ_FULL=y). Unless you
    44   will frequently be multiple runnable tasks per CPU. In these cases,
    107  If a CPU has only one runnable task, there is little point in sending it
    109  Note that omitting scheduling-clock ticks for CPUs with only one runnable
    113  sending scheduling-clock interrupts to CPUs with a single runnable task,
    257  runnable task for a given CPU, even though there are a number
    260  runnable high-priority SCHED_FIFO task and an arbitrary number
    267  single runnable SCHED_FIFO task and multiple runnable SCHED_OTHER
    270  And even when there are multiple runnable task [all...]

/linux/Documentation/trace/rv/

  monitor_sched.rst
    67   The set non runnable on its own context (snroc) monitor ensures changes in a
    270  is woken up or set to ``runnable``.
    287  back to runnable, the resulting switch (if there) looks like a yield to the
    292  This monitor doesn't include a running state, ``sleepable`` and ``runnable``
    312  | | _blocking H runnable H | |

/linux/fs/bcachefs/

  alloc_types.h
    77   x(runnable) \
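
The "x(runnable) \" line above is one entry in an x-macro list. As a generic illustration of that pattern only (the list name and the other entries below are invented; the point is the technique of expanding one list into both an enum and a matching name table):

    #include <stdio.h>

    #define STATE_LIST()        \
            x(stopped)          \
            x(waiting)          \
            x(runnable)         \
            x(running)

    /* Expansion 1: an enum constant per entry. */
    enum state {
    #define x(n)    STATE_##n,
            STATE_LIST()
    #undef x
            STATE_NR,
    };

    /* Expansion 2: the matching string table. */
    static const char * const state_names[] = {
    #define x(n)    #n,
            STATE_LIST()
    #undef x
    };

    int main(void)
    {
            printf("state %d is \"%s\"\n", STATE_runnable, state_names[STATE_runnable]);
            return 0;
    }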

/linux/drivers/gpu/drm/panthor/

  panthor_sched.c
    231   * @runnable: Runnable group lists.
    238   struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT]; member
    286   * This will force a tick, so other runnable groups can be scheduled if one
    689   * panthor_group::groups::{runnable,idle} and
    1187  /* Other reasons are not blocking. Consider the queue as runnable in cs_slot_sync_queue_state_locked()
    2134  &sched->groups.runnable[group->priority]); in tick_ctx_cleanup()
    2154  &sched->groups.runnable[group->priority]); in tick_ctx_cleanup()
    2308  list_move_tail(&group->run_node, &sched->groups.runnable[prio]); in tick_ctx_apply()
    2338  if (!list_empty(&sched->groups.runnable[ctx->min_priority])) { in tick_ctx_update_resched_target()
    2393  &sched->groups.runnable[pri in tick_work() [all...]

/linux/Documentation/tools/rv/

  rv-mon-sched.rst
    52   * snroc: set non runnable on its own context

/linux/Documentation/virt/

  guest-halt-polling.rst
    18   even with other runnable tasks in the host.

/linux/Documentation/accounting/

  delay-accounting.rst
    7    runnable task may wait for a free CPU to run on.
    12   a) waiting for a CPU (while being runnable)

  taskstats-struct.rst
    112  /* Delay waiting for cpu, while runnable

/linux/Documentation/virt/kvm/

  halt-polling.rst
    18   interval or some other task on the runqueue is runnable the scheduler is
    150  - Halt polling will only be conducted by the host when no other tasks are runnable on

/linux/Documentation/admin-guide/pm/

  cpuidle.rst
    90   Tasks can be in various states. In particular, they are *runnable* if there are
    93   events to occur or similar). When a task becomes runnable, the CPU scheduler
    94   assigns it to one of the available CPUs to run and if there are no more runnable
    97   another CPU). [If there are multiple runnable tasks assigned to one CPU
    101  The special "idle" task becomes runnable if there are no other runnable tasks
    193  multiple runnable tasks assigned to one CPU at the same time, the only way to
    216  not be shared between multiple runnable tasks, the primary reason for using the

/linux/tools/perf/Documentation/

  perf-sched.txt
    67   task scheduling delay (time between runnable and actually running) and

/linux/Documentation/arch/arm64/

  asymmetric-32bit.rst
    154  ``KVM_EXIT_FAIL_ENTRY`` and will remain non-runnable until successfully

/linux/Documentation/arch/s390/

  vfio-ccw.rst
    331  space, and assemble a runnable kernel channel program by updating the
    382  channel program, which becomes runnable for a real device.
|