// SPDX-License-Identifier: GPL-2.0
/*
 * Timer events oriented (TEO) CPU idle governor
 *
 * Copyright (C) 2018 - 2021 Intel Corporation
 *
 * Util-awareness mechanism:
 * Copyright (C) 2022 Arm Ltd.
 */

/**
 * DOC: teo-description
 * Of course, non-timer wakeup sources are more important in some use cases,
 * which can be covered by taking a few most recent idle time intervals of the
 * CPU into account.
 * the current sleep length (the candidate idle state) and compute 3 sums as
 * follows:
 * - The sum of the "hits" and "intercepts" metrics for the candidate state
 *   and all of the deeper idle states (it represents the cases in which the
 *   CPU was idle long enough to avoid being intercepted if the sleep length
 *   had been equal to the current one).
 * - The sum of the "intercepts" metrics for all of the idle states shallower
 *   than the candidate one (it represents the cases in which the CPU was not
 *   idle long enough to avoid being intercepted if the sleep length had been
 *   equal to the current one).
 * - The sum of the numbers of recent intercepts for all of the idle states
 *   shallower than the candidate one.
 *
 * If the second sum is greater than the first one, or the third sum is greater
 * than a half of NR_RECENT, the CPU is likely to wake up early, so look for an
 * alternative idle state to select:
 * - Traverse the idle states shallower than the candidate one in the
 *   descending order.
 * - For each of them compute the sum of the "intercepts" metrics and the sum
 *   of the numbers of recent intercepts over all of the idle states between
 *   it and the candidate one (including the former and excluding the latter).
 * - If each of these sums that needs to be taken into account (because the
 *   check related to it has indicated that the CPU is likely to wake up
 *   early) is greater than a half of the corresponding sum computed in the
 *   first step (which means that the target residency of the state had
 *   not exceeded the idle duration in over a half of the relevant cases),
 *   select the given idle state instead of the candidate one.
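 *
 * As a rough sketch (not the actual code; the identifiers below are just
 * shorthand for the sums described above), the two checks that trigger this
 * search amount to:
 *
 *	look_for_alternative =
 *		shallower_intercepts > candidate_and_deeper_hits_intercepts ||
 *		shallower_recent > NR_RECENT / 2;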
 * Util-awareness mechanism:
 *
 * The idea behind the util-awareness extension is that there are two distinct
 * scenarios for the CPU which should result in two different approaches to
 * idle state selection - utilized and not utilized.
 *
 * When the CPU is utilized while going into idle, it is more likely than not
 * to be woken up to do more work soon, so a shallower idle state should be
 * selected to minimize latency and maximize performance. When the CPU is not
 * being utilized, the usual metrics-based approach to selecting the deepest
 * available idle state is preferable to take advantage of the power saving.
 *
 * The threshold is computed per-CPU as a percentage of the CPU's capacity
 * by bit shifting the capacity value.
 *
 * Before selecting the next idle state, the governor compares the current CPU
 * utilization against this precomputed threshold: below it, the usual metrics
 * are used; above it, the closest shallower non-polling idle state is selected
 * instead.
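 *
 * A minimal sketch of that comparison, using helpers this file relies on
 * elsewhere (the exact shift value is an implementation detail):
 *
 *	threshold = arch_scale_cpu_capacity(cpu) >> UTIL_THRESHOLD_SHIFT;
 *	utilized  = sched_cpu_util(cpu) > threshold;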
 * struct teo_bin - Metrics used by the TEO cpuidle governor.
 *
 * struct teo_cpu - CPU data used by the TEO cpuidle governor.
 * @time_span_ns: Time between idle state selection and post-wakeup update.
 * teo_cpu_is_utilized - Check if the CPU's utilization is above the threshold.
 */
static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
{
	return sched_cpu_util(cpu) > cpu_data->util_threshold;
}
 * teo_update - Update CPU metrics after wakeup.
	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);

	if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
		measured_ns = U64_MAX;
	} else {
		u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;

		measured_ns = dev->last_residency_ns;
		/*
		 * The delay between the wakeup and the first instruction
		 * executed by the CPU is not likely to be worst-case every
		 * time, so take 1/2 of the exit latency as a rough
		 * approximation of the average of it.
		 */
		if (measured_ns >= lat_ns)
			measured_ns -= lat_ns / 2;
	cpu_data->total = 0;

	for (i = 0; i < drv->state_count; i++) {
		struct teo_bin *bin = &cpu_data->state_bins[i];
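		/*
		 * Decay the "hits" and "intercepts" metrics: shedding
		 * 1/2^DECAY_SHIFT of the value on every update makes each of
		 * them an exponential moving average of past events.
		 */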
		bin->hits -= bin->hits >> DECAY_SHIFT;
		bin->intercepts -= bin->intercepts >> DECAY_SHIFT;

		cpu_data->total += bin->hits + bin->intercepts;

		target_residency_ns = drv->states[i].target_residency_ns;

		if (target_residency_ns <= cpu_data->sleep_length_ns) {
			idx_timer = i;
			if (target_residency_ns <= measured_ns)
				idx_duration = i;
		}
	}
	i = cpu_data->next_recent_idx++;
	if (cpu_data->next_recent_idx >= NR_RECENT)
		cpu_data->next_recent_idx = 0;
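	/*
	 * The oldest entry in the sliding window of NR_RECENT wakeups is
	 * about to be overwritten, so drop its contribution to the "recent"
	 * count of the bin it points to (if any).
	 */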
	if (cpu_data->recent_idx[i] >= 0)
		cpu_data->state_bins[cpu_data->recent_idx[i]].recent--;
	/*
	 * If the deepest state's target residency is below the tick length,
	 * make a record of that to help teo_select() decide whether or not
	 * to stop the tick. This effectively adds an extra hits-only bin
	 * beyond the last state-related one.
	 */
	if (target_residency_ns < TICK_NSEC) {
		cpu_data->tick_hits -= cpu_data->tick_hits >> DECAY_SHIFT;

		cpu_data->total += cpu_data->tick_hits;
		if (TICK_NSEC <= cpu_data->sleep_length_ns) {
			idx_timer = drv->state_count;
			if (TICK_NSEC <= measured_ns) {
				cpu_data->tick_hits += PULSE;
				goto end;
			}
		}
	}
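	/*
	 * If the measured idle duration falls into the same bin as the one
	 * matching the time till the closest timer event, the wakeup is a
	 * "hit"; otherwise it is an "intercept".
	 */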
	if (idx_timer == idx_duration) {
		cpu_data->state_bins[idx_timer].hits += PULSE;
		cpu_data->recent_idx[i] = -1;
	} else {
		cpu_data->state_bins[idx_duration].intercepts += PULSE;
		cpu_data->state_bins[idx_duration].recent++;
		cpu_data->recent_idx[i] = idx_duration;
	}
end:
	cpu_data->total += PULSE;
}
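
/*
 * teo_state_ok - Check if the tick allows the given idle state to be used.
 *
 * With the scheduler tick stopped, the CPU may stay idle much longer than
 * expected, so only states whose target residency is at least one tick
 * period are considered suitable then.
 */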
static bool teo_state_ok(int i, struct cpuidle_driver *drv)
{
	return !tick_nohz_tick_stopped() ||
	       drv->states[i].target_residency_ns >= TICK_NSEC;
}
 * teo_find_shallower_state - Find shallower idle state matching given duration.
	for (i = state_idx - 1; i >= 0; i--) {
		if (dev->states_usage[i].disable ||
		    (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING))
			continue;

		state_idx = i;
		if (drv->states[i].target_residency_ns <= duration_ns)
			break;
	}
 * teo_select - Selects the next idle state to enter.
	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	int idx0 = 0, idx = -1;
	if (dev->last_state_idx >= 0) {
		teo_update(drv, dev);
		dev->last_state_idx = -1;
	}
	cpu_data->time_span_ns = local_clock();

	cpu_data->sleep_length_ns = KTIME_MAX;
	if (drv->state_count < 2) {
		idx = 0;
		goto out_tick;
	}

	if (!dev->states_usage[0].disable)
		idx = 0;
	cpu_utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
	/*
	 * If the CPU is being utilized over the threshold and there are only 2
	 * states to choose from, the metrics need not be considered, so choose
	 * the shallowest non-polling state and exit.
	 */
	if (drv->state_count < 3 && cpu_utilized) {
		if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
		     teo_state_ok(0, drv)) || dev->states_usage[1].disable) {
			idx = 0;
			goto out_tick;
		}
		/* Assume that state 1 is not a polling one and use it. */
		idx = 1;
		duration_ns = drv->states[1].target_residency_ns;
		goto end;
	}
	for (i = 1; i < drv->state_count; i++) {
		struct teo_bin *prev_bin = &cpu_data->state_bins[i-1];
		struct cpuidle_state *s = &drv->states[i];

		/*
		 * Update the sums of idle state metrics for all of the states
		 * shallower than the current one.
		 */
		intercept_sum += prev_bin->intercepts;
		hit_sum += prev_bin->hits;
		recent_sum += prev_bin->recent;
		if (dev->states_usage[i].disable)
			continue;

		if (s->exit_latency_ns <= latency_req)
			constraint_idx = i;

		/* Save the sums for the current state. */
		idx_intercept_sum = intercept_sum;
		idx_hit_sum = hit_sum;
		idx_recent_sum = recent_sum;
	}
	duration_ns = drv->states[idx].target_residency_ns;
	tick_intercept_sum = intercept_sum +
			cpu_data->state_bins[drv->state_count-1].intercepts;
	/*
	 * If the sum of the intercepts metric for all of the idle states
	 * shallower than the current candidate one (idx) is greater than the
	 * sum of the intercepts and hits metrics for the candidate state and
	 * all of the deeper states, or the sum of the numbers of recent
	 * intercepts over all of the states shallower than the candidate one
	 * is greater than a half of NR_RECENT (which means that the CPU is
	 * likely to break the current idle duration pattern), look for an
	 * alternative idle state to select.
	 */
	alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
	alt_recent = idx_recent_sum > NR_RECENT / 2;
	if (alt_recent || alt_intercepts) {
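		/*
		 * Note: cpu_data->total - idx_hit_sum - idx_intercept_sum is
		 * the combined "hits" plus "intercepts" count for the
		 * candidate state and all of the deeper ones (including the
		 * extra tick bin, when present), so the first check above is
		 * exactly the "second sum greater than the first" test from
		 * the description at the top of this file.
		 */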
		/*
		 * Look for the deepest idle state whose target residency had
		 * not exceeded the idle duration in over a half of the relevant
		 * cases in the past.
		 */
		intercept_sum = 0;
		recent_sum = 0;

		for (i = idx - 1; i >= 0; i--) {
			struct teo_bin *bin = &cpu_data->state_bins[i];

			intercept_sum += bin->intercepts;
			recent_sum += bin->recent;
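			/*
			 * A state qualifies if the intercepts (and, when the
			 * recent check triggered this search, the recent
			 * intercepts) accumulated between it and the candidate
			 * exceed half of the corresponding sums computed for
			 * the candidate itself.
			 */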
			if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
			    (!alt_intercepts ||
			     2 * intercept_sum > idx_intercept_sum)) {
				/*
				 * Use the current state unless it is too
				 * shallow or disabled, in which case take the
				 * first enabled state that is deep enough.
				 */
				if (teo_state_ok(i, drv) &&
				    !dev->states_usage[i].disable)
					idx = i;
				break;
			}
			if (dev->states_usage[i].disable)
				continue;

			/*
			 * The current state is too shallow, but if an
			 * alternative candidate state has been found, it may
			 * still turn out to be a better choice.
			 */
	/*
	 * If there is a latency constraint, it may be necessary to select an
	 * idle state shallower than the current candidate one.
	 */
	if (idx > constraint_idx)
		idx = constraint_idx;
	/*
	 * If the CPU is being utilized over the threshold, choose a shallower
	 * non-polling state to improve latency, unless the scheduler tick has
	 * been stopped already and the shallower state's target residency is
	 * not sufficiently large.
	 */
	if (cpu_utilized) {
		i = teo_find_shallower_state(drv, dev, idx, KTIME_MAX, true);
		if (teo_state_ok(i, drv))
			idx = i;
	}
	/*
	 * Skip the timers check if state 0 is the current candidate one,
	 * because an immediate non-timer wakeup is expected in that case.
	 */
	if (!idx)
		goto out_tick;
	/*
	 * If state 0 is a polling one, check if the target residency of
	 * the current candidate state is low enough and skip the timers
	 * check in that case.
	 */
	if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
	    drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS)
		goto out_tick;
	duration_ns = tick_nohz_get_sleep_length(&delta_tick);
	cpu_data->sleep_length_ns = duration_ns;
	if (drv->states[idx].target_residency_ns > duration_ns) {
	/*
	 * If the selected state's target residency is below the tick length
	 * and intercepts occurring before the tick length are the majority of
	 * total wakeup events, do not stop the tick.
	 */
	if (drv->states[idx].target_residency_ns < TICK_NSEC &&
	    tick_intercept_sum > cpu_data->total / 2 + cpu_data->total / 8)
		duration_ns = TICK_NSEC / 2;
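	/*
	 * Note: total / 2 + total / 8 is 5/8 of the total, so the check above
	 * fires only when intercepts below the tick length account for more
	 * than 62.5% of all recorded events.
	 */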
end:
	/*
	 * Allow the tick to be stopped unless the selected state is a polling
	 * one or the expected idle duration is shorter than the tick period
	 * length.
	 */
	if ((!(drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
	     duration_ns >= TICK_NSEC) || tick_nohz_tick_stopped())
		return idx;
	if (idx > idx0 &&
	    drv->states[idx].target_residency_ns > delta_tick)
		idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);

out_tick:
	*stop_tick = false;
	return idx;
 * teo_reflect - Note that governor data for the CPU need to be updated.
	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);

	dev->last_state_idx = state;
	if (dev->poll_time_limit ||
	    (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
		dev->poll_time_limit = false;
		cpu_data->time_span_ns = cpu_data->sleep_length_ns;
	} else {
		cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
	}
 * teo_enable_device - Initialize the governor's data for the target CPU.
	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
	unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu);
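	/*
	 * The utilization threshold is a fixed fraction of the CPU's
	 * capacity: capacity >> UTIL_THRESHOLD_SHIFT, i.e. capacity / 2^shift
	 * (a shift of 6, for instance, corresponds to ~1.56% of the capacity).
	 */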
	cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT;

	for (i = 0; i < NR_RECENT; i++)
		cpu_data->recent_idx[i] = -1;

	return 0;