Lines Matching +full:local +full:- +full:timer +full:- +full:stop (kernel/time/tick-sched.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
25 #include <linux/posix-timers.h>
31 #include "tick-internal.h"
33 #include <trace/events/timer.h>
36 * Per-CPU nohz control structure
149 if (ts->inidle) in tick_sched_do_timer()
150 ts->got_idle_tick = 1; in tick_sched_do_timer()
164 if (ts->tick_stopped) { in tick_sched_handle()
167 ts->idle_jiffies++; in tick_sched_handle()
173 ts->next_tick = 0; in tick_sched_handle()
229 if (check_tick_dependency(&ts->tick_dep_mask)) in can_stop_full_tick()
 232  	if (check_tick_dependency(&current->tick_dep_mask)) in can_stop_full_tick()
 235  	if (check_tick_dependency(&current->signal->tick_dep_mask)) in can_stop_full_tick()
253 * re-evaluate its dependency on the tick and restart it if necessary.
267 * re-evaluate its dependency on the tick and restart it if necessary.
278 * Kick all full dynticks CPUs in order to force these to re-evaluate
319 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
329 prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_set_cpu()
332 /* Perf needs local kick that is NMI safe */ in tick_nohz_dep_set_cpu()
336 /* Remote irq work not NMI-safe */ in tick_nohz_dep_set_cpu()
349 atomic_andnot(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_clear_cpu()
354 * Set a per-task tick dependency. RCU need this. Also posix CPU timers
359 if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) { in tick_nohz_dep_set_task()
377 atomic_andnot(BIT(bit), &tsk->tick_dep_mask); in tick_nohz_dep_clear_task()
382 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
387 tick_nohz_dep_set_all(&sig->tick_dep_mask, bit); in tick_nohz_dep_set_signal()
392 atomic_andnot(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_clear_signal()
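
A set bit in a CPU's, task's or signal struct's tick_dep_mask keeps the tick running on a full-dynticks CPU; the helpers above set and clear those bits. A minimal sketch of how a caller might use the per-CPU and per-task variants, assuming only the <linux/tick.h> prototypes (the example_* wrappers and the choice of dependency bits are illustrative, not the in-tree call sites):

#include <linux/tick.h>
#include <linux/sched.h>

/* Keep the tick alive on @cpu while a perf event that needs it is active. */
static void example_pin_tick_for_perf(int cpu, bool active)
{
	if (active)
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
	else
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
}

/* Per-task variant: a posix CPU timer armed on @tsk must keep ticking. */
static void example_pin_tick_for_task(struct task_struct *tsk, bool armed)
{
	if (armed)
		tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
	else
		tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
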
396 * Re-evaluate the need for the tick as we switch the current task.
412 if (ts->tick_stopped) { in __tick_nohz_task_switch()
 413  		if (atomic_read(&current->tick_dep_mask) || in __tick_nohz_task_switch()
 414  		    atomic_read(&current->signal->tick_dep_mask)) in __tick_nohz_task_switch()
421 /* Get the boot-time nohz CPU list from the kernel parameters. */
438 return -EBUSY; in tick_nohz_cpu_down()
455 pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n"); in tick_nohz_init()
485 * NOHZ - aka dynamic tick functionality
507 return ts->tick_stopped; in tick_nohz_tick_stopped()
514 return ts->tick_stopped; in tick_nohz_tick_stopped_cpu()
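
tick_nohz_tick_stopped() and tick_nohz_tick_stopped_cpu() simply report ts->tick_stopped for the local or a given CPU. A hedged sketch of one typical use, assuming only the <linux/tick.h> and <linux/smp.h> prototypes; the helper and the choice of smp_send_reschedule() as the kick are inventions of this example:

#include <linux/tick.h>
#include <linux/smp.h>

/*
 * Before handing deferred work to a remote CPU, check whether that CPU
 * has stopped its tick and therefore needs an explicit kick (an IPI,
 * for instance) to notice the new work promptly.
 */
static void example_kick_if_tickless(int cpu)
{
	if (tick_nohz_tick_stopped_cpu(cpu))
		smp_send_reschedule(cpu);
}
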
518 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
541 * Updates the per-CPU time idle statistics counters
548 if (ts->idle_active) { in update_ts_time_stats()
549 delta = ktime_sub(now, ts->idle_entrytime); in update_ts_time_stats()
551 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); in update_ts_time_stats()
553 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); in update_ts_time_stats()
554 ts->idle_entrytime = now; in update_ts_time_stats()
565 ts->idle_active = 0; in tick_nohz_stop_idle()
572 ts->idle_entrytime = ktime_get(); in tick_nohz_start_idle()
573 ts->idle_active = 1; in tick_nohz_start_idle()
578 * get_cpu_idle_time_us - get the total idle time of a CPU
589 * This function returns -1 if NOHZ is not enabled.
597 return -1; in get_cpu_idle_time_us()
602 idle = ts->idle_sleeptime; in get_cpu_idle_time_us()
604 if (ts->idle_active && !nr_iowait_cpu(cpu)) { in get_cpu_idle_time_us()
605 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_idle_time_us()
607 idle = ktime_add(ts->idle_sleeptime, delta); in get_cpu_idle_time_us()
609 idle = ts->idle_sleeptime; in get_cpu_idle_time_us()
619 * get_cpu_iowait_time_us - get the total iowait time of a CPU
630 * This function returns -1 if NOHZ is not enabled.
638 return -1; in get_cpu_iowait_time_us()
643 iowait = ts->iowait_sleeptime; in get_cpu_iowait_time_us()
645 if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { in get_cpu_iowait_time_us()
646 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_iowait_time_us()
648 iowait = ktime_add(ts->iowait_sleeptime, delta); in get_cpu_iowait_time_us()
650 iowait = ts->iowait_sleeptime; in get_cpu_iowait_time_us()
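
get_cpu_idle_time_us() and get_cpu_iowait_time_us() export the counters updated above. A sketch of the usual calling pattern, assuming the <linux/tick.h> prototypes; the reporting function itself is hypothetical:

#include <linux/tick.h>
#include <linux/printk.h>
#include <linux/types.h>

/*
 * Read the cumulative idle and iowait time of @cpu in microseconds.
 * Passing a non-NULL last-update pointer also refreshes the counters
 * to "now"; a return of (u64)-1 means NOHZ accounting is not enabled
 * and the caller must fall back to jiffies/tick based accounting.
 */
static void example_report_idle_stats(int cpu)
{
	u64 wall, idle_us, iowait_us;

	idle_us = get_cpu_idle_time_us(cpu, &wall);
	iowait_us = get_cpu_iowait_time_us(cpu, NULL);

	if (idle_us == (u64)-1)
		return;

	pr_debug("cpu%d: idle %llu us, iowait %llu us (now %llu us)\n",
		 cpu, idle_us, iowait_us, wall);
}
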
660 hrtimer_cancel(&ts->sched_timer); in tick_nohz_restart()
661 hrtimer_set_expires(&ts->sched_timer, ts->last_tick); in tick_nohz_restart()
664 hrtimer_forward(&ts->sched_timer, now, tick_period); in tick_nohz_restart()
666 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { in tick_nohz_restart()
667 hrtimer_start_expires(&ts->sched_timer, in tick_nohz_restart()
670 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_restart()
674 * Reset to make sure next tick stop doesn't get fooled by past in tick_nohz_restart()
677 ts->next_tick = 0; in tick_nohz_restart()
697 ts->last_jiffies = basejiff; in tick_nohz_next_event()
698 ts->timer_expires_base = basemono; in tick_nohz_next_event()
703 * Aside of that check whether the local timer softirq is in tick_nohz_next_event()
705 * because there is an already expired timer, so it will request in tick_nohz_next_event()
 706  	 * immediate expiry, which rearms the hardware timer with a in tick_nohz_next_event()
715 * Get the next pending timer. If high resolution in tick_nohz_next_event()
716 * timers are enabled this only takes the timer wheel in tick_nohz_next_event()
722 ts->next_timer = next_tmr; in tick_nohz_next_event()
729 * force prod the timer. in tick_nohz_next_event()
731 delta = next_tick - basemono; in tick_nohz_next_event()
734 * Tell the timer code that the base is not idle, i.e. undo in tick_nohz_next_event()
739 * We've not stopped the tick yet, and there's a timer in the in tick_nohz_next_event()
742 if (!ts->tick_stopped) { in tick_nohz_next_event()
743 ts->timer_expires = 0; in tick_nohz_next_event()
755 (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last)) in tick_nohz_next_event()
759 if (delta < (KTIME_MAX - basemono)) in tick_nohz_next_event()
764 ts->timer_expires = min_t(u64, expires, next_tick); in tick_nohz_next_event()
767 return ts->timer_expires; in tick_nohz_next_event()
773 u64 basemono = ts->timer_expires_base; in tick_nohz_stop_tick()
774 u64 expires = ts->timer_expires; in tick_nohz_stop_tick()
777 /* Make sure we won't be trying to stop it twice in a row. */ in tick_nohz_stop_tick()
778 ts->timer_expires_base = 0; in tick_nohz_stop_tick()
783 * the tick timer next, which might be this CPU as well. If we in tick_nohz_stop_tick()
790 ts->do_timer_last = 1; in tick_nohz_stop_tick()
792 ts->do_timer_last = 0; in tick_nohz_stop_tick()
796 if (ts->tick_stopped && (expires == ts->next_tick)) { in tick_nohz_stop_tick()
798 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) in tick_nohz_stop_tick()
802 …printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->exp… in tick_nohz_stop_tick()
803 basemono, ts->next_tick, dev->next_event, in tick_nohz_stop_tick()
804 hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer)); in tick_nohz_stop_tick()
814 if (!ts->tick_stopped) { in tick_nohz_stop_tick()
818 ts->last_tick = hrtimer_get_expires(&ts->sched_timer); in tick_nohz_stop_tick()
819 ts->tick_stopped = 1; in tick_nohz_stop_tick()
823 ts->next_tick = tick; in tick_nohz_stop_tick()
826 * If the expiration time == KTIME_MAX, then we simply stop in tick_nohz_stop_tick()
827 * the tick timer. in tick_nohz_stop_tick()
830 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) in tick_nohz_stop_tick()
831 hrtimer_cancel(&ts->sched_timer); in tick_nohz_stop_tick()
835 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { in tick_nohz_stop_tick()
836 hrtimer_start(&ts->sched_timer, tick, in tick_nohz_stop_tick()
839 hrtimer_set_expires(&ts->sched_timer, tick); in tick_nohz_stop_tick()
846 ts->timer_expires_base = 0; in tick_nohz_retain_tick()
865 * Clear the timer idle flag, so we avoid IPIs on remote queueing and in tick_nohz_restart_sched_tick()
873 * Cancel the scheduled timer and restore the tick in tick_nohz_restart_sched_tick()
875 ts->tick_stopped = 0; in tick_nohz_restart_sched_tick()
876 ts->idle_exittime = now; in tick_nohz_restart_sched_tick()
889 if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) in tick_nohz_full_update_tick()
894 else if (ts->tick_stopped) in tick_nohz_full_update_tick()
904 * the CPU which runs the tick timer next. If we don't drop in can_stop_idle_tick()
915 ts->next_tick = 0; in can_stop_idle_tick()
919 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) in can_stop_idle_tick()
930 pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n", in can_stop_idle_tick()
946 * assigned before entering dyntick-idle mode, in can_stop_idle_tick()
952 /* Should not happen for nohz-full */ in can_stop_idle_tick()
967 * tick timer expiration time is known already. in __tick_nohz_idle_stop_tick()
969 if (ts->timer_expires_base) in __tick_nohz_idle_stop_tick()
970 expires = ts->timer_expires; in __tick_nohz_idle_stop_tick()
976 ts->idle_calls++; in __tick_nohz_idle_stop_tick()
979 int was_stopped = ts->tick_stopped; in __tick_nohz_idle_stop_tick()
983 ts->idle_sleeps++; in __tick_nohz_idle_stop_tick()
984 ts->idle_expires = expires; in __tick_nohz_idle_stop_tick()
986 if (!was_stopped && ts->tick_stopped) { in __tick_nohz_idle_stop_tick()
987 ts->idle_jiffies = ts->last_jiffies; in __tick_nohz_idle_stop_tick()
996 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
998 * When the next event is more than a tick into the future, stop the idle tick
1016 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
1030 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_enter()
1032 ts->inidle = 1; in tick_nohz_idle_enter()
1039 * tick_nohz_irq_exit - update next tick event from interrupt exit
1042 * a reschedule, it may still add, modify or delete a timer, enqueue
1044 * So we need to re-calculate and reprogram the next tick event.
1050 if (ts->inidle) in tick_nohz_irq_exit()
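
For context, a rough paraphrase of how the interrupt-exit path decides to call tick_nohz_irq_exit(); this is modelled on tick_irq_exit() in kernel/softirq.c and the exact conditions vary between kernel versions, so treat it as a sketch rather than the in-tree code:

#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/hardirq.h>

static void example_tick_irq_exit(void)
{
	int cpu = smp_processor_id();

	/* Only bother if the CPU is about to go idle or is a nohz_full CPU. */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		/* Don't reprogram the tick from a nested interrupt. */
		if (!in_irq())
			tick_nohz_irq_exit();
	}
}
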
1057 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1063 if (ts->got_idle_tick) { in tick_nohz_idle_got_tick()
1064 ts->got_idle_tick = 0; in tick_nohz_idle_got_tick()
1071 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1079 return __this_cpu_read(tick_cpu_device.evtdev)->next_event; in tick_nohz_get_next_hrtimer()
1083 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1097 ktime_t now = ts->idle_entrytime; in tick_nohz_get_sleep_length()
1100 WARN_ON_ONCE(!ts->inidle); in tick_nohz_get_sleep_length()
1102 *delta_next = ktime_sub(dev->next_event, now); in tick_nohz_get_sleep_length()
1112 * If the next highres timer to expire is earlier than next_event, the in tick_nohz_get_sleep_length()
1116 hrtimer_next_event_without(&ts->sched_timer)); in tick_nohz_get_sleep_length()
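
tick_nohz_get_sleep_length() is what cpuidle governors use to estimate how long the CPU may sleep; it is only meaningful on the idle path, between tick_nohz_idle_enter() and tick_nohz_idle_exit(). A sketch of that usage under those assumptions (the 1 ms threshold and the helper are invented for the example):

#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/types.h>

static bool example_deep_idle_worthwhile(void)
{
	ktime_t delta_next;
	ktime_t sleep_len;

	/* Expected sleep length; delta_next gets the time to the next timer event. */
	sleep_len = tick_nohz_get_sleep_length(&delta_next);

	/* Only pick a deep idle state if we expect at least 1 ms of sleep. */
	return ktime_to_us(sleep_len) >= 1000;
}
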
1122 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1131 return ts->idle_calls; in tick_nohz_get_idle_calls_cpu()
1135 * tick_nohz_get_idle_calls - return the current idle calls counter value
1143 return ts->idle_calls; in tick_nohz_get_idle_calls()
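
The idle-calls counters let other code detect whether a CPU has gone through another idle entry since it last looked; the schedutil cpufreq governor uses this pattern to spot a CPU that never went idle. A sketch, where the saved counter belongs to the example and not to the tick code:

#include <linux/tick.h>
#include <linux/types.h>

static unsigned long example_saved_idle_calls;

/* Return true if @cpu entered idle again since the previous call. */
static bool example_cpu_idled_again(int cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(cpu);
	bool again = idle_calls != example_saved_idle_calls;

	example_saved_idle_calls = idle_calls;
	return again;
}
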
1158 ticks = jiffies - ts->idle_jiffies; in tick_nohz_account_idle_ticks()
1177 if (ts->tick_stopped) in tick_nohz_idle_restart_tick()
1182 * tick_nohz_idle_exit - restart the idle tick from the idle task
1196 WARN_ON_ONCE(!ts->inidle); in tick_nohz_idle_exit()
1197 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_exit()
1199 ts->inidle = 0; in tick_nohz_idle_exit()
1200 idle_active = ts->idle_active; in tick_nohz_idle_exit()
1201 tick_stopped = ts->tick_stopped; in tick_nohz_idle_exit()
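
tick_nohz_idle_enter(), tick_nohz_idle_stop_tick() and tick_nohz_idle_exit() are meant to be called in a fixed order from the idle task. A heavily simplified sketch of that ordering, loosely following do_idle() in kernel/sched/idle.c (RCU, cpuidle governor and polling details are omitted, so this is not the in-tree loop):

#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/cpu.h>

static void example_idle_loop(void)
{
	tick_nohz_idle_enter();			/* mark inidle, start idle time accounting */

	while (!need_resched()) {
		tick_nohz_idle_stop_tick();	/* stop the tick if the next event is far away */
		arch_cpu_idle();		/* sleep until the next interrupt */
	}

	tick_nohz_idle_exit();			/* restart the tick and account the idle time */
}
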
1224 dev->next_event = KTIME_MAX; in tick_nohz_handler()
1230 if (unlikely(ts->tick_stopped)) in tick_nohz_handler()
1233 hrtimer_forward(&ts->sched_timer, now, tick_period); in tick_nohz_handler()
1234 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_handler()
1241 ts->nohz_mode = mode; in tick_nohz_activate()
1248 * tick_nohz_switch_to_nohz - switch to nohz mode
1265 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in tick_nohz_switch_to_nohz()
1269 hrtimer_set_expires(&ts->sched_timer, next); in tick_nohz_switch_to_nohz()
1270 hrtimer_forward_now(&ts->sched_timer, tick_period); in tick_nohz_switch_to_nohz()
1271 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_switch_to_nohz()
1280 if (!ts->idle_active && !ts->tick_stopped) in tick_nohz_irq_enter()
1283 if (ts->idle_active) in tick_nohz_irq_enter()
1285 if (ts->tick_stopped) in tick_nohz_irq_enter()
1307 * High resolution timer specific code
1311 * We rearm the timer until we get disabled by the idle code.
1314 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) in tick_sched_timer() argument
1317 container_of(timer, struct tick_sched, sched_timer); in tick_sched_timer()
1330 ts->next_tick = 0; in tick_sched_timer()
1333 if (unlikely(ts->tick_stopped)) in tick_sched_timer()
1336 hrtimer_forward(timer, now, tick_period); in tick_sched_timer()
1352 * tick_setup_sched_timer - setup the tick emulation timer
1360 * Emulate tick processing via per-CPU hrtimers: in tick_setup_sched_timer()
1362 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in tick_setup_sched_timer()
1363 ts->sched_timer.function = tick_sched_timer; in tick_setup_sched_timer()
1365 /* Get the next period (per-CPU) */ in tick_setup_sched_timer()
1366 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); in tick_setup_sched_timer()
1373 hrtimer_add_expires_ns(&ts->sched_timer, offset); in tick_setup_sched_timer()
1376 hrtimer_forward(&ts->sched_timer, now, tick_period); in tick_setup_sched_timer()
1377 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); in tick_setup_sched_timer()
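
tick_setup_sched_timer() and tick_sched_timer() above implement the standard self-rearming hrtimer pattern: the callback forwards its own expiry by one period and returns HRTIMER_RESTART. A generic, stand-alone sketch of that pattern (all example_* names are made up; only the hrtimer API itself is real):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;
static ktime_t example_period;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* Do the periodic work here, then push the expiry one period ahead. */
	hrtimer_forward_now(timer, example_period);
	return HRTIMER_RESTART;
}

static void example_timer_start(void)
{
	example_period = ms_to_ktime(10);
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_timer_fn;
	hrtimer_start(&example_timer, example_period, HRTIMER_MODE_REL);
}
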
1388 if (ts->sched_timer.base) in tick_cancel_sched_timer()
1389 hrtimer_cancel(&ts->sched_timer); in tick_cancel_sched_timer()
1414 set_bit(0, &ts->check_clocks); in tick_oneshot_notify()
1420 * Called cyclic from the hrtimer softirq (driven by the timer
1421 * softirq) allow_nohz signals, that we can switch into low-res nohz
1429 if (!test_and_clear_bit(0, &ts->check_clocks)) in tick_check_oneshot_change()
1432 if (ts->nohz_mode != NOHZ_MODE_INACTIVE) in tick_check_oneshot_change()