Lines Matching full:timer
9 * In contrast to the low-resolution timeout API, aka timer wheel,
16 * Based on the original timer wheel code
41 #include <linux/timer.h>
47 #include <trace/events/timer.h>
61 * The timer bases:
64 * into the timer bases by the hrtimer_base_type enum. When trying
135 * timer->base->cpu_base
154 * means that all timers which are tied to this base via timer->base are
160 * When the timer's base is locked, and the timer removed from list, it is
161 * possible to set timer->base = &migration_base and drop the lock: the timer
165 struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, in lock_hrtimer_base() argument
171 base = READ_ONCE(timer->base); in lock_hrtimer_base()
174 if (likely(base == timer->base)) in lock_hrtimer_base()
176 /* The timer has migrated to another CPU: */ in lock_hrtimer_base()
184 * We do not migrate the timer when it is expiring before the next
193 hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) in hrtimer_check_target() argument
197 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); in hrtimer_check_target()
213 * We switch the timer base to a power-optimized selected CPU target,
216 * - timer migration is enabled
217 * - the timer callback is not running
218 * - the timer is not the first expiring timer on the new target
220 * If one of the above requirements is not fulfilled we move the timer
222 * the timer callback is currently running.
225 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, in switch_hrtimer_base() argument
239 * We are trying to move timer to new_base. in switch_hrtimer_base()
240 * However we can't change timer's base while it is running, in switch_hrtimer_base()
243 * code will take care of this when the timer function has in switch_hrtimer_base()
245 * the timer is enqueued. in switch_hrtimer_base()
247 if (unlikely(hrtimer_callback_running(timer))) in switch_hrtimer_base()
251 WRITE_ONCE(timer->base, &migration_base); in switch_hrtimer_base()
256 hrtimer_check_target(timer, new_base)) { in switch_hrtimer_base()
260 WRITE_ONCE(timer->base, base); in switch_hrtimer_base()
263 WRITE_ONCE(timer->base, new_base); in switch_hrtimer_base()
266 hrtimer_check_target(timer, new_base)) { in switch_hrtimer_base()
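The migration conditions listed above only matter for timers started without a pinned mode. A hedged call-site sketch of how a caller opts out of the power-aware CPU selection (the dev->tick name is illustrative, not from this file):

	/* A *_PINNED mode keeps the timer on the CPU that arms it. */
	hrtimer_start(&dev->tick, ms_to_ktime(1), HRTIMER_MODE_REL_PINNED);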
282 lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) in lock_hrtimer_base() argument
284 struct hrtimer_clock_base *base = timer->base; in lock_hrtimer_base()
358 struct hrtimer *timer = addr; in hrtimer_fixup_init() local
362 hrtimer_cancel(timer); in hrtimer_fixup_init()
363 debug_object_init(timer, &hrtimer_debug_descr); in hrtimer_fixup_init()
392 struct hrtimer *timer = addr; in hrtimer_fixup_free() local
396 hrtimer_cancel(timer); in hrtimer_fixup_free()
397 debug_object_free(timer, &hrtimer_debug_descr); in hrtimer_fixup_free()
412 static inline void debug_hrtimer_init(struct hrtimer *timer) in debug_hrtimer_init() argument
414 debug_object_init(timer, &hrtimer_debug_descr); in debug_hrtimer_init()
417 static inline void debug_hrtimer_activate(struct hrtimer *timer, in debug_hrtimer_activate() argument
420 debug_object_activate(timer, &hrtimer_debug_descr); in debug_hrtimer_activate()
423 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) in debug_hrtimer_deactivate() argument
425 debug_object_deactivate(timer, &hrtimer_debug_descr); in debug_hrtimer_deactivate()
428 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
431 void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id, in hrtimer_init_on_stack() argument
434 debug_object_init_on_stack(timer, &hrtimer_debug_descr); in hrtimer_init_on_stack()
435 __hrtimer_init(timer, clock_id, mode); in hrtimer_init_on_stack()
445 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr); in hrtimer_init_sleeper_on_stack()
450 void destroy_hrtimer_on_stack(struct hrtimer *timer) in destroy_hrtimer_on_stack() argument
452 debug_object_free(timer, &hrtimer_debug_descr); in destroy_hrtimer_on_stack()
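A hedged sketch of how the on-stack variants above pair up when CONFIG_DEBUG_OBJECTS_TIMERS is enabled (the callback name is illustrative):

	struct hrtimer t;

	hrtimer_init_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t.function = my_callback;		/* assumed callback */
	hrtimer_start(&t, ms_to_ktime(5), HRTIMER_MODE_REL);
	/* ... wait for or cancel the timer ... */
	hrtimer_cancel(&t);
	destroy_hrtimer_on_stack(&t);		/* releases the debug object */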
458 static inline void debug_hrtimer_init(struct hrtimer *timer) { } in debug_hrtimer_init() argument
459 static inline void debug_hrtimer_activate(struct hrtimer *timer, in debug_hrtimer_activate() argument
461 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } in debug_hrtimer_deactivate() argument
465 debug_init(struct hrtimer *timer, clockid_t clockid, in debug_init() argument
468 debug_hrtimer_init(timer); in debug_init()
469 trace_hrtimer_init(timer, clockid, mode); in debug_init()
472 static inline void debug_activate(struct hrtimer *timer, in debug_activate() argument
475 debug_hrtimer_activate(timer, mode); in debug_activate()
476 trace_hrtimer_start(timer, mode); in debug_activate()
479 static inline void debug_deactivate(struct hrtimer *timer) in debug_deactivate() argument
481 debug_hrtimer_deactivate(timer); in debug_deactivate()
482 trace_hrtimer_cancel(timer); in debug_deactivate()
512 struct hrtimer *timer; in __hrtimer_next_event_base() local
515 timer = container_of(next, struct hrtimer, node); in __hrtimer_next_event_base()
516 if (timer == exclude) { in __hrtimer_next_event_base()
517 /* Get to the next timer in the queue. */ in __hrtimer_next_event_base()
522 timer = container_of(next, struct hrtimer, node); in __hrtimer_next_event_base()
524 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in __hrtimer_next_event_base()
528 /* Skip cpu_base update if a timer is being excluded. */ in __hrtimer_next_event_base()
532 if (timer->is_soft) in __hrtimer_next_event_base()
533 cpu_base->softirq_next_timer = timer; in __hrtimer_next_event_base()
535 cpu_base->next_timer = timer; in __hrtimer_next_event_base()
640 * timer interrupt could occur too late. in hrtimer_force_reprogram()
658 * If a hang was detected in the last timer interrupt then we in hrtimer_force_reprogram()
677 /* High resolution timer related functions */
 681  * High resolution timer enabled?
768 * When a timer is enqueued and expires earlier than the already enqueued
 769  * timers, we have to check whether it expires earlier than the timer for
774 static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram) in hrtimer_reprogram() argument
777 struct hrtimer_clock_base *base = timer->base; in hrtimer_reprogram()
778 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in hrtimer_reprogram()
780 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); in hrtimer_reprogram()
783 * CLOCK_REALTIME timer might be requested with an absolute in hrtimer_reprogram()
789 if (timer->is_soft) { in hrtimer_reprogram()
805 timer_cpu_base->softirq_next_timer = timer; in hrtimer_reprogram()
814 * If the timer is not on the current cpu, we cannot reprogram in hrtimer_reprogram()
833 /* Update the pointer to the next expiring timer */ in hrtimer_reprogram()
834 cpu_base->next_timer = timer; in hrtimer_reprogram()
841 * If a hang was detected in the last timer interrupt then we in hrtimer_reprogram()
842 * do not schedule a timer which is earlier than the expiry in hrtimer_reprogram()
850 * Program the timer hardware. We enforce the expiry for in hrtimer_reprogram()
862 * We might have to reprogram the high resolution timer interrupt. On
864 * resolution timer interrupts. On UP we just disable interrupts and
877 * During resume we might have to reprogram the high resolution timer
895 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) in unlock_hrtimer_base() argument
897 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); in unlock_hrtimer_base()
901 * hrtimer_forward - forward the timer expiry
902 * @timer: hrtimer to forward
906 * Forward the timer expiry so it will expire in the future.
909 * Can be safely called from the callback function of @timer. If
910 * called from other contexts @timer must neither be enqueued nor
914 * Note: This only updates the timer expiry value and does not requeue
915 * the timer.
917 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) in hrtimer_forward() argument
922 delta = ktime_sub(now, hrtimer_get_expires(timer)); in hrtimer_forward()
927 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) in hrtimer_forward()
937 hrtimer_add_expires_ns(timer, incr * orun); in hrtimer_forward()
938 if (hrtimer_get_expires_tv64(timer) > now) in hrtimer_forward()
946 hrtimer_add_expires(timer, interval); in hrtimer_forward()
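The usual consumer of this helper is a periodic callback; a minimal hedged sketch, assuming a 100 ms period (the function name and interval are illustrative):

	static enum hrtimer_restart my_periodic_fn(struct hrtimer *t)
	{
		/* push the expiry forward by whole periods past "now" */
		hrtimer_forward_now(t, ms_to_ktime(100));
		return HRTIMER_RESTART;	/* the core re-enqueues the timer */
	}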
953 * enqueue_hrtimer - internal function to (re)start a timer
955 * The timer is inserted in expiry order. Insertion into the
958 * Returns 1 when the new timer is the leftmost timer in the tree.
960 static int enqueue_hrtimer(struct hrtimer *timer, in enqueue_hrtimer() argument
964 debug_activate(timer, mode); in enqueue_hrtimer()
969 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED); in enqueue_hrtimer()
971 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
975 * __remove_hrtimer - internal function to remove a timer
979 * High resolution timer mode reprograms the clock event device when the
980 * timer is the one which expires next. The caller can disable this by setting
982 * anyway (e.g. timer interrupt)
984 static void __remove_hrtimer(struct hrtimer *timer, in __remove_hrtimer() argument
989 u8 state = timer->state; in __remove_hrtimer()
992 WRITE_ONCE(timer->state, newstate); in __remove_hrtimer()
996 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1002 * timer on a remote cpu. No harm as we never dereference in __remove_hrtimer()
1005 * remote cpu later on if the same timer gets enqueued again. in __remove_hrtimer()
1007 if (reprogram && timer == cpu_base->next_timer) in __remove_hrtimer()
1015 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart) in remove_hrtimer() argument
1017 u8 state = timer->state; in remove_hrtimer()
1023 * Remove the timer and force reprogramming when high in remove_hrtimer()
1024 * resolution mode is active and the timer is on the current in remove_hrtimer()
1025 * CPU. If we remove a timer on another CPU, reprogramming is in remove_hrtimer()
1030 debug_deactivate(timer); in remove_hrtimer()
1036 __remove_hrtimer(timer, base, state, reprogram); in remove_hrtimer()
1042 static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim, in hrtimer_update_lowres() argument
1051 timer->is_rel = mode & HRTIMER_MODE_REL; in hrtimer_update_lowres()
1052 if (timer->is_rel) in hrtimer_update_lowres()
1083 static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, in __hrtimer_start_range_ns() argument
1089 /* Remove an active timer from the queue: */ in __hrtimer_start_range_ns()
1090 remove_hrtimer(timer, base, true); in __hrtimer_start_range_ns()
1095 tim = hrtimer_update_lowres(timer, tim, mode); in __hrtimer_start_range_ns()
1097 hrtimer_set_expires_range_ns(timer, tim, delta_ns); in __hrtimer_start_range_ns()
1099 /* Switch the timer base, if necessary: */ in __hrtimer_start_range_ns()
1100 new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); in __hrtimer_start_range_ns()
1102 return enqueue_hrtimer(timer, new_base, mode); in __hrtimer_start_range_ns()
1107 * @timer: the timer to be added
1109 * @delta_ns: "slack" range for the timer
1110 * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
1114 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, in hrtimer_start_range_ns() argument
1126 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); in hrtimer_start_range_ns()
1128 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard); in hrtimer_start_range_ns()
1130 base = lock_hrtimer_base(timer, &flags); in hrtimer_start_range_ns()
1132 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) in hrtimer_start_range_ns()
1133 hrtimer_reprogram(timer, true); in hrtimer_start_range_ns()
1135 unlock_hrtimer_base(timer, &flags); in hrtimer_start_range_ns()
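A hedged call-site sketch: arm the timer 5 ms out and allow 1 ms of slack so the expiry can be coalesced with other events (the timer name is illustrative):

	hrtimer_start_range_ns(&dev->tick, ms_to_ktime(5), NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);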
1140 * hrtimer_try_to_cancel - try to deactivate a timer
1141 * @timer: hrtimer to stop
1145 * * 0 when the timer was not active
1146 * * 1 when the timer was active
1147 * * -1 when the timer is currently executing the callback function and
1150 int hrtimer_try_to_cancel(struct hrtimer *timer) in hrtimer_try_to_cancel() argument
1157 * Check lockless first. If the timer is not active (neither in hrtimer_try_to_cancel()
1162 if (!hrtimer_active(timer)) in hrtimer_try_to_cancel()
1165 base = lock_hrtimer_base(timer, &flags); in hrtimer_try_to_cancel()
1167 if (!hrtimer_callback_running(timer)) in hrtimer_try_to_cancel()
1168 ret = remove_hrtimer(timer, base, false); in hrtimer_try_to_cancel()
1170 unlock_hrtimer_base(timer, &flags); in hrtimer_try_to_cancel()
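A hedged sketch of how a caller acts on the three documented return values; the retry loop is essentially what hrtimer_cancel() further below does (the timer name is illustrative):

	for (;;) {
		int ret = hrtimer_try_to_cancel(&dev->tick);

		if (ret >= 0)		/* 0: was not queued, 1: dequeued it */
			break;
		cpu_relax();		/* -1: callback is running, retry */
	}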
1197  * the timer callback to finish. Drop expiry_lock and reacquire it. That
1213 * deletion of a timer failed because the timer callback function was
1217 * in the middle of a timer callback, then calling del_timer_sync() can
1220 * - If the caller is on a remote CPU then it has to spin wait for the timer
1223 * - If the caller originates from the task which preempted the timer
1224 * handler on the same CPU, then spin waiting for the timer handler to
1227 void hrtimer_cancel_wait_running(const struct hrtimer *timer) in hrtimer_cancel_wait_running() argument
1230 struct hrtimer_clock_base *base = READ_ONCE(timer->base); in hrtimer_cancel_wait_running()
1233 * Just relax if the timer expires in hard interrupt context or if in hrtimer_cancel_wait_running()
1236 if (!timer->is_soft || is_migration_base(base)) { in hrtimer_cancel_wait_running()
1243 * held by the softirq across the timer callback. Drop the lock in hrtimer_cancel_wait_running()
1244 * immediately so the softirq can expire the next timer. In theory in hrtimer_cancel_wait_running()
1245 * the timer could already be running again, but that's more than in hrtimer_cancel_wait_running()
1265 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1266 * @timer: the timer to be cancelled
1269 * 0 when the timer was not active
1270 * 1 when the timer was active
1272 int hrtimer_cancel(struct hrtimer *timer) in hrtimer_cancel() argument
1277 ret = hrtimer_try_to_cancel(timer); in hrtimer_cancel()
1280 hrtimer_cancel_wait_running(timer); in hrtimer_cancel()
1287 * hrtimer_get_remaining - get remaining time for the timer
1288 * @timer: the timer to read
1291 ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust) in __hrtimer_get_remaining() argument
1296 lock_hrtimer_base(timer, &flags); in __hrtimer_get_remaining()
1298 rem = hrtimer_expires_remaining_adjusted(timer); in __hrtimer_get_remaining()
1300 rem = hrtimer_expires_remaining(timer); in __hrtimer_get_remaining()
1301 unlock_hrtimer_base(timer, &flags); in __hrtimer_get_remaining()
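A small usage sketch of the public wrapper, which calls this helper with adjust=false (the timer name is illustrative):

	ktime_t left = hrtimer_get_remaining(&dev->tick);

	pr_debug("timer expires in %lld ns\n", ktime_to_ns(left));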
1311 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1330 * hrtimer_next_event_without - time until next expiry event w/o one timer
1331 * @exclude: timer to exclude
1375 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, in __hrtimer_init() argument
1391 memset(timer, 0, sizeof(struct hrtimer)); in __hrtimer_init()
1405 timer->is_soft = softtimer; in __hrtimer_init()
1406 timer->is_hard = !!(mode & HRTIMER_MODE_HARD); in __hrtimer_init()
1407 timer->base = &cpu_base->clock_base[base]; in __hrtimer_init()
1408 timerqueue_init(&timer->node); in __hrtimer_init()
1412 * hrtimer_init - initialize a timer to the given clock
1413 * @timer: the timer to be initialized
1423 void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, in hrtimer_init() argument
1426 debug_init(timer, clock_id, mode); in hrtimer_init()
1427 __hrtimer_init(timer, clock_id, mode); in hrtimer_init()
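The canonical setup sequence built on this initializer, as a hedged sketch (struct my_dev and the callback are illustrative, not from this file):

	static enum hrtimer_restart my_tick_fn(struct hrtimer *t)
	{
		/* one-shot: do the work and do not requeue */
		return HRTIMER_NORESTART;
	}

	static void my_dev_arm(struct my_dev *dev)
	{
		hrtimer_init(&dev->tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev->tick.function = my_tick_fn;
		hrtimer_start(&dev->tick, ms_to_ktime(10), HRTIMER_MODE_REL);
	}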
1432 * A timer is active, when it is enqueued into the rbtree or the
1438 bool hrtimer_active(const struct hrtimer *timer) in hrtimer_active() argument
1444 base = READ_ONCE(timer->base); in hrtimer_active()
1447 if (timer->state != HRTIMER_STATE_INACTIVE || in hrtimer_active()
1448 base->running == timer) in hrtimer_active()
1452 base != READ_ONCE(timer->base)); in hrtimer_active()
1462 * - queued: the timer is queued
1463  * - callback: the timer is being run
1464 * - post: the timer is inactive or (re)queued
1466 * On the read side we ensure we observe timer->state and cpu_base->running
1468 * This includes timer->base changing because sequence numbers alone are
1478 struct hrtimer *timer, ktime_t *now, in __run_hrtimer() argument
1487 debug_deactivate(timer); in __run_hrtimer()
1488 base->running = timer; in __run_hrtimer()
1495 * timer->state == INACTIVE. in __run_hrtimer()
1499 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); in __run_hrtimer()
1500 fn = timer->function; in __run_hrtimer()
1504 * timer is restarted with a period then it becomes an absolute in __run_hrtimer()
1505  * timer. If it's not restarted it does not matter. in __run_hrtimer()
1508 timer->is_rel = false; in __run_hrtimer()
1511 * The timer is marked as running in the CPU base, so it is in __run_hrtimer()
1516 trace_hrtimer_expire_entry(timer, now); in __run_hrtimer()
1517 expires_in_hardirq = lockdep_hrtimer_enter(timer); in __run_hrtimer()
1519 restart = fn(timer); in __run_hrtimer()
1522 trace_hrtimer_expire_exit(timer); in __run_hrtimer()
1531 * hrtimer_start_range_ns() can have popped in and enqueued the timer in __run_hrtimer()
1535 !(timer->state & HRTIMER_STATE_ENQUEUED)) in __run_hrtimer()
1536 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS); in __run_hrtimer()
1542 * hrtimer_active() cannot observe base->running.timer == NULL && in __run_hrtimer()
1543 * timer->state == INACTIVE. in __run_hrtimer()
1547 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1564 struct hrtimer *timer; in __hrtimer_run_queues() local
1566 timer = container_of(node, struct hrtimer, node); in __hrtimer_run_queues()
1577 * are right-of a not yet expired timer, because that in __hrtimer_run_queues()
1578 * timer will have to trigger a wakeup anyway. in __hrtimer_run_queues()
1580 if (basenow < hrtimer_get_softexpires_tv64(timer)) in __hrtimer_run_queues()
1583 __run_hrtimer(cpu_base, base, timer, &basenow, flags); in __hrtimer_run_queues()
1612 * High resolution timer interrupt
1632 * held to prevent that a timer is enqueued in our queue via in hrtimer_interrupt()
1664  * The next timer had already expired due to: in hrtimer_interrupt()
1765 static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) in hrtimer_wakeup() argument
1768 container_of(timer, struct hrtimer_sleeper, timer); in hrtimer_wakeup()
1779 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
1781 * @mode: timer mode abs/rel
1796 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) in hrtimer_sleeper_start_expires()
1799 hrtimer_start_expires(&sl->timer, mode); in hrtimer_sleeper_start_expires()
1830 __hrtimer_init(&sl->timer, clock_id, mode); in __hrtimer_init_sleeper()
1831 sl->timer.function = hrtimer_wakeup; in __hrtimer_init_sleeper()
1839 * @mode: timer mode abs/rel
1844 debug_init(&sl->timer, clock_id, mode); in hrtimer_init_sleeper()
1880 hrtimer_cancel(&t->timer); in do_nanosleep()
1892 ktime_t rem = hrtimer_expires_remaining(&t->timer); in do_nanosleep()
1911 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); in hrtimer_nanosleep_restart()
1913 destroy_hrtimer_on_stack(&t.timer); in hrtimer_nanosleep_restart()
1930 hrtimer_set_expires_range_ns(&t.timer, rqtp, slack); in hrtimer_nanosleep()
1943 restart->nanosleep.clockid = t.timer.base->clockid; in hrtimer_nanosleep()
1944 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); in hrtimer_nanosleep()
1946 destroy_hrtimer_on_stack(&t.timer); in hrtimer_nanosleep()
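A hedged sketch of the sleeper pattern these fragments implement; it mirrors do_nanosleep(), and the 10 ms timeout is illustrative:

	struct hrtimer_sleeper t;

	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&t.timer, ms_to_ktime(10));

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
	if (t.task)			/* hrtimer_wakeup() clears this */
		schedule();
	__set_current_state(TASK_RUNNING);

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);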
2024 struct hrtimer *timer; in migrate_hrtimer_list() local
2028 timer = container_of(node, struct hrtimer, node); in migrate_hrtimer_list()
2029 BUG_ON(hrtimer_callback_running(timer)); in migrate_hrtimer_list()
2030 debug_deactivate(timer); in migrate_hrtimer_list()
2034 * timer could be seen as !active and just vanish away in migrate_hrtimer_list()
2037 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0); in migrate_hrtimer_list()
2038 timer->base = new_base; in migrate_hrtimer_list()
2041 * reprogram the event device in case the timer in migrate_hrtimer_list()
2047 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS); in migrate_hrtimer_list()
2082 * timer on this CPU. Update it. in hrtimers_dead_cpu()
2108 * @mode: timer mode
2109 * @clock_id: timer clock to be used
2135 hrtimer_set_expires_range_ns(&t.timer, *expires, delta); in schedule_hrtimeout_range_clock()
2141 hrtimer_cancel(&t.timer); in schedule_hrtimeout_range_clock()
2142 destroy_hrtimer_on_stack(&t.timer); in schedule_hrtimeout_range_clock()
2153 * @mode: timer mode
2162  * but may decide to fire the timer earlier, though never earlier than @expires.
2177 * Returns 0 when the timer has expired. If the task was woken before the
2178 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2192 * @mode: timer mode
2211 * Returns 0 when the timer has expired. If the task was woken before the
2212 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
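A hedged usage sketch of schedule_hrtimeout_range() as described above: sleep roughly 2 ms with 500 us of slack, waking early only for signals or explicit wakeups (the values are illustrative):

	ktime_t to = ms_to_ktime(2);
	int ret;

	set_current_state(TASK_INTERRUPTIBLE);
	ret = schedule_hrtimeout_range(&to, 500 * NSEC_PER_USEC,
				       HRTIMER_MODE_REL);
	/* ret == 0: the timeout expired; ret == -EINTR: woken up early */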