Lines matching refs:timer

9  *  In contrast to the low-resolution timeout API, aka timer wheel,
16 * Based on the original timer wheel code
42 #include <linux/timer.h>
48 #include <trace/events/timer.h>
64 * The timer bases:
67 * into the timer bases by the hrtimer_base_type enum. When trying
137 * timer->base->cpu_base
151 * means that all timers which are tied to this base via timer->base are
157 * When the timer's base is locked, and the timer removed from the list, it is
158 * possible to set timer->base = &migration_base and drop the lock: the timer
162 struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
164 __acquires(&timer->base->lock)
169 base = READ_ONCE(timer->base);
172 if (likely(base == timer->base))
174 /* The timer has migrated to another CPU: */
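
The fragments above belong to the SMP lock_hrtimer_base(). Reassembled, the lockless retry idiom looks roughly like this (a sketch reconstructed from the lines shown, not a verbatim copy):

	for (;;) {
		struct hrtimer_clock_base *base = READ_ONCE(timer->base);

		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;	/* base is stable while locked */
			/* The timer has migrated to another CPU: retry */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
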
185 * If the elected target is remote and its next event is after the timer
195 static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
210 * next remote target event is after this timer. Keep the
217 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
238 * We switch the timer base to a power-optimized selected CPU target,
241 * - timer migration is enabled
242 * - the timer callback is not running
243 * - the timer is not the first expiring timer on the new target
245 * If one of the above requirements is not fulfilled we move the timer
247 * the timer callback is currently running.
250 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
264 * We are trying to move the timer to new_base.
265 * However, we can't change the timer's base while it is running,
268 * code will take care of this when the timer function has
270 * the timer is enqueued.
272 if (unlikely(hrtimer_callback_running(timer)))
276 WRITE_ONCE(timer->base, &migration_base);
280 if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
285 WRITE_ONCE(timer->base, base);
288 WRITE_ONCE(timer->base, new_base);
290 if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) {
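
Pieced together, the base-switch protocol visible at lines 264-290 is: park the timer on migration_base so concurrent lockers spin, swap the locks, and only publish the new base once the target is confirmed suitable. A reconstructed sketch:

	again:
		WRITE_ONCE(timer->base, &migration_base);	/* lockers now spin */
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
					     this_cpu_base)) {
			/* Target unsuitable: retreat to the old base and retry */
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;	/* retry targets this CPU */
			WRITE_ONCE(timer->base, base);
			goto again;
		}
		WRITE_ONCE(timer->base, new_base);	/* publish the move */
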
301 lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
302 __acquires(&timer->base->cpu_base->lock)
304 struct hrtimer_clock_base *base = timer->base;
378 struct hrtimer *timer = addr;
382 hrtimer_cancel(timer);
383 debug_object_init(timer, &hrtimer_debug_descr);
412 struct hrtimer *timer = addr;
416 hrtimer_cancel(timer);
417 debug_object_free(timer, &hrtimer_debug_descr);
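
Lines 378-417 are the debugobjects fixup handlers. Reassembled, the free-path fixup looks like this; the init-path twin differs only in calling debug_object_init() instead:

	static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
	{
		struct hrtimer *timer = addr;

		switch (state) {
		case ODEBUG_STATE_ACTIVE:
			/* Object is still armed: cancel before releasing it */
			hrtimer_cancel(timer);
			debug_object_free(timer, &hrtimer_debug_descr);
			return true;
		default:
			return false;
		}
	}
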
432 static inline void debug_hrtimer_init(struct hrtimer *timer)
434 debug_object_init(timer, &hrtimer_debug_descr);
437 static inline void debug_hrtimer_init_on_stack(struct hrtimer *timer)
439 debug_object_init_on_stack(timer, &hrtimer_debug_descr);
442 static inline void debug_hrtimer_activate(struct hrtimer *timer,
445 debug_object_activate(timer, &hrtimer_debug_descr);
448 static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
450 debug_object_deactivate(timer, &hrtimer_debug_descr);
453 void destroy_hrtimer_on_stack(struct hrtimer *timer)
455 debug_object_free(timer, &hrtimer_debug_descr);
461 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
462 static inline void debug_hrtimer_init_on_stack(struct hrtimer *timer) { }
463 static inline void debug_hrtimer_activate(struct hrtimer *timer,
465 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
468 static inline void debug_setup(struct hrtimer *timer, clockid_t clockid, enum hrtimer_mode mode)
470 debug_hrtimer_init(timer);
471 trace_hrtimer_setup(timer, clockid, mode);
474 static inline void debug_setup_on_stack(struct hrtimer *timer, clockid_t clockid,
477 debug_hrtimer_init_on_stack(timer);
478 trace_hrtimer_setup(timer, clockid, mode);
481 static inline void debug_activate(struct hrtimer *timer,
484 debug_hrtimer_activate(timer, mode);
485 trace_hrtimer_start(timer, mode);
488 static inline void debug_deactivate(struct hrtimer *timer)
490 debug_hrtimer_deactivate(timer);
491 trace_hrtimer_cancel(timer);
521 struct hrtimer *timer;
524 timer = container_of(next, struct hrtimer, node);
525 if (timer == exclude) {
526 /* Get to the next timer in the queue. */
531 timer = container_of(next, struct hrtimer, node);
533 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
537 /* Skip cpu_base update if a timer is being excluded. */
541 if (timer->is_soft)
542 cpu_base->softirq_next_timer = timer;
544 cpu_base->next_timer = timer;
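
The scan at lines 521-544 walks each base's timerqueue, optionally skipping one excluded timer, and normalizes the expiry to CLOCK_MONOTONIC by subtracting base->offset. Reconstructed body as run for each active base inside the usual for-each-base loop (error paths elided):

	next = timerqueue_getnext(&base->active);
	timer = container_of(next, struct hrtimer, node);
	if (timer == exclude) {
		/* Get to the next timer in the queue. */
		next = timerqueue_iterate_next(next);
		if (!next)
			continue;	/* nothing else on this base */
		timer = container_of(next, struct hrtimer, node);
	}
	expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	if (expires < expires_next) {
		expires_next = expires;
		/* Skip cpu_base update if a timer is being excluded. */
		if (exclude)
			continue;
		if (timer->is_soft)
			cpu_base->softirq_next_timer = timer;
		else
			cpu_base->next_timer = timer;
	}
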
623 * If a softirq timer is expiring first, update cpu_base->next_timer
669 * If a hang was detected in the last timer interrupt then we
706 /* High resolution timer related functions */
710 * High resolution timer enabled?
782 * If high resolution mode is active then the next expiring timer
787 * of the next expiring timer is enough. The return from the SMP
804 * When a timer is enqueued and expires earlier than the already enqueued
805 * timers, we have to check whether it expires earlier than the timer for
810 static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
813 struct hrtimer_clock_base *base = timer->base;
814 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
816 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
819 * CLOCK_REALTIME timer might be requested with an absolute
825 if (timer->is_soft) {
841 timer_cpu_base->softirq_next_timer = timer;
850 * If the timer is not on the current cpu, we cannot reprogram
866 cpu_base->next_timer = timer;
868 __hrtimer_reprogram(cpu_base, timer, expires);
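
Reduced to its core, the decision behind hrtimer_reprogram() (lines 810-868) is: a newly armed timer only touches the hardware when it beats the currently programmed expiry on this CPU. A hedged condensation (the in_hrtirq and soft-timer special cases are elided):

	if (expires >= cpu_base->expires_next)
		return;		/* current device programming still wins */

	cpu_base->next_timer = timer;
	cpu_base->expires_next = expires;
	tick_program_event(expires, 1);	/* rearm the clock event device */
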
886 * the next expiring timer.
900 * will reevaluate the first expiring timer of all clock bases
908 * timer in a clock base is moving ahead of the first expiring timer of
940 * when the change moves an affected timer ahead of the first expiring
941 * timer on that CPU. Obviously remote per CPU clock event devices cannot
1017 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
1018 __releases(&timer->base->cpu_base->lock)
1020 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
1024 * hrtimer_forward() - forward the timer expiry
1025 * @timer: hrtimer to forward
1029 * Forward the timer expiry so it will expire in the future.
1032 * This only updates the timer expiry value and does not requeue the timer.
1036 * Context: Can be safely called from the callback function of @timer. If called
1037 * from other contexts @timer must neither be enqueued nor running the
1042 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
1047 delta = ktime_sub(now, hrtimer_get_expires(timer));
1052 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
1062 hrtimer_add_expires_ns(timer, incr * orun);
1063 if (hrtimer_get_expires_tv64(timer) > now)
1071 hrtimer_add_expires(timer, interval);
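
hrtimer_forward() is the standard way to make a callback periodic without drift; hrtimer_forward_now() is the helper that passes the current base time as now. A typical consumer looks like the sketch below (not from this file; my_period is illustrative):

	static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
	{
		const ktime_t my_period = ms_to_ktime(10);
		u64 overruns;

		/* Advance the expiry past "now"; returns intervals added */
		overruns = hrtimer_forward_now(timer, my_period);
		if (overruns > 1)
			pr_warn("hrtimer: missed %llu period(s)\n", overruns - 1);

		return HRTIMER_RESTART;	/* the core requeues at the new expiry */
	}
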
1078 * enqueue_hrtimer - internal function to (re)start a timer
1080 * The timer is inserted in expiry order. Insertion into the
1083 * Returns true when the new timer is the leftmost timer in the tree.
1085 static bool enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
1088 debug_activate(timer, mode);
1094 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
1096 return timerqueue_add(&base->active, &timer->node);
1100 * __remove_hrtimer - internal function to remove a timer
1104 * High resolution timer mode reprograms the clock event device when the
1105 * timer is the one which expires next. The caller can disable this by setting
1107 * anyway (e.g. timer interrupt)
1109 static void __remove_hrtimer(struct hrtimer *timer,
1114 u8 state = timer->state;
1117 WRITE_ONCE(timer->state, newstate);
1121 if (!timerqueue_del(&base->active, &timer->node))
1127 * timer on a remote cpu. No harm as we never dereference
1130 * remote cpu later on if the same timer gets enqueued again.
1132 if (reprogram && timer == cpu_base->next_timer)
1140 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
1143 u8 state = timer->state;
1149 * Remove the timer and force reprogramming when high
1150 * resolution mode is active and the timer is on the current
1151 * CPU. If we remove a timer on another CPU, reprogramming is
1156 debug_deactivate(timer);
1160 * If the timer is not restarted then reprogramming is
1161 * required if the timer is local. If it is local and about
1170 __remove_hrtimer(timer, base, state, reprogram);
1176 static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
1185 timer->is_rel = mode & HRTIMER_MODE_REL;
1186 if (timer->is_rel)
1217 static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1226 * If the timer is on the local cpu base and is the first expiring
1227 * timer then this might end up reprogramming the hardware twice
1229 * reprogram on removal, keep the timer local to the current CPU
1231 * it is the new first expiring timer again or not.
1234 force_local &= base->cpu_base->next_timer == timer;
1243 * Remove an active timer from the queue. In case it is not queued
1247 * If it's on the current CPU and the first expiring timer, then
1248 * skip reprogramming, keep the timer local and enforce
1249 * reprogramming later if it was the first expiring timer. This
1253 remove_hrtimer(timer, base, true, force_local);
1258 tim = hrtimer_update_lowres(timer, tim, mode);
1260 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1262 /* Switch the timer base, if necessary: */
1264 new_base = switch_hrtimer_base(timer, base,
1270 first = enqueue_hrtimer(timer, new_base, mode);
1273 * If the current CPU base is online, then the timer is
1275 * expiring timer there.
1282 * already offline. If the timer is the first to expire,
1296 * hardware by evaluating the new first expiring timer.
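
The force_local reasoning at lines 1217-1296 condenses to the control flow below: keep the timer local when it is this CPU's first expiring timer, skip the removal-time reprogram, and reprogram once after requeueing. A reconstructed sketch with error handling elided:

	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
	force_local &= base->cpu_base->next_timer == timer;

	remove_hrtimer(timer, base, true, force_local);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	new_base = force_local ? base :
		   switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	first = enqueue_hrtimer(timer, new_base, mode);
	if (!force_local)
		return first;	/* caller reprograms if this is the new first */

	/* Re-evaluate the first expiring timer and rearm the hardware */
	hrtimer_force_reprogram(new_base->cpu_base, 1);
	return 0;
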
1304 * @timer: the timer to be added
1306 * @delta_ns: "slack" range for the timer
1307 * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
1311 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1323 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
1325 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
1327 base = lock_hrtimer_base(timer, &flags);
1329 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
1330 hrtimer_reprogram(timer, true);
1332 unlock_hrtimer_base(timer, &flags);
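
Put together, the public API above is used like this from driver code (a self-contained sketch; my_timer, the 100 ms timeout and the 5 ms slack are illustrative):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer my_timer;

	static enum hrtimer_restart my_cb(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;	/* one-shot */
	}

	static void my_arm(void)
	{
		hrtimer_setup(&my_timer, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		/* 100 ms timeout, 5 ms of slack for expiry coalescing */
		hrtimer_start_range_ns(&my_timer, ms_to_ktime(100),
				       5 * NSEC_PER_MSEC, HRTIMER_MODE_REL);
	}
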
1337 * hrtimer_try_to_cancel - try to deactivate a timer
1338 * @timer: hrtimer to stop
1342 * * 0 when the timer was not active
1343 * * 1 when the timer was active
1344 * * -1 when the timer is currently executing the callback function and
1347 int hrtimer_try_to_cancel(struct hrtimer *timer)
1354 * Check lockless first. If the timer is not active (neither
1359 if (!hrtimer_active(timer))
1362 base = lock_hrtimer_base(timer, &flags);
1364 if (!hrtimer_callback_running(timer))
1365 ret = remove_hrtimer(timer, base, false, false);
1367 unlock_hrtimer_base(timer, &flags);
1396 * the timer callback to finish. Drop expiry_lock and reacquire it. That
1424 * deletion of a timer failed because the timer callback function was
1428 * in the middle of a timer callback, then calling hrtimer_cancel() can
1431 * - If the caller is on a remote CPU then it has to spin wait for the timer
1434 * - If the caller originates from the task which preempted the timer
1435 * handler on the same CPU, then spin waiting for the timer handler to
1438 void hrtimer_cancel_wait_running(const struct hrtimer *timer)
1441 struct hrtimer_clock_base *base = READ_ONCE(timer->base);
1444 * Just relax if the timer expires in hard interrupt context or if
1447 if (!timer->is_soft || is_migration_base(base)) {
1454 * held by the softirq across the timer callback. Drop the lock
1455 * immediately so the softirq can expire the next timer. In theory
1456 * the timer could already be running again, but that's more than
1476 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1477 * @timer: the timer to be cancelled
1480 * 0 when the timer was not active
1481 * 1 when the timer was active
1483 int hrtimer_cancel(struct hrtimer *timer)
1488 ret = hrtimer_try_to_cancel(timer);
1491 hrtimer_cancel_wait_running(timer);
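
The three return values of hrtimer_try_to_cancel() compose into the cancel loop that lines 1483-1491 show in fragments; reassembled:

	int hrtimer_cancel(struct hrtimer *timer)
	{
		int ret;

		do {
			ret = hrtimer_try_to_cancel(timer);

			if (ret < 0)	/* callback running: wait lock-free */
				hrtimer_cancel_wait_running(timer);
		} while (ret < 0);

		return ret;
	}
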
1498 * __hrtimer_get_remaining - get remaining time for the timer
1499 * @timer: the timer to read
1502 ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1507 lock_hrtimer_base(timer, &flags);
1509 rem = hrtimer_expires_remaining_adjusted(timer);
1511 rem = hrtimer_expires_remaining(timer);
1512 unlock_hrtimer_base(timer, &flags);
1522 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1541 * hrtimer_next_event_without - time until next expiry event w/o one timer
1542 * @exclude: timer to exclude
1591 static void __hrtimer_setup(struct hrtimer *timer,
1608 memset(timer, 0, sizeof(struct hrtimer));
1622 timer->is_soft = softtimer;
1623 timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
1624 timer->base = &cpu_base->clock_base[base];
1625 timerqueue_init(&timer->node);
1628 ACCESS_PRIVATE(timer, function) = hrtimer_dummy_timeout;
1630 ACCESS_PRIVATE(timer, function) = function;
1634 * hrtimer_setup - initialize a timer to the given clock
1635 * @timer: the timer to be initialized
1646 void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
1649 debug_setup(timer, clock_id, mode);
1650 __hrtimer_setup(timer, function, clock_id, mode);
1655 * hrtimer_setup_on_stack - initialize a timer on stack memory
1656 * @timer: The timer to be initialized
1659 * @mode: The timer mode
1664 void hrtimer_setup_on_stack(struct hrtimer *timer,
1668 debug_setup_on_stack(timer, clock_id, mode);
1669 __hrtimer_setup(timer, function, clock_id, mode);
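
The on-stack variant must be paired with destroy_hrtimer_on_stack() (line 453) so debugobjects can track the object's lifetime; a usage sketch with a hypothetical wait_fn and the my_cb callback from earlier:

	static void wait_fn(void)
	{
		struct hrtimer t;

		hrtimer_setup_on_stack(&t, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_start(&t, ms_to_ktime(1), HRTIMER_MODE_REL);

		/* ... block until my_cb() has fired ... */

		hrtimer_cancel(&t);
		destroy_hrtimer_on_stack(&t);	/* required for on-stack timers */
	}
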
1674 * A timer is active when it is enqueued into the rbtree or the
1680 bool hrtimer_active(const struct hrtimer *timer)
1686 base = READ_ONCE(timer->base);
1689 if (timer->state != HRTIMER_STATE_INACTIVE ||
1690 base->running == timer)
1694 base != READ_ONCE(timer->base));
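
Reassembled from the fragments, hrtimer_active() is a seqcount-style retry loop: a timer counts as active while it is enqueued or its callback is running, and the read is retried if the base's sequence count or the base pointer moved underneath it:

	bool hrtimer_active(const struct hrtimer *timer)
	{
		struct hrtimer_clock_base *base;
		unsigned int seq;

		do {
			base = READ_ONCE(timer->base);
			seq = raw_read_seqcount_begin(&base->seq);

			if (timer->state != HRTIMER_STATE_INACTIVE ||
			    base->running == timer)
				return true;

		} while (read_seqcount_retry(&base->seq, seq) ||
			 base != READ_ONCE(timer->base));

		return false;
	}
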
1704 * - queued: the timer is queued
1705 * - callback: the timer is being run
1706 * - post: the timer is inactive or (re)queued
1708 * On the read side we ensure we observe timer->state and cpu_base->running
1710 * This includes timer->base changing because sequence numbers alone are
1720 struct hrtimer *timer, ktime_t *now,
1729 debug_deactivate(timer);
1730 base->running = timer;
1737 * timer->state == INACTIVE.
1741 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1742 fn = ACCESS_PRIVATE(timer, function);
1746 * timer is restarted with a period then it becomes an absolute
1747 * timer. If it's not restarted, it does not matter.
1750 timer->is_rel = false;
1753 * The timer is marked as running in the CPU base, so it is
1758 trace_hrtimer_expire_entry(timer, now);
1759 expires_in_hardirq = lockdep_hrtimer_enter(timer);
1761 restart = fn(timer);
1764 trace_hrtimer_expire_exit(timer);
1773 * hrtimer_start_range_ns() can have popped in and enqueued the timer
1777 !(timer->state & HRTIMER_STATE_ENQUEUED))
1778 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
1784 * hrtimer_active() cannot observe base->running.timer == NULL &&
1785 * timer->state == INACTIVE.
1789 WARN_ON_ONCE(base->running != timer);
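
The ordering rules at lines 1704-1710 are enforced by the skeleton below: the base->running and timer->state updates are separated by seqcount barriers that pair with hrtimer_active(), and the base lock is dropped around the callback. Reconstructed sketch:

	base->running = timer;
	raw_write_seqcount_barrier(&base->seq);	/* pairs with hrtimer_active() */

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = ACCESS_PRIVATE(timer, function);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	restart = fn(timer);			/* runs without the base lock */
	raw_spin_lock_irq(&cpu_base->lock);

	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

	raw_write_seqcount_barrier(&base->seq);
	base->running = NULL;
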
1806 struct hrtimer *timer;
1808 timer = container_of(node, struct hrtimer, node);
1819 * are to the right of a not yet expired timer, because that
1820 * timer will have to trigger a wakeup anyway.
1822 if (basenow < hrtimer_get_softexpires_tv64(timer))
1825 __run_hrtimer(cpu_base, base, timer, &basenow, flags);
1854 * High resolution timer interrupt
1874 * held to prevent a timer from being enqueued in our queue via
1906 * The next timer had already expired due to:
1989 static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1992 container_of(timer, struct hrtimer_sleeper, timer);
2003 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
2005 * @mode: timer mode abs/rel
2020 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
2023 hrtimer_start_expires(&sl->timer, mode);
2054 __hrtimer_setup(&sl->timer, hrtimer_wakeup, clock_id, mode);
2062 * @mode: timer mode abs/rel
2067 debug_setup_on_stack(&sl->timer, clock_id, mode);
2102 hrtimer_cancel(&t->timer);
2114 ktime_t rem = hrtimer_expires_remaining(&t->timer);
2132 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
2134 destroy_hrtimer_on_stack(&t.timer);
2146 hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns);
2158 restart->nanosleep.clockid = t.timer.base->clockid;
2159 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
2162 destroy_hrtimer_on_stack(&t.timer);
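
The nanosleep fragments above follow the standard sleeper pattern. A hedged reconstruction (hrtimer_setup_sleeper_on_stack() is assumed from the same kernel generation as hrtimer_setup(); restart and signal handling are elided, and the -EINTR return is illustrative):

	static int my_sleep(ktime_t expires)
	{
		struct hrtimer_sleeper t;

		hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_set_expires(&t.timer, expires);

		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
		if (likely(t.task))
			schedule();	/* hrtimer_wakeup() clears t.task */
		__set_current_state(TASK_RUNNING);

		hrtimer_cancel(&t.timer);
		destroy_hrtimer_on_stack(&t.timer);

		return t.task ? -EINTR : 0;	/* task still set => woken early */
	}
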
2251 struct hrtimer *timer;
2255 timer = container_of(node, struct hrtimer, node);
2256 BUG_ON(hrtimer_callback_running(timer));
2257 debug_deactivate(timer);
2261 * timer could be seen as !active and just vanish
2264 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
2265 timer->base = new_base;
2268 * reprogram the event device in case the timer
2274 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
2300 * timer on this CPU. Update it.
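
Finally, the hotplug path at lines 2251-2274, reassembled: each timer is pulled off the dead CPU's base with its ENQUEUED state preserved, repointed, and re-enqueued on the surviving CPU (locking of both bases elided):

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Keep HRTIMER_STATE_ENQUEUED so the timer is never
		 * seen as !active while it changes bases.
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
	}
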