kernel/time/hrtimer.c: lines matching "timer"

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 * High-resolution kernel timers
9 * In contrast to the low-resolution timeout API, aka timer wheel,
16 * Based on the original timer wheel code
41 #include <linux/timer.h>
47 #include <trace/events/timer.h>
49 #include "tick-internal.h"
53 * cpu_base->active
56 #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
61 * The timer bases:
64 * into the timer bases by the hrtimer_base_type enum. When trying
118 [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
135 * timer->base->cpu_base
154 * means that all timers which are tied to this base via timer->base are
160 * When the timer's base is locked, and the timer removed from list, it is
161 * possible to set timer->base = &migration_base and drop the lock: the timer
165 struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, in lock_hrtimer_base() argument
167 __acquires(&timer->base->lock) in lock_hrtimer_base()
172 base = READ_ONCE(timer->base); in lock_hrtimer_base()
174 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
175 if (likely(base == timer->base)) in lock_hrtimer_base()
177 /* The timer has migrated to another CPU: */ in lock_hrtimer_base()
178 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
185 * We do not migrate the timer when it is expiring before the next
191 * Called with cpu_base->lock of target cpu held.
194 hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) in hrtimer_check_target() argument
198 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); in hrtimer_check_target()
199 return expires < new_base->cpu_base->expires_next; in hrtimer_check_target()
214 * We switch the timer base to a power-optimized selected CPU target,
216 * - NO_HZ_COMMON is enabled
217 * - timer migration is enabled
218 * - the timer callback is not running
219 * - the timer is not the first expiring timer on the new target
221 * If one of the above requirements is not fulfilled we move the timer
223 * the timer callback is currently running.
226 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, in switch_hrtimer_base() argument
231 int basenum = base->index; in switch_hrtimer_base()
236 new_base = &new_cpu_base->clock_base[basenum]; in switch_hrtimer_base()
240 * We are trying to move the timer to new_base. in switch_hrtimer_base()
241 * However we can't change the timer's base while it is running, in switch_hrtimer_base()
244 * code will take care of this when the timer function has in switch_hrtimer_base()
246 * the timer is enqueued. in switch_hrtimer_base()
248 if (unlikely(hrtimer_callback_running(timer))) in switch_hrtimer_base()
252 WRITE_ONCE(timer->base, &migration_base); in switch_hrtimer_base()
253 raw_spin_unlock(&base->cpu_base->lock); in switch_hrtimer_base()
254 raw_spin_lock(&new_base->cpu_base->lock); in switch_hrtimer_base()
257 hrtimer_check_target(timer, new_base)) { in switch_hrtimer_base()
258 raw_spin_unlock(&new_base->cpu_base->lock); in switch_hrtimer_base()
259 raw_spin_lock(&base->cpu_base->lock); in switch_hrtimer_base()
261 WRITE_ONCE(timer->base, base); in switch_hrtimer_base()
264 WRITE_ONCE(timer->base, new_base); in switch_hrtimer_base()
267 hrtimer_check_target(timer, new_base)) { in switch_hrtimer_base()
283 lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) in lock_hrtimer_base() argument
284 __acquires(&timer->base->cpu_base->lock) in lock_hrtimer_base()
286 struct hrtimer_clock_base *base = timer->base; in lock_hrtimer_base()
288 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
312 tmp = dclc < 0 ? -dclc : dclc; in __ktime_divns()
321 return dclc < 0 ? -tmp : tmp; in __ktime_divns()
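
Taken together, the two matched lines show the sign handling in __ktime_divns(): work on the magnitude with an unsigned division, then restore the sign. A minimal sketch of that pattern follows (hypothetical helper name; the real function also shifts a 64-bit divisor below 2^32 before calling do_div()):

#include <linux/math64.h>

/* Sketch only: the same magnitude-then-sign pattern as __ktime_divns(). */
static inline s64 ktime_divns_sketch(s64 dclc, u32 div)
{
	u64 tmp = dclc < 0 ? -dclc : dclc;	/* work on the magnitude */

	do_div(tmp, div);			/* unsigned 64/32 division */
	return dclc < 0 ? -tmp : tmp;		/* restore the sign */
}
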
351 return ((struct hrtimer *) addr)->function; in hrtimer_debug_hint()
356 * - an active object is initialized
360 struct hrtimer *timer = addr; in hrtimer_fixup_init() local
364 hrtimer_cancel(timer); in hrtimer_fixup_init()
365 debug_object_init(timer, &hrtimer_debug_descr); in hrtimer_fixup_init()
374 * - an active object is activated
375 * - an unknown non-static object is activated
390 * - an active object is freed
394 struct hrtimer *timer = addr; in hrtimer_fixup_free() local
398 hrtimer_cancel(timer); in hrtimer_fixup_free()
399 debug_object_free(timer, &hrtimer_debug_descr); in hrtimer_fixup_free()
414 static inline void debug_hrtimer_init(struct hrtimer *timer) in debug_hrtimer_init() argument
416 debug_object_init(timer, &hrtimer_debug_descr); in debug_hrtimer_init()
419 static inline void debug_hrtimer_activate(struct hrtimer *timer, in debug_hrtimer_activate() argument
422 debug_object_activate(timer, &hrtimer_debug_descr); in debug_hrtimer_activate()
425 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) in debug_hrtimer_deactivate() argument
427 debug_object_deactivate(timer, &hrtimer_debug_descr); in debug_hrtimer_deactivate()
430 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
433 void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id, in hrtimer_init_on_stack() argument
436 debug_object_init_on_stack(timer, &hrtimer_debug_descr); in hrtimer_init_on_stack()
437 __hrtimer_init(timer, clock_id, mode); in hrtimer_init_on_stack()
447 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr); in hrtimer_init_sleeper_on_stack()
452 void destroy_hrtimer_on_stack(struct hrtimer *timer) in destroy_hrtimer_on_stack() argument
454 debug_object_free(timer, &hrtimer_debug_descr); in destroy_hrtimer_on_stack()
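
hrtimer_init_on_stack() and destroy_hrtimer_on_stack() must be used as a pair so debugobjects can track the on-stack object's lifetime. A hedged usage sketch; my_fn() and on_stack_example() are hypothetical:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart my_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;	/* one-shot: do not requeue */
}

/* Sketch: an on-stack timer's whole lifetime inside one function. */
static void on_stack_example(void)
{
	struct hrtimer t;

	hrtimer_init_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t.function = my_fn;
	hrtimer_start(&t, ms_to_ktime(10), HRTIMER_MODE_REL);

	hrtimer_cancel(&t);		/* wait out a running callback */
	destroy_hrtimer_on_stack(&t);	/* tell debugobjects it is gone */
}
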
460 static inline void debug_hrtimer_init(struct hrtimer *timer) { } in debug_hrtimer_init() argument
461 static inline void debug_hrtimer_activate(struct hrtimer *timer, in debug_hrtimer_activate() argument
463 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } in debug_hrtimer_deactivate() argument
467 debug_init(struct hrtimer *timer, clockid_t clockid, in debug_init() argument
470 debug_hrtimer_init(timer); in debug_init()
471 trace_hrtimer_init(timer, clockid, mode); in debug_init()
474 static inline void debug_activate(struct hrtimer *timer, in debug_activate() argument
477 debug_hrtimer_activate(timer, mode); in debug_activate()
478 trace_hrtimer_start(timer, mode); in debug_activate()
481 static inline void debug_deactivate(struct hrtimer *timer) in debug_deactivate() argument
483 debug_hrtimer_deactivate(timer); in debug_deactivate()
484 trace_hrtimer_cancel(timer); in debug_deactivate()
498 return &cpu_base->clock_base[idx]; in __next_base()
514 struct hrtimer *timer; in __hrtimer_next_event_base() local
516 next = timerqueue_getnext(&base->active); in __hrtimer_next_event_base()
517 timer = container_of(next, struct hrtimer, node); in __hrtimer_next_event_base()
518 if (timer == exclude) { in __hrtimer_next_event_base()
519 /* Get to the next timer in the queue. */ in __hrtimer_next_event_base()
524 timer = container_of(next, struct hrtimer, node); in __hrtimer_next_event_base()
526 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in __hrtimer_next_event_base()
530 /* Skip cpu_base update if a timer is being excluded. */ in __hrtimer_next_event_base()
534 if (timer->is_soft) in __hrtimer_next_event_base()
535 cpu_base->softirq_next_timer = timer; in __hrtimer_next_event_base()
537 cpu_base->next_timer = timer; in __hrtimer_next_event_base()
541 * clock_was_set() might have changed base->offset of any of in __hrtimer_next_event_base()
559 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
566 * - HRTIMER_ACTIVE_ALL,
567 * - HRTIMER_ACTIVE_SOFT, or
568 * - HRTIMER_ACTIVE_HARD.
577 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) { in __hrtimer_get_next_event()
578 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; in __hrtimer_get_next_event()
579 cpu_base->softirq_next_timer = NULL; in __hrtimer_get_next_event()
583 next_timer = cpu_base->softirq_next_timer; in __hrtimer_get_next_event()
587 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; in __hrtimer_get_next_event()
588 cpu_base->next_timer = next_timer; in __hrtimer_get_next_event()
605 if (!cpu_base->softirq_activated) { in hrtimer_update_next_event()
611 cpu_base->softirq_expires_next = soft; in hrtimer_update_next_event()
616 * If a softirq timer is expiring first, update cpu_base->next_timer in hrtimer_update_next_event()
620 cpu_base->next_timer = cpu_base->softirq_next_timer; in hrtimer_update_next_event()
629 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; in hrtimer_update_base()
630 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; in hrtimer_update_base()
631 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; in hrtimer_update_base()
633 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, in hrtimer_update_base()
636 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; in hrtimer_update_base()
637 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; in hrtimer_update_base()
638 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; in hrtimer_update_base()
649 cpu_base->hres_active : 0; in __hrtimer_hres_active()
661 cpu_base->expires_next = expires_next; in __hrtimer_reprogram()
667 * If a hang was detected in the last timer interrupt then we in __hrtimer_reprogram()
680 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected) in __hrtimer_reprogram()
689 * Called with interrupts disabled and base->lock held
698 if (skip_equal && expires_next == cpu_base->expires_next) in hrtimer_force_reprogram()
701 __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next); in hrtimer_force_reprogram()
704 /* High resolution timer related functions */
708 * High resolution timer enabled?
725 * hrtimer_high_res_enabled - query whether the highres mode is enabled
743 base->cpu); in hrtimer_switch_to_hres()
746 base->hres_active = 1; in hrtimer_switch_to_hres()
766 * - CONFIG_HIGH_RES_TIMERS is enabled.
767 * - CONFIG_NO_HZ_COMMON is enabled
782 * If high resolution mode is active then the next expiring timer in retrigger_next_event()
787 * of the next expiring timer is enough. The return from the SMP in retrigger_next_event()
794 raw_spin_lock(&base->lock); in retrigger_next_event()
800 raw_spin_unlock(&base->lock); in retrigger_next_event()
804 * When a timer is enqueued and expires earlier than the already enqueued
805 * timers, we have to check whether it expires earlier than the timer for
808 * Called with interrupts disabled and base->cpu_base.lock held
810 static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram) in hrtimer_reprogram() argument
813 struct hrtimer_clock_base *base = timer->base; in hrtimer_reprogram()
814 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in hrtimer_reprogram()
816 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); in hrtimer_reprogram()
819 * CLOCK_REALTIME timer might be requested with an absolute in hrtimer_reprogram()
820 * expiry time which is less than base->offset. Set it to 0. in hrtimer_reprogram()
825 if (timer->is_soft) { in hrtimer_reprogram()
830 * first hard hrtimer on the remote CPU - in hrtimer_reprogram()
833 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base; in hrtimer_reprogram()
835 if (timer_cpu_base->softirq_activated) in hrtimer_reprogram()
838 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next)) in hrtimer_reprogram()
841 timer_cpu_base->softirq_next_timer = timer; in hrtimer_reprogram()
842 timer_cpu_base->softirq_expires_next = expires; in hrtimer_reprogram()
844 if (!ktime_before(expires, timer_cpu_base->expires_next) || in hrtimer_reprogram()
850 * If the timer is not on the current cpu, we cannot reprogram in hrtimer_reprogram()
853 if (base->cpu_base != cpu_base) in hrtimer_reprogram()
856 if (expires >= cpu_base->expires_next) in hrtimer_reprogram()
863 if (cpu_base->in_hrtirq) in hrtimer_reprogram()
866 cpu_base->next_timer = timer; in hrtimer_reprogram()
868 __hrtimer_reprogram(cpu_base, timer, expires); in hrtimer_reprogram()
886 * the next expiring timer. in update_needs_ipi()
888 seq = cpu_base->clock_was_set_seq; in update_needs_ipi()
895 if (seq == cpu_base->clock_was_set_seq) in update_needs_ipi()
900 * will reevaluate the first expiring timer of all clock bases in update_needs_ipi()
903 if (cpu_base->in_hrtirq) in update_needs_ipi()
908 * timer in a clock base is moving ahead of the first expiring timer of in update_needs_ipi()
912 active &= cpu_base->active_bases; in update_needs_ipi()
917 next = timerqueue_getnext(&base->active); in update_needs_ipi()
918 expires = ktime_sub(next->expires, base->offset); in update_needs_ipi()
919 if (expires < cpu_base->expires_next) in update_needs_ipi()
923 if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT) in update_needs_ipi()
925 if (cpu_base->softirq_activated) in update_needs_ipi()
927 if (expires < cpu_base->softirq_expires_next) in update_needs_ipi()
940 * when the change moves an affected timer ahead of the first expiring
941 * timer on that CPU. Obviously remote per CPU clock event devices cannot
967 raw_spin_lock_irqsave(&cpu_base->lock, flags); in clock_was_set()
972 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in clock_was_set()
1017 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) in unlock_hrtimer_base() argument
1018 __releases(&timer->base->cpu_base->lock) in unlock_hrtimer_base()
1020 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); in unlock_hrtimer_base()
1024 * hrtimer_forward - forward the timer expiry
1025 * @timer: hrtimer to forward
1029 * Forward the timer expiry so it will expire in the future.
1032 * Can be safely called from the callback function of @timer. If
1033 * called from other contexts @timer must neither be enqueued nor
1037 * Note: This only updates the timer expiry value and does not requeue
1038 * the timer.
1040 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) in hrtimer_forward() argument
1045 delta = ktime_sub(now, hrtimer_get_expires(timer)); in hrtimer_forward()
1050 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) in hrtimer_forward()
1060 hrtimer_add_expires_ns(timer, incr * orun); in hrtimer_forward()
1061 if (hrtimer_get_expires_tv64(timer) > now) in hrtimer_forward()
1069 hrtimer_add_expires(timer, interval); in hrtimer_forward()
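
hrtimer_forward() is usually reached through hrtimer_forward_now(), which passes the current base time as @now; the returned overrun count says how many whole intervals were missed. A hedged sketch of the canonical periodic callback (periodic_cb() and period_ns are hypothetical):

#include <linux/hrtimer.h>
#include <linux/printk.h>

static u64 period_ns = 1000000;		/* hypothetical 1 ms period */

static enum hrtimer_restart periodic_cb(struct hrtimer *timer)
{
	u64 overruns = hrtimer_forward_now(timer, ns_to_ktime(period_ns));

	if (overruns > 1)		/* >1 means whole periods were lost */
		pr_warn("missed %llu periods\n",
			(unsigned long long)(overruns - 1));

	return HRTIMER_RESTART;		/* requeue at the forwarded expiry */
}
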
1076 * enqueue_hrtimer - internal function to (re)start a timer
1078 * The timer is inserted in expiry order. Insertion into the
1081 * Returns 1 when the new timer is the leftmost timer in the tree.
1083 static int enqueue_hrtimer(struct hrtimer *timer, in enqueue_hrtimer() argument
1087 debug_activate(timer, mode); in enqueue_hrtimer()
1088 WARN_ON_ONCE(!base->cpu_base->online); in enqueue_hrtimer()
1090 base->cpu_base->active_bases |= 1 << base->index; in enqueue_hrtimer()
1093 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED); in enqueue_hrtimer()
1095 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
1099 * __remove_hrtimer - internal function to remove a timer
1103 * High resolution timer mode reprograms the clock event device when the
1104 * timer is the one which expires next. The caller can disable this by setting
1106 * anyway (e.g. timer interrupt)
1108 static void __remove_hrtimer(struct hrtimer *timer, in __remove_hrtimer() argument
1112 struct hrtimer_cpu_base *cpu_base = base->cpu_base; in __remove_hrtimer()
1113 u8 state = timer->state; in __remove_hrtimer()
1116 WRITE_ONCE(timer->state, newstate); in __remove_hrtimer()
1120 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1121 cpu_base->active_bases &= ~(1 << base->index); in __remove_hrtimer()
1125 * cpu_base->next_timer. This happens when we remove the first in __remove_hrtimer()
1126 * timer on a remote cpu. No harm as we never dereference in __remove_hrtimer()
1127 * cpu_base->next_timer. So the worst thing that can happen is in __remove_hrtimer()
1129 * remote cpu later on if the same timer gets enqueued again. in __remove_hrtimer()
1131 if (reprogram && timer == cpu_base->next_timer) in __remove_hrtimer()
1139 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, in remove_hrtimer() argument
1142 u8 state = timer->state; in remove_hrtimer()
1148 * Remove the timer and force reprogramming when high in remove_hrtimer()
1149 * resolution mode is active and the timer is on the current in remove_hrtimer()
1150 * CPU. If we remove a timer on another CPU, reprogramming is in remove_hrtimer()
1155 debug_deactivate(timer); in remove_hrtimer()
1156 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in remove_hrtimer()
1159 * If the timer is not restarted then reprogramming is in remove_hrtimer()
1160 * required if the timer is local. If it is local and about in remove_hrtimer()
1169 __remove_hrtimer(timer, base, state, reprogram); in remove_hrtimer()
1175 static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim, in hrtimer_update_lowres() argument
1184 timer->is_rel = mode & HRTIMER_MODE_REL; in hrtimer_update_lowres()
1185 if (timer->is_rel) in hrtimer_update_lowres()
1204 * hrtimer. cpu_base->softirq_expires_next needs to be updated! in hrtimer_update_softirq_timer()
1210 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event() in hrtimer_update_softirq_timer()
1211 * cpu_base->*expires_next is only set by hrtimer_reprogram() in hrtimer_update_softirq_timer()
1213 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram); in hrtimer_update_softirq_timer()
1216 static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, in __hrtimer_start_range_ns() argument
1224 * If the timer is on the local cpu base and is the first expiring in __hrtimer_start_range_ns()
1225 * timer then this might end up reprogramming the hardware twice in __hrtimer_start_range_ns()
1227 * reprogram on removal, keep the timer local to the current CPU in __hrtimer_start_range_ns()
1229 * it is the new first expiring timer again or not. in __hrtimer_start_range_ns()
1231 force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in __hrtimer_start_range_ns()
1232 force_local &= base->cpu_base->next_timer == timer; in __hrtimer_start_range_ns()
1235 * Remove an active timer from the queue. In case it is not queued in __hrtimer_start_range_ns()
1239 * If it's on the current CPU and the first expiring timer, then in __hrtimer_start_range_ns()
1240 * skip reprogramming, keep the timer local and enforce in __hrtimer_start_range_ns()
1241 * reprogramming later if it was the first expiring timer. This in __hrtimer_start_range_ns()
1245 remove_hrtimer(timer, base, true, force_local); in __hrtimer_start_range_ns()
1248 tim = ktime_add_safe(tim, base->get_time()); in __hrtimer_start_range_ns()
1250 tim = hrtimer_update_lowres(timer, tim, mode); in __hrtimer_start_range_ns()
1252 hrtimer_set_expires_range_ns(timer, tim, delta_ns); in __hrtimer_start_range_ns()
1254 /* Switch the timer base, if necessary: */ in __hrtimer_start_range_ns()
1256 new_base = switch_hrtimer_base(timer, base, in __hrtimer_start_range_ns()
1262 first = enqueue_hrtimer(timer, new_base, mode); in __hrtimer_start_range_ns()
1267 * Timer was forced to stay on the current CPU to avoid in __hrtimer_start_range_ns()
1269 * hardware by evaluating the new first expiring timer. in __hrtimer_start_range_ns()
1271 hrtimer_force_reprogram(new_base->cpu_base, 1); in __hrtimer_start_range_ns()
1276 * hrtimer_start_range_ns - (re)start an hrtimer
1277 * @timer: the timer to be added
1279 * @delta_ns: "slack" range for the timer
1280 * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
1284 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, in hrtimer_start_range_ns() argument
1296 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); in hrtimer_start_range_ns()
1298 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard); in hrtimer_start_range_ns()
1300 base = lock_hrtimer_base(timer, &flags); in hrtimer_start_range_ns()
1302 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) in hrtimer_start_range_ns()
1303 hrtimer_reprogram(timer, true); in hrtimer_start_range_ns()
1305 unlock_hrtimer_base(timer, &flags); in hrtimer_start_range_ns()
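
A hedged usage sketch of the @delta_ns slack: the expiry may be coalesced with other timers anywhere in [@tim, @tim + @delta_ns]. my_timer is assumed to be initialized elsewhere; start_with_slack() is hypothetical:

#include <linux/hrtimer.h>

/* Sketch: 5 ms relative expiry with 100 us of coalescing slack. */
static void start_with_slack(struct hrtimer *my_timer)
{
	hrtimer_start_range_ns(my_timer, ms_to_ktime(5),
			       100 * NSEC_PER_USEC, HRTIMER_MODE_REL);
}
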
1310 * hrtimer_try_to_cancel - try to deactivate a timer
1311 * @timer: hrtimer to stop
1315 * * 0 when the timer was not active
1316 * * 1 when the timer was active
1317 * * -1 when the timer is currently executing the callback function and
1320 int hrtimer_try_to_cancel(struct hrtimer *timer) in hrtimer_try_to_cancel() argument
1324 int ret = -1; in hrtimer_try_to_cancel()
1327 * Check lockless first. If the timer is not active (neither in hrtimer_try_to_cancel()
1332 if (!hrtimer_active(timer)) in hrtimer_try_to_cancel()
1335 base = lock_hrtimer_base(timer, &flags); in hrtimer_try_to_cancel()
1337 if (!hrtimer_callback_running(timer)) in hrtimer_try_to_cancel()
1338 ret = remove_hrtimer(timer, base, false, false); in hrtimer_try_to_cancel()
1340 unlock_hrtimer_base(timer, &flags); in hrtimer_try_to_cancel()
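
Because it never sleeps, hrtimer_try_to_cancel() is usable from atomic context; -1 only says the callback is running right now, and hrtimer_cancel() below is essentially this call in a retry loop. A hedged sketch of interpreting the result (stop_if_idle() is hypothetical):

#include <linux/hrtimer.h>

/* Sketch: returns true when the timer is now guaranteed inactive. */
static bool stop_if_idle(struct hrtimer *timer)
{
	int ret = hrtimer_try_to_cancel(timer);

	return ret >= 0;	/* 0: was idle, 1: dequeued, -1: running */
}
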
1350 spin_lock_init(&base->softirq_expiry_lock); in hrtimer_cpu_base_init_expiry_lock()
1355 spin_lock(&base->softirq_expiry_lock); in hrtimer_cpu_base_lock_expiry()
1360 spin_unlock(&base->softirq_expiry_lock); in hrtimer_cpu_base_unlock_expiry()
1366 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
1367 * the timer callback to finish. Drop expiry_lock and reacquire it. That
1373 if (atomic_read(&cpu_base->timer_waiters)) { in hrtimer_sync_wait_running()
1374 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_sync_wait_running()
1375 spin_unlock(&cpu_base->softirq_expiry_lock); in hrtimer_sync_wait_running()
1376 spin_lock(&cpu_base->softirq_expiry_lock); in hrtimer_sync_wait_running()
1377 raw_spin_lock_irq(&cpu_base->lock); in hrtimer_sync_wait_running()
1383 * deletion of a timer failed because the timer callback function was
1387 * in the middle of a timer callback, then calling del_timer_sync() can
1390 * - If the caller is on a remote CPU then it has to spin wait for the timer
1393 * - If the caller originates from the task which preempted the timer
1394 * handler on the same CPU, then spin waiting for the timer handler to
1397 void hrtimer_cancel_wait_running(const struct hrtimer *timer) in hrtimer_cancel_wait_running() argument
1400 struct hrtimer_clock_base *base = READ_ONCE(timer->base); in hrtimer_cancel_wait_running()
1403 * Just relax if the timer expires in hard interrupt context or if in hrtimer_cancel_wait_running()
1406 if (!timer->is_soft || is_migration_base(base)) { in hrtimer_cancel_wait_running()
1413 * held by the softirq across the timer callback. Drop the lock in hrtimer_cancel_wait_running()
1414 * immediately so the softirq can expire the next timer. In theory in hrtimer_cancel_wait_running()
1415 * the timer could already be running again, but that's more than in hrtimer_cancel_wait_running()
1418 atomic_inc(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1419 spin_lock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1420 atomic_dec(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1421 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1435 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1436 * @timer: the timer to be cancelled
1439 * 0 when the timer was not active
1440 * 1 when the timer was active
1442 int hrtimer_cancel(struct hrtimer *timer) in hrtimer_cancel() argument
1447 ret = hrtimer_try_to_cancel(timer); in hrtimer_cancel()
1450 hrtimer_cancel_wait_running(timer); in hrtimer_cancel()
1457 * __hrtimer_get_remaining - get remaining time for the timer
1458 * @timer: the timer to read
1461 ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust) in __hrtimer_get_remaining() argument
1466 lock_hrtimer_base(timer, &flags); in __hrtimer_get_remaining()
1468 rem = hrtimer_expires_remaining_adjusted(timer); in __hrtimer_get_remaining()
1470 rem = hrtimer_expires_remaining(timer); in __hrtimer_get_remaining()
1471 unlock_hrtimer_base(timer, &flags); in __hrtimer_get_remaining()
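
Callers normally use the hrtimer_get_remaining() wrapper (adjust == false); a negative result means the timer has already expired. A hedged sketch (show_remaining() is hypothetical):

#include <linux/hrtimer.h>
#include <linux/printk.h>

static void show_remaining(struct hrtimer *timer)
{
	s64 rem_ns = ktime_to_ns(hrtimer_get_remaining(timer));

	pr_info("expires in %lld ns\n", rem_ns);	/* < 0: already expired */
}
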
1479 * hrtimer_get_next_event - get the time until next expiry event
1481 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1489 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_get_next_event()
1494 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_get_next_event()
1500 * hrtimer_next_event_without - time until next expiry event w/o one timer
1501 * @exclude: timer to exclude
1512 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_next_event_without()
1517 if (!cpu_base->softirq_activated) { in hrtimer_next_event_without()
1518 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; in hrtimer_next_event_without()
1522 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; in hrtimer_next_event_without()
1527 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_next_event_without()
1545 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, in __hrtimer_init() argument
1561 memset(timer, 0, sizeof(struct hrtimer)); in __hrtimer_init()
1575 timer->is_soft = softtimer; in __hrtimer_init()
1576 timer->is_hard = !!(mode & HRTIMER_MODE_HARD); in __hrtimer_init()
1577 timer->base = &cpu_base->clock_base[base]; in __hrtimer_init()
1578 timerqueue_init(&timer->node); in __hrtimer_init()
1582 * hrtimer_init - initialize a timer to the given clock
1583 * @timer: the timer to be initialized
1593 void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, in hrtimer_init() argument
1596 debug_init(timer, clock_id, mode); in hrtimer_init()
1597 __hrtimer_init(timer, clock_id, mode); in hrtimer_init()
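
The common consumer pattern embeds the hrtimer in a larger structure and recovers the container in the callback. A hedged sketch (struct my_dev and its helpers are hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_dev {
	struct hrtimer timer;
	unsigned long ticks;
};

static enum hrtimer_restart my_dev_tick(struct hrtimer *t)
{
	struct my_dev *dev = container_of(t, struct my_dev, timer);

	dev->ticks++;
	return HRTIMER_NORESTART;
}

static void my_dev_setup(struct my_dev *dev)
{
	hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev->timer.function = my_dev_tick;
	hrtimer_start(&dev->timer, ms_to_ktime(1), HRTIMER_MODE_REL);
}
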
1602 * A timer is active when it is enqueued into the rbtree or the
1608 bool hrtimer_active(const struct hrtimer *timer) in hrtimer_active() argument
1614 base = READ_ONCE(timer->base); in hrtimer_active()
1615 seq = raw_read_seqcount_begin(&base->seq); in hrtimer_active()
1617 if (timer->state != HRTIMER_STATE_INACTIVE || in hrtimer_active()
1618 base->running == timer) in hrtimer_active()
1621 } while (read_seqcount_retry(&base->seq, seq) || in hrtimer_active()
1622 base != READ_ONCE(timer->base)); in hrtimer_active()
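
The answer is a lockless snapshot and can change the moment the function returns, so it is a hint, not a synchronization primitive. A hedged sketch (peek_timer() is hypothetical):

#include <linux/hrtimer.h>
#include <linux/printk.h>

static void peek_timer(struct hrtimer *timer)
{
	if (hrtimer_active(timer))
		pr_debug("timer is queued or its callback is running\n");
}
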
1632 * - queued: the timer is queued
1633 * - callback: the timer is being run
1634 * - post: the timer is inactive or (re)queued
1636 * On the read side we ensure we observe timer->state and cpu_base->running
1638 * This includes timer->base changing because sequence numbers alone are
1648 struct hrtimer *timer, ktime_t *now, in __run_hrtimer() argument
1649 unsigned long flags) __must_hold(&cpu_base->lock) in __run_hrtimer()
1655 lockdep_assert_held(&cpu_base->lock); in __run_hrtimer()
1657 debug_deactivate(timer); in __run_hrtimer()
1658 base->running = timer; in __run_hrtimer()
1661 * Separate the ->running assignment from the ->state assignment. in __run_hrtimer()
1664 * hrtimer_active() cannot observe base->running == NULL && in __run_hrtimer()
1665 * timer->state == INACTIVE. in __run_hrtimer()
1667 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1669 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); in __run_hrtimer()
1670 fn = timer->function; in __run_hrtimer()
1674 * timer is restarted with a period then it becomes an absolute in __run_hrtimer()
1675 * timer. If it's not restarted it does not matter. in __run_hrtimer()
1678 timer->is_rel = false; in __run_hrtimer()
1681 * The timer is marked as running in the CPU base, so it is in __run_hrtimer()
1685 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in __run_hrtimer()
1686 trace_hrtimer_expire_entry(timer, now); in __run_hrtimer()
1687 expires_in_hardirq = lockdep_hrtimer_enter(timer); in __run_hrtimer()
1689 restart = fn(timer); in __run_hrtimer()
1692 trace_hrtimer_expire_exit(timer); in __run_hrtimer()
1693 raw_spin_lock_irq(&cpu_base->lock); in __run_hrtimer()
1700 * Note: Because we dropped the cpu_base->lock above, in __run_hrtimer()
1701 * hrtimer_start_range_ns() can have popped in and enqueued the timer in __run_hrtimer()
1705 !(timer->state & HRTIMER_STATE_ENQUEUED)) in __run_hrtimer()
1706 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS); in __run_hrtimer()
1709 * Separate the ->running assignment from the ->state assignment. in __run_hrtimer()
1712 * hrtimer_active() cannot observe base->running.timer == NULL && in __run_hrtimer()
1713 * timer->state == INACTIVE. in __run_hrtimer()
1715 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1717 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1718 base->running = NULL; in __run_hrtimer()
1725 unsigned int active = cpu_base->active_bases & active_mask; in __hrtimer_run_queues()
1731 basenow = ktime_add(now, base->offset); in __hrtimer_run_queues()
1733 while ((node = timerqueue_getnext(&base->active))) { in __hrtimer_run_queues()
1734 struct hrtimer *timer; in __hrtimer_run_queues() local
1736 timer = container_of(node, struct hrtimer, node); in __hrtimer_run_queues()
1747 * are to the right of a not yet expired timer, because that in __hrtimer_run_queues()
1748 * timer will have to trigger a wakeup anyway. in __hrtimer_run_queues()
1750 if (basenow < hrtimer_get_softexpires_tv64(timer)) in __hrtimer_run_queues()
1753 __run_hrtimer(cpu_base, base, timer, &basenow, flags); in __hrtimer_run_queues()
1767 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_run_softirq()
1772 cpu_base->softirq_activated = 0; in hrtimer_run_softirq()
1775 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_run_softirq()
1782 * High resolution timer interrupt
1792 BUG_ON(!cpu_base->hres_active); in hrtimer_interrupt()
1793 cpu_base->nr_events++; in hrtimer_interrupt()
1794 dev->next_event = KTIME_MAX; in hrtimer_interrupt()
1796 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_interrupt()
1799 cpu_base->in_hrtirq = 1; in hrtimer_interrupt()
1801 * We set expires_next to KTIME_MAX here with cpu_base->lock in hrtimer_interrupt()
1802 * held to prevent that a timer is enqueued in our queue via in hrtimer_interrupt()
1807 cpu_base->expires_next = KTIME_MAX; in hrtimer_interrupt()
1809 if (!ktime_before(now, cpu_base->softirq_expires_next)) { in hrtimer_interrupt()
1810 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimer_interrupt()
1811 cpu_base->softirq_activated = 1; in hrtimer_interrupt()
1823 cpu_base->expires_next = expires_next; in hrtimer_interrupt()
1824 cpu_base->in_hrtirq = 0; in hrtimer_interrupt()
1825 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_interrupt()
1829 cpu_base->hang_detected = 0; in hrtimer_interrupt()
1834 * The next timer was already expired due to: in hrtimer_interrupt()
1835 * - tracing in hrtimer_interrupt()
1836 * - long lasting callbacks in hrtimer_interrupt()
1837 * - being scheduled away when running in a VM in hrtimer_interrupt()
1846 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_interrupt()
1848 cpu_base->nr_retries++; in hrtimer_interrupt()
1857 cpu_base->nr_hangs++; in hrtimer_interrupt()
1858 cpu_base->hang_detected = 1; in hrtimer_interrupt()
1859 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_interrupt()
1862 if ((unsigned int)delta > cpu_base->max_hang_time) in hrtimer_interrupt()
1863 cpu_base->max_hang_time = (unsigned int) delta; in hrtimer_interrupt()
1885 if (td && td->evtdev) in __hrtimer_peek_ahead_timers()
1886 hrtimer_interrupt(td->evtdev); in __hrtimer_peek_ahead_timers()
1919 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_run_queues()
1922 if (!ktime_before(now, cpu_base->softirq_expires_next)) { in hrtimer_run_queues()
1923 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimer_run_queues()
1924 cpu_base->softirq_activated = 1; in hrtimer_run_queues()
1929 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_run_queues()
1935 static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) in hrtimer_wakeup() argument
1938 container_of(timer, struct hrtimer_sleeper, timer); in hrtimer_wakeup()
1939 struct task_struct *task = t->task; in hrtimer_wakeup()
1941 t->task = NULL; in hrtimer_wakeup()
1949 * hrtimer_sleeper_start_expires - Start an hrtimer sleeper timer
1951 * @mode: timer mode abs/rel
1966 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) in hrtimer_sleeper_start_expires()
1969 hrtimer_start_expires(&sl->timer, mode); in hrtimer_sleeper_start_expires()
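
The nanosleep paths below use this helper in a fixed sequence: mark the task sleeping, start the sleeper, then schedule() until hrtimer_wakeup() clears ->task. A condensed, hedged sketch assuming a sleepable caller (sleep_sketch() is hypothetical):

#include <linux/hrtimer.h>
#include <linux/sched.h>

static void sleep_sketch(ktime_t expires)
{
	struct hrtimer_sleeper t;

	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_set_expires(&t.timer, expires);

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
	if (t.task)			/* cleared by hrtimer_wakeup() */
		schedule();

	hrtimer_cancel(&t.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&t.timer);
}
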
1990 * OTOH, privileged real-time user space applications rely on the in __hrtimer_init_sleeper()
1992 * a real-time scheduling class, mark the mode for hard interrupt in __hrtimer_init_sleeper()
2000 __hrtimer_init(&sl->timer, clock_id, mode); in __hrtimer_init_sleeper()
2001 sl->timer.function = hrtimer_wakeup; in __hrtimer_init_sleeper()
2002 sl->task = current; in __hrtimer_init_sleeper()
2006 * hrtimer_init_sleeper - initialize sleeper to the given clock
2009 * @mode: timer mode abs/rel
2014 debug_init(&sl->timer, clock_id, mode); in hrtimer_init_sleeper()
2022 switch (restart->nanosleep.type) { in nanosleep_copyout()
2025 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp)) in nanosleep_copyout()
2026 return -EFAULT; in nanosleep_copyout()
2030 if (put_timespec64(ts, restart->nanosleep.rmtp)) in nanosleep_copyout()
2031 return -EFAULT; in nanosleep_copyout()
2036 return -ERESTART_RESTARTBLOCK; in nanosleep_copyout()
2047 if (likely(t->task)) in do_nanosleep()
2050 hrtimer_cancel(&t->timer); in do_nanosleep()
2053 } while (t->task && !signal_pending(current)); in do_nanosleep()
2057 if (!t->task) in do_nanosleep()
2060 restart = &current->restart_block; in do_nanosleep()
2061 if (restart->nanosleep.type != TT_NONE) { in do_nanosleep()
2062 ktime_t rem = hrtimer_expires_remaining(&t->timer); in do_nanosleep()
2071 return -ERESTART_RESTARTBLOCK; in do_nanosleep()
2079 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid, in hrtimer_nanosleep_restart()
2081 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); in hrtimer_nanosleep_restart()
2083 destroy_hrtimer_on_stack(&t.timer); in hrtimer_nanosleep_restart()
2095 slack = current->timer_slack_ns; in hrtimer_nanosleep()
2100 hrtimer_set_expires_range_ns(&t.timer, rqtp, slack); in hrtimer_nanosleep()
2102 if (ret != -ERESTART_RESTARTBLOCK) in hrtimer_nanosleep()
2107 ret = -ERESTARTNOHAND; in hrtimer_nanosleep()
2111 restart = &current->restart_block; in hrtimer_nanosleep()
2112 restart->nanosleep.clockid = t.timer.base->clockid; in hrtimer_nanosleep()
2113 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); in hrtimer_nanosleep()
2116 destroy_hrtimer_on_stack(&t.timer); in hrtimer_nanosleep()
2128 return -EFAULT; in SYSCALL_DEFINE2()
2131 return -EINVAL; in SYSCALL_DEFINE2()
2133 current->restart_block.fn = do_no_restart_syscall; in SYSCALL_DEFINE2()
2134 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; in SYSCALL_DEFINE2()
2135 current->restart_block.nanosleep.rmtp = rmtp; in SYSCALL_DEFINE2()
2150 return -EFAULT; in SYSCALL_DEFINE2()
2153 return -EINVAL; in SYSCALL_DEFINE2()
2155 current->restart_block.fn = do_no_restart_syscall; in SYSCALL_DEFINE2()
2156 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; in SYSCALL_DEFINE2()
2157 current->restart_block.nanosleep.compat_rmtp = rmtp; in SYSCALL_DEFINE2()
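
From user space the same machinery is reached via clock_nanosleep(); with TIMER_ABSTIME the deadline is absolute, so a signal-interrupted sleep can simply be retried without drift. A hedged user-space sketch (sleep_until() is hypothetical):

#include <errno.h>
#include <time.h>

static void sleep_until(const struct timespec *deadline)
{
	/* clock_nanosleep() returns the error number directly. */
	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
			       deadline, NULL) == EINTR)
		;	/* interrupted: retry with the unchanged deadline */
}
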
2164 * Functions related to boot-time initialization:
2172 struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i]; in hrtimers_prepare_cpu()
2174 clock_b->cpu_base = cpu_base; in hrtimers_prepare_cpu()
2175 seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock); in hrtimers_prepare_cpu()
2176 timerqueue_init_head(&clock_b->active); in hrtimers_prepare_cpu()
2179 cpu_base->cpu = cpu; in hrtimers_prepare_cpu()
2180 cpu_base->active_bases = 0; in hrtimers_prepare_cpu()
2181 cpu_base->hres_active = 0; in hrtimers_prepare_cpu()
2182 cpu_base->hang_detected = 0; in hrtimers_prepare_cpu()
2183 cpu_base->next_timer = NULL; in hrtimers_prepare_cpu()
2184 cpu_base->softirq_next_timer = NULL; in hrtimers_prepare_cpu()
2185 cpu_base->expires_next = KTIME_MAX; in hrtimers_prepare_cpu()
2186 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimers_prepare_cpu()
2187 cpu_base->online = 1; in hrtimers_prepare_cpu()
2197 struct hrtimer *timer; in migrate_hrtimer_list() local
2200 while ((node = timerqueue_getnext(&old_base->active))) { in migrate_hrtimer_list()
2201 timer = container_of(node, struct hrtimer, node); in migrate_hrtimer_list()
2202 BUG_ON(hrtimer_callback_running(timer)); in migrate_hrtimer_list()
2203 debug_deactivate(timer); in migrate_hrtimer_list()
2207 * timer could be seen as !active and just vanish away in migrate_hrtimer_list()
2210 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0); in migrate_hrtimer_list()
2211 timer->base = new_base; in migrate_hrtimer_list()
2214 * reprogram the event device in case the timer in migrate_hrtimer_list()
2220 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS); in migrate_hrtimer_list()
2238 raw_spin_lock(&old_base->lock); in hrtimers_cpu_dying()
2239 raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING); in hrtimers_cpu_dying()
2242 migrate_hrtimer_list(&old_base->clock_base[i], in hrtimers_cpu_dying()
2243 &new_base->clock_base[i]); in hrtimers_cpu_dying()
2248 * timer on this CPU. Update it. in hrtimers_cpu_dying()
2254 raw_spin_unlock(&new_base->lock); in hrtimers_cpu_dying()
2255 old_base->online = 0; in hrtimers_cpu_dying()
2256 raw_spin_unlock(&old_base->lock); in hrtimers_cpu_dying()
2270 * schedule_hrtimeout_range_clock - sleep until timeout
2273 * @mode: timer mode
2274 * @clock_id: timer clock to be used
2296 return -EINTR; in schedule_hrtimeout_range_clock()
2307 hrtimer_set_expires_range_ns(&t.timer, *expires, delta); in schedule_hrtimeout_range_clock()
2313 hrtimer_cancel(&t.timer); in schedule_hrtimeout_range_clock()
2314 destroy_hrtimer_on_stack(&t.timer); in schedule_hrtimeout_range_clock()
2318 return !t.task ? 0 : -EINTR; in schedule_hrtimeout_range_clock()
2323 * schedule_hrtimeout_range - sleep until timeout
2326 * @mode: timer mode
2336 * but may decide to fire the timer earlier, though no earlier than @expires.
2338 * You can set the task state as follows -
2340 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2344 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2351 * Returns 0 when the timer has expired. If the task was woken before the
2352 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2353 * by an explicit wakeup, it returns -EINTR.
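
A hedged usage sketch of the above: an interruptible sleep of roughly 2 ms with 500 us of slack (wait_a_bit() is hypothetical):

#include <linux/hrtimer.h>
#include <linux/sched.h>

static int wait_a_bit(void)
{
	ktime_t to = ms_to_ktime(2);

	set_current_state(TASK_INTERRUPTIBLE);
	/* 0: timer expired, -EINTR: signal or explicit wakeup came first */
	return schedule_hrtimeout_range(&to, 500 * NSEC_PER_USEC,
					HRTIMER_MODE_REL);
}
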
2364 * schedule_hrtimeout - sleep until timeout
2366 * @mode: timer mode
2372 * You can set the task state as follows -
2374 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2378 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2385 * Returns 0 when the timer has expired. If the task was woken before the
2386 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2387 * by an explicit wakeup, it returns -EINTR.