Lines Matching +full:clock +full:- +full:mult
1 // SPDX-License-Identifier: GPL-2.0
15 #include <linux/sched/clock.h>
26 #include "tick-internal.h"
61 * struct tk_fast - NMI safe timekeeper
74 /* Suspend-time cycles value for halted fast timekeeper. */
91 * returns nanoseconds already so no conversion is required, hence mult=1
97 .clock = &dummy_clock, \
99 .mult = 1, \
117 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { in tk_normalize_xtime()
118 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in tk_normalize_xtime()
119 tk->xtime_sec++; in tk_normalize_xtime()
121 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { in tk_normalize_xtime()
122 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in tk_normalize_xtime()
123 tk->raw_sec++; in tk_normalize_xtime()
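
The loops above run on "shifted nanoseconds": xtime_nsec stores ns << shift, so one full second is NSEC_PER_SEC << shift. A minimal user-space sketch of the same carry loop, assuming an illustrative shift of 10:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		unsigned int shift = 10;	/* assumed clocksource shift */
		uint64_t xtime_sec = 100;
		/* 2.25 s worth of shifted nanoseconds waiting to be carried */
		uint64_t xtime_nsec = (2 * NSEC_PER_SEC + 250000000ULL) << shift;

		/* same normalization as tk_normalize_xtime() */
		while (xtime_nsec >= ((uint64_t)NSEC_PER_SEC << shift)) {
			xtime_nsec -= (uint64_t)NSEC_PER_SEC << shift;
			xtime_sec++;
		}

		/* prints sec=102 nsec=250000000 */
		printf("sec=%llu nsec=%llu\n", (unsigned long long)xtime_sec,
		       (unsigned long long)(xtime_nsec >> shift));
		return 0;
	}
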
131 ts.tv_sec = tk->xtime_sec; in tk_xtime()
132 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in tk_xtime()
138 tk->xtime_sec = ts->tv_sec; in tk_set_xtime()
139 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_set_xtime()
144 tk->xtime_sec += ts->tv_sec; in tk_xtime_add()
145 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_xtime_add()
154 * Verify consistency of: offset_real = -wall_to_monotonic in tk_set_wall_to_mono()
157 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, in tk_set_wall_to_mono()
158 -tk->wall_to_monotonic.tv_nsec); in tk_set_wall_to_mono()
159 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp)); in tk_set_wall_to_mono()
160 tk->wall_to_monotonic = wtm; in tk_set_wall_to_mono()
161 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); in tk_set_wall_to_mono()
162 tk->offs_real = timespec64_to_ktime(tmp); in tk_set_wall_to_mono()
163 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); in tk_set_wall_to_mono()
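
tk_set_wall_to_mono() maintains the invariant offs_real == -wall_to_monotonic, so a reader can convert in either direction. A worked sketch with assumed values (wall time 1000 s, wall_to_monotonic -300 s):

	#include <stdio.h>

	int main(void)
	{
		long long wall = 1000, wtm = -300;	/* assumed values, s */
		long long mono = wall + wtm;		/* monotonic = 700 */
		long long offs_real = -wtm;		/* offs_real = 300 */

		/* monotonic + offs_real recovers wall time: prints 1000 */
		printf("real=%lld\n", mono + offs_real);
		return 0;
	}
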
168 tk->offs_boot = ktime_add(tk->offs_boot, delta); in tk_update_sleep_time()
173 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot); in tk_update_sleep_time()
177 * tk_clock_read - atomic clocksource read() helper
183 * clock reference passed to the read function. This can cause crashes if
186 * a read of the fast-timekeeper tkrs (which is protected by its own locking
191 struct clocksource *clock = READ_ONCE(tkr->clock); in tk_clock_read() local
193 return clock->read(clock); in tk_clock_read()
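
tk_clock_read() loads the clock pointer exactly once so that ->read() is invoked on the same clocksource object the pointer was read from, even if a concurrent clocksource switch replaces it in between. A user-space analogue of the pattern, sketched with C11 atomics in place of READ_ONCE():

	#include <stdatomic.h>
	#include <stdint.h>

	struct clocksource {
		uint64_t (*read)(struct clocksource *cs);
	};

	static _Atomic(struct clocksource *) cur_clock;

	static uint64_t clock_read_once(void)
	{
		/* one snapshot; never re-dereference cur_clock afterwards */
		struct clocksource *clock =
			atomic_load_explicit(&cur_clock, memory_order_relaxed);

		return clock->read(clock);
	}
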
197 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
202 u64 max_cycles = tk->tkr_mono.clock->max_cycles; in timekeeping_check_update()
203 const char *name = tk->tkr_mono.clock->name; in timekeeping_check_update()
206 …NING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value… in timekeeping_check_update()
211 …printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safet… in timekeeping_check_update()
217 if (tk->underflow_seen) { in timekeeping_check_update()
218 if (jiffies - tk->last_warning > WARNING_FREQ) { in timekeeping_check_update()
222 tk->last_warning = jiffies; in timekeeping_check_update()
224 tk->underflow_seen = 0; in timekeeping_check_update()
227 if (tk->overflow_seen) { in timekeeping_check_update()
228 if (jiffies - tk->last_warning > WARNING_FREQ) { in timekeeping_check_update()
232 tk->last_warning = jiffies; in timekeeping_check_update()
234 tk->overflow_seen = 0; in timekeeping_check_update()
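
The warnings above are rate-limited to one per WARNING_FREQ jiffies. A sketch of the pattern, assuming HZ = 100; the unsigned subtraction keeps the comparison correct across counter wraparound:

	#include <stdbool.h>

	#define WARNING_FREQ	(100UL * 300)	/* assumed HZ = 100: 5 min */

	static unsigned long last_warning;

	/* return true at most once per WARNING_FREQ ticks */
	static bool should_warn(unsigned long now)
	{
		if (now - last_warning > WARNING_FREQ) {
			last_warning = now;
			return true;
		}
		return false;
	}
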
254 last = tkr->cycle_last; in timekeeping_get_delta()
255 mask = tkr->mask; in timekeeping_get_delta()
256 max = tkr->clock->max_cycles; in timekeeping_get_delta()
263 * mask-relative negative values. in timekeeping_get_delta()
266 tk->underflow_seen = 1; in timekeeping_get_delta()
270 /* Cap delta value to the max_cycles values to avoid mult overflows */ in timekeeping_get_delta()
272 tk->overflow_seen = 1; in timekeeping_get_delta()
273 delta = tkr->clock->max_cycles; in timekeeping_get_delta()
290 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); in timekeeping_get_delta()
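
clocksource_delta() is, in its plain (non-debug) form, a masked subtraction, which keeps the delta correct when a counter narrower than 64 bits wraps. A sketch:

	#include <stdint.h>

	static inline uint64_t clocksource_delta(uint64_t now, uint64_t last,
						 uint64_t mask)
	{
		return (now - last) & mask;
	}

	/* e.g. a wrapped 32-bit counter: now = 0x10, last = 0xfffffff0,
	 * mask = 0xffffffff yields delta = 0x20 */
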
297 * tk_setup_internals - Set up internals to use clocksource clock.
300 * @clock: Pointer to clocksource.
307 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) in tk_setup_internals() argument
313 ++tk->cs_was_changed_seq; in tk_setup_internals()
314 old_clock = tk->tkr_mono.clock; in tk_setup_internals()
315 tk->tkr_mono.clock = clock; in tk_setup_internals()
316 tk->tkr_mono.mask = clock->mask; in tk_setup_internals()
317 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); in tk_setup_internals()
319 tk->tkr_raw.clock = clock; in tk_setup_internals()
320 tk->tkr_raw.mask = clock->mask; in tk_setup_internals()
321 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; in tk_setup_internals()
323 /* Do the ns -> cycle conversion first, using original mult */ in tk_setup_internals()
325 tmp <<= clock->shift; in tk_setup_internals()
327 tmp += clock->mult/2; in tk_setup_internals()
328 do_div(tmp, clock->mult); in tk_setup_internals()
333 tk->cycle_interval = interval; in tk_setup_internals()
335 /* Go back from cycles -> shifted ns */ in tk_setup_internals()
336 tk->xtime_interval = interval * clock->mult; in tk_setup_internals()
337 tk->xtime_remainder = ntpinterval - tk->xtime_interval; in tk_setup_internals()
338 tk->raw_interval = interval * clock->mult; in tk_setup_internals()
342 int shift_change = clock->shift - old_clock->shift; in tk_setup_internals()
344 tk->tkr_mono.xtime_nsec >>= -shift_change; in tk_setup_internals()
345 tk->tkr_raw.xtime_nsec >>= -shift_change; in tk_setup_internals()
347 tk->tkr_mono.xtime_nsec <<= shift_change; in tk_setup_internals()
348 tk->tkr_raw.xtime_nsec <<= shift_change; in tk_setup_internals()
352 tk->tkr_mono.shift = clock->shift; in tk_setup_internals()
353 tk->tkr_raw.shift = clock->shift; in tk_setup_internals()
355 tk->ntp_error = 0; in tk_setup_internals()
356 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; in tk_setup_internals()
357 tk->ntp_tick = ntpinterval << tk->ntp_error_shift; in tk_setup_internals()
360 * The timekeeper keeps its own mult values for the currently in tk_setup_internals()
362 * to counteract clock drifting. in tk_setup_internals()
364 tk->tkr_mono.mult = clock->mult; in tk_setup_internals()
365 tk->tkr_raw.mult = clock->mult; in tk_setup_internals()
366 tk->ntp_err_mult = 0; in tk_setup_internals()
367 tk->skip_second_overflow = 0; in tk_setup_internals()
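
The ns -> cycles conversion above rounds the NTP tick length to the nearest whole cycle. A worked user-space example with assumed toy values: a 10 ms NTP tick and a clocksource with mult = 1000 << 20, shift = 20 (i.e. 1000 ns per cycle):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t shift = 20;
		uint32_t mult = 1000u << 20;	 /* assumed: 1000 ns/cycle */
		uint64_t ntpinterval = 10000000; /* assumed NTP tick, ns */

		uint64_t tmp = ntpinterval;
		tmp <<= shift;			/* ns -> shifted ns */
		tmp += mult / 2;		/* round to nearest cycle */
		tmp /= mult;			/* do_div() in the kernel */

		uint64_t cycle_interval = tmp;			 /* 10000 */
		uint64_t xtime_interval = cycle_interval * mult; /* shifted ns */

		/* prints cycle_interval=10000 ns_per_tick=10000000 */
		printf("cycle_interval=%llu ns_per_tick=%llu\n",
		       (unsigned long long)cycle_interval,
		       (unsigned long long)(xtime_interval >> shift));
		return 0;
	}
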
383 nsec = delta * tkr->mult + tkr->xtime_nsec; in timekeeping_delta_to_ns()
384 nsec >>= tkr->shift; in timekeeping_delta_to_ns()
403 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask); in timekeeping_cycles_to_ns()
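
timekeeping_delta_to_ns() is the read-side core: ns = (delta * mult + xtime_nsec) >> shift. A worked example with the same assumed toy clocksource (mult = 1000 << 20, shift = 20):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mult = 1000u << 20;	/* assumed: 1000 ns/cycle */
		uint32_t shift = 20;
		uint64_t xtime_nsec = 0;	/* leftover shifted ns */
		uint64_t delta = 12345;		/* cycles since cycle_last */

		uint64_t nsec = delta * (uint64_t)mult + xtime_nsec;
		nsec >>= shift;

		/* prints 12345000 */
		printf("%llu ns\n", (unsigned long long)nsec);
		return 0;
	}
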
408 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
424 struct tk_read_base *base = tkf->base; in update_fast_timekeeper()
427 raw_write_seqcount_latch(&tkf->seq); in update_fast_timekeeper()
433 raw_write_seqcount_latch(&tkf->seq); in update_fast_timekeeper()
440 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
457 * |12345678---> reader order
478 seq = raw_read_seqcount_latch(&tkf->seq); in __ktime_get_fast_ns()
479 tkr = tkf->base + (seq & 0x01); in __ktime_get_fast_ns()
480 now = ktime_to_ns(tkr->base); in __ktime_get_fast_ns()
485 tkr->cycle_last, in __ktime_get_fast_ns()
486 tkr->mask)); in __ktime_get_fast_ns()
487 } while (read_seqcount_latch_retry(&tkf->seq, seq)); in __ktime_get_fast_ns()
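
The reader above relies on the seqcount latch: the writer keeps two copies of the tk_read_base and bumps the sequence around each update; readers pick the copy indexed by the low sequence bit and retry if the sequence moved. A minimal user-space analogue, assuming C11 atomics and eliding the kernel's exact memory barriers:

	#include <stdatomic.h>
	#include <stdint.h>

	struct latch {
		_Atomic unsigned int seq;
		uint64_t data[2];	/* two copies, indexed by seq & 1 */
	};

	static void latch_write(struct latch *l, uint64_t val)
	{
		atomic_fetch_add(&l->seq, 1);	/* readers move to data[1] */
		l->data[0] = val;
		atomic_fetch_add(&l->seq, 1);	/* readers move to data[0] */
		l->data[1] = val;
	}

	static uint64_t latch_read(struct latch *l)
	{
		unsigned int seq;
		uint64_t val;

		do {
			seq = atomic_load(&l->seq);
			val = l->data[seq & 1];
		} while (atomic_load(&l->seq) != seq);	/* retry on change */

		return val;
	}

Readers never block and never spin on a writer-held lock, which is what makes the kernel version safe to call from NMI context.
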
505 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
508 * separate timekeeper with updates to monotonic clock and boot offset
513 * is added to the old timekeeping making the clock appear to update slightly
521 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
522 * partially updated. Since the tk->offs_boot update is a rare event, this
529 return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot)); in ktime_get_boot_fast_ns()
543 seq = raw_read_seqcount_latch(&tkf->seq); in __ktime_get_real_fast()
544 tkr = tkf->base + (seq & 0x01); in __ktime_get_real_fast()
545 basem = ktime_to_ns(tkr->base); in __ktime_get_real_fast()
546 baser = ktime_to_ns(tkr->base_real); in __ktime_get_real_fast()
550 tkr->cycle_last, tkr->mask)); in __ktime_get_real_fast()
551 } while (read_seqcount_latch_retry(&tkf->seq, seq)); in __ktime_get_real_fast()
559 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
568 * ktime_get_fast_timestamps: - NMI safe timestamps
571 * Stores clock monotonic, boottime and realtime timestamps.
598 * Access to the time keeper clock source is disabled across the innermost
603 * clock, but it might affect some of the nasty low level debug printks.
605 * OTOH, access to sched clock is not guaranteed across suspend/resume on
609 * using sched clock in a similar way as during early boot. But it's not as
611 * against the clock monotonic timestamp jumping backwards on resume.
617 snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono); in ktime_get_fast_timestamps()
618 snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot)); in ktime_get_fast_timestamps()
622 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
634 const struct tk_read_base *tkr = &tk->tkr_mono; in halt_fast_timekeeper()
638 tkr_dummy.clock = &dummy_clock; in halt_fast_timekeeper()
639 tkr_dummy.base_real = tkr->base + tk->offs_real; in halt_fast_timekeeper()
642 tkr = &tk->tkr_raw; in halt_fast_timekeeper()
644 tkr_dummy.clock = &dummy_clock; in halt_fast_timekeeper()
656 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
674 * pvclock_gtod_unregister_notifier - unregister a pvclock
691 * tk_update_leap_state - helper to update the next_leap_ktime
695 tk->next_leap_ktime = ntp_get_next_leap(); in tk_update_leap_state()
696 if (tk->next_leap_ktime != KTIME_MAX) in tk_update_leap_state()
698 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real); in tk_update_leap_state()
716 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec); in tk_update_ktime_data()
717 nsec = (u32) tk->wall_to_monotonic.tv_nsec; in tk_update_ktime_data()
718 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); in tk_update_ktime_data()
723 * this into account before updating tk->ktime_sec. in tk_update_ktime_data()
725 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in tk_update_ktime_data()
728 tk->ktime_sec = seconds; in tk_update_ktime_data()
731 tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC); in tk_update_ktime_data()
738 tk->ntp_error = 0; in timekeeping_update()
748 tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real; in timekeeping_update()
749 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono); in timekeeping_update()
750 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw); in timekeeping_update()
753 tk->clock_was_set_seq++; in timekeeping_update()
755 * The mirroring of the data to the shadow-timekeeper needs in timekeeping_update()
756 * to happen last here to ensure we don't over-write the in timekeeping_update()
765 * timekeeping_forward_now - update clock to the current time
767 * Forward the current clock to update its state since the last call to
768 * update_wall_time(). This is useful before significant clock changes,
775 cycle_now = tk_clock_read(&tk->tkr_mono); in timekeeping_forward_now()
776 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); in timekeeping_forward_now()
777 tk->tkr_mono.cycle_last = cycle_now; in timekeeping_forward_now()
778 tk->tkr_raw.cycle_last = cycle_now; in timekeeping_forward_now()
780 tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult; in timekeeping_forward_now()
783 tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift; in timekeeping_forward_now()
786 tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult; in timekeeping_forward_now()
789 tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift; in timekeeping_forward_now()
795 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
811 ts->tv_sec = tk->xtime_sec; in ktime_get_real_ts64()
812 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_real_ts64()
816 ts->tv_nsec = 0; in ktime_get_real_ts64()
832 base = tk->tkr_mono.base; in ktime_get()
833 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get()
851 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift; in ktime_get_resolution_ns()
875 base = ktime_add(tk->tkr_mono.base, *offset); in ktime_get_with_offset()
876 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_with_offset()
896 base = ktime_add(tk->tkr_mono.base, *offset); in ktime_get_coarse_with_offset()
897 nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; in ktime_get_coarse_with_offset()
906 * ktime_mono_to_any() - convert monotonic time to any other time
926 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
937 base = tk->tkr_raw.base; in ktime_get_raw()
938 nsecs = timekeeping_get_ns(&tk->tkr_raw); in ktime_get_raw()
947 * ktime_get_ts64 - get the monotonic clock in timespec64 format
950 * The function calculates the monotonic clock from the realtime
951 * clock and the wall_to_monotonic offset and stores the result
965 ts->tv_sec = tk->xtime_sec; in ktime_get_ts64()
966 nsec = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_ts64()
967 tomono = tk->wall_to_monotonic; in ktime_get_ts64()
971 ts->tv_sec += tomono.tv_sec; in ktime_get_ts64()
972 ts->tv_nsec = 0; in ktime_get_ts64()
978 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
981 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
991 return tk->ktime_sec; in ktime_get_seconds()
996 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
998 * Returns the wall clock seconds since 1970. This replaces the
1001 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
1003 * counter to provide "atomic" access to the 64bit tk->xtime_sec
1013 return tk->xtime_sec; in ktime_get_real_seconds()
1017 seconds = tk->xtime_sec; in ktime_get_real_seconds()
1026 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
1034 return tk->xtime_sec; in __ktime_get_real_seconds()
1038 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1055 now = tk_clock_read(&tk->tkr_mono); in ktime_get_snapshot()
1056 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; in ktime_get_snapshot()
1057 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; in ktime_get_snapshot()
1058 base_real = ktime_add(tk->tkr_mono.base, in ktime_get_snapshot()
1060 base_raw = tk->tkr_raw.base; in ktime_get_snapshot()
1061 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now); in ktime_get_snapshot()
1062 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now); in ktime_get_snapshot()
1065 systime_snapshot->cycles = now; in ktime_get_snapshot()
1066 systime_snapshot->real = ktime_add_ns(base_real, nsec_real); in ktime_get_snapshot()
1067 systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw); in ktime_get_snapshot()
1071 /* Scale base by mult/div checking for overflow */
1072 static int scale64_check_overflow(u64 mult, u64 div, u64 *base) in scale64_check_overflow() argument
1078 if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) || in scale64_check_overflow()
1079 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) in scale64_check_overflow()
1080 return -EOVERFLOW; in scale64_check_overflow()
1081 tmp *= mult; in scale64_check_overflow()
1083 rem = div64_u64(rem * mult, div); in scale64_check_overflow()
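
scale64_check_overflow() avoids a 128-bit multiply by splitting base at the divisor, using the identity base * mult / div == (base / div) * mult + ((base % div) * mult) / div, and bails out with -EOVERFLOW when the fls64() checks show a partial product would not fit in 64 bits. A sketch of the identity with assumed values:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t scale64(uint64_t base, uint64_t mult, uint64_t div)
	{
		uint64_t quot = base / div;
		uint64_t rem = base % div;

		/* each product must fit in 64 bits; the kernel helper
		 * rejects inputs where it would not */
		return quot * mult + rem * mult / div;
	}

	int main(void)
	{
		/* 3 s of nanoseconds scaled by 2/3: prints 2000000000 */
		printf("%llu\n",
		       (unsigned long long)scale64(3000000000ULL, 2, 3));
		return 0;
	}
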
1089 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
1093 * @discontinuity: True indicates clock was set on history period
1122 total_history_cycles - partial_history_cycles : in adjust_historical_crosststamp()
1130 ktime_sub(ts->sys_monoraw, history->raw)); in adjust_historical_crosststamp()
1139 * mult(real)/mult(raw) yielding the realtime correction in adjust_historical_crosststamp()
1145 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult); in adjust_historical_crosststamp()
1148 ktime_sub(ts->sys_realtime, history->real)); in adjust_historical_crosststamp()
1157 ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw); in adjust_historical_crosststamp()
1158 ts->sys_realtime = ktime_add_ns(history->real, corr_real); in adjust_historical_crosststamp()
1160 ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw); in adjust_historical_crosststamp()
1161 ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real); in adjust_historical_crosststamp()
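
The interpolation above clips partial_history_cycles to the nearer end of the history window ("shortest distance"), scales the raw-clock delta by partial/total, and derives the realtime correction either the same way or, across a clock discontinuity, via mult(real)/mult(raw). A worked example with assumed numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t raw_delta_ns = 1000000; /* raw delta over history */
		uint64_t partial = 250000;	 /* cycles, crosststamp -> now */
		uint64_t total = 1000000;	 /* cycles over whole window */

		/* scale64_check_overflow() does this division safely */
		uint64_t corr = raw_delta_ns * partial / total;	/* 250000 */

		/* partial <= total / 2: interpolate backward from "now" */
		printf("subtract %llu ns from the current timestamps\n",
		       (unsigned long long)corr);
		return 0;
	}
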
1168 * cycle_between - true if test occurs chronologically between before and after
1180 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1215 ret = get_time_fn(&xtstamp->device, &system_counterval, ctx); in get_device_system_crosststamp()
1224 if (tk->tkr_mono.clock != system_counterval.cs) in get_device_system_crosststamp()
1225 return -ENODEV; in get_device_system_crosststamp()
1232 now = tk_clock_read(&tk->tkr_mono); in get_device_system_crosststamp()
1233 interval_start = tk->tkr_mono.cycle_last; in get_device_system_crosststamp()
1235 clock_was_set_seq = tk->clock_was_set_seq; in get_device_system_crosststamp()
1236 cs_was_changed_seq = tk->cs_was_changed_seq; in get_device_system_crosststamp()
1243 base_real = ktime_add(tk->tkr_mono.base, in get_device_system_crosststamp()
1245 base_raw = tk->tkr_raw.base; in get_device_system_crosststamp()
1247 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, in get_device_system_crosststamp()
1249 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, in get_device_system_crosststamp()
1253 xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real); in get_device_system_crosststamp()
1254 xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw); in get_device_system_crosststamp()
1270 !cycle_between(history_begin->cycles, in get_device_system_crosststamp()
1272 history_begin->cs_was_changed_seq != cs_was_changed_seq) in get_device_system_crosststamp()
1273 return -EINVAL; in get_device_system_crosststamp()
1274 partial_history_cycles = cycles - system_counterval.cycles; in get_device_system_crosststamp()
1275 total_history_cycles = cycles - history_begin->cycles; in get_device_system_crosststamp()
1277 history_begin->clock_was_set_seq != clock_was_set_seq; in get_device_system_crosststamp()
1292 * do_settimeofday64 - Sets the time of day.
1305 return -EINVAL; in do_settimeofday64()
1313 ts_delta.tv_sec = ts->tv_sec - xt.tv_sec; in do_settimeofday64()
1314 ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec; in do_settimeofday64()
1316 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) { in do_settimeofday64()
1317 ret = -EINVAL; in do_settimeofday64()
1321 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta)); in do_settimeofday64()
1341 * timekeeping_inject_offset - Adds or subtracts from the current time.
1353 if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) in timekeeping_inject_offset()
1354 return -EINVAL; in timekeeping_inject_offset()
1363 if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 || in timekeeping_inject_offset()
1365 ret = -EINVAL; in timekeeping_inject_offset()
1370 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts)); in timekeeping_inject_offset()
1385 * Indicates if there is an offset between the system clock and the hardware
1386 * clock/persistent clock/rtc.
1397 * hard to make the program warp the clock precisely n hours) or
1400 * - TYT, 1992-01-01
1402 * The best thing to do is to keep the CMOS clock in universal time (UTC)
1419 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1424 tk->tai_offset = tai_offset; in __timekeeping_set_tai_offset()
1425 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0)); in __timekeeping_set_tai_offset()
1429 * change_clocksource - Swaps clocksources if a new one is available
1447 * for built-in code (owner == NULL) as well. in change_clocksource()
1449 if (try_module_get(new->owner)) { in change_clocksource()
1450 if (!new->enable || new->enable(new) == 0) { in change_clocksource()
1451 old = tk->tkr_mono.clock; in change_clocksource()
1453 if (old->disable) in change_clocksource()
1454 old->disable(old); in change_clocksource()
1455 module_put(old->owner); in change_clocksource()
1457 module_put(new->owner); in change_clocksource()
1469 * timekeeping_notify - Install a new clock source
1470 * @clock: pointer to the clock source
1472 * This function is called from clocksource.c after a new, better clock
1475 int timekeeping_notify(struct clocksource *clock) in timekeeping_notify() argument
1479 if (tk->tkr_mono.clock == clock) in timekeeping_notify()
1481 stop_machine(change_clocksource, clock, NULL); in timekeeping_notify()
1483 return tk->tkr_mono.clock == clock ? 0 : -1; in timekeeping_notify()
1487 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1490 * Returns the raw monotonic time (completely un-modified by ntp)
1500 ts->tv_sec = tk->raw_sec; in ktime_get_raw_ts64()
1501 nsecs = timekeeping_get_ns(&tk->tkr_raw); in ktime_get_raw_ts64()
1505 ts->tv_nsec = 0; in ktime_get_raw_ts64()
1512 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1523 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; in timekeeping_valid_for_hres()
1531 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1542 ret = tk->tkr_mono.clock->max_idle_ns; in timekeeping_max_deferment()
1550 * read_persistent_clock64 - Return time from the persistent clock.
1553 * Reads the time from the battery backed persistent clock.
1556 * XXX - Do be sure to remove it once all arches implement it.
1560 ts->tv_sec = 0; in read_persistent_clock64()
1561 ts->tv_nsec = 0; in read_persistent_clock64()
1565 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1569 * wall_time - current time as returned by persistent clock
1570 * boot_offset - offset that is defined as wall_time - boot_time
1573 * support dedicated boot time clock will provide the best estimate of the
1599 /* Flag for if there is a persistent clock on this platform */
1603 * timekeeping_init - Initializes the clocksource and common timekeeping values
1609 struct clocksource *clock; in timekeeping_init() local
1617 pr_warn("Persistent clock returned invalid value"); in timekeeping_init()
1634 clock = clocksource_default_clock(); in timekeeping_init()
1635 if (clock->enable) in timekeeping_init()
1636 clock->enable(clock); in timekeeping_init()
1637 tk_setup_internals(tk, clock); in timekeeping_init()
1640 tk->raw_sec = 0; in timekeeping_init()
1650 /* time in seconds when suspend began for persistent clock */
1654 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1670 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); in __timekeeping_inject_sleeptime()
1679 * 1) non-stop clocksource
1680 * 2) persistent clock (ie: RTC accessible when irqs are off)
1712 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1716 * because their RTC/persistent clock is only accessible when irqs are enabled.
1747 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1752 struct clocksource *clock = tk->tkr_mono.clock; in timekeeping_resume() local
1769 * used: Nonstop clocksource during suspend, persistent clock and rtc in timekeeping_resume()
1774 * suspend-nonstop clocksource -> persistent clock -> rtc in timekeeping_resume()
1778 cycle_now = tk_clock_read(&tk->tkr_mono); in timekeeping_resume()
1779 nsec = clocksource_stop_suspend_timing(clock, cycle_now); in timekeeping_resume()
1793 /* Re-base the last cycle value */ in timekeeping_resume()
1794 tk->tkr_mono.cycle_last = cycle_now; in timekeeping_resume()
1795 tk->tkr_raw.cycle_last = cycle_now; in timekeeping_resume()
1797 tk->ntp_error = 0; in timekeeping_resume()
1840 curr_clock = tk->tkr_mono.clock; in timekeeping_suspend()
1841 cycle_now = tk->tkr_mono.cycle_last; in timekeeping_suspend()
1898 s64 interval = tk->cycle_interval; in timekeeping_apply_adjustment()
1902 } else if (mult_adj == -1) { in timekeeping_apply_adjustment()
1903 interval = -interval; in timekeeping_apply_adjustment()
1904 offset = -offset; in timekeeping_apply_adjustment()
1921 * xtime_interval = cycle_interval * mult in timekeeping_apply_adjustment()
1922 * So if mult is being incremented by one: in timekeeping_apply_adjustment()
1923 * xtime_interval = cycle_interval * (mult + 1) in timekeeping_apply_adjustment()
1925 * xtime_interval = (cycle_interval * mult) + cycle_interval in timekeeping_apply_adjustment()
1929 * So offset stores the non-accumulated cycles. Thus the current in timekeeping_apply_adjustment()
1932 * Now, even though we're adjusting the clock frequency, we have in timekeeping_apply_adjustment()
1953 * xtime_nsec_2 = xtime_nsec_1 - offset in timekeeping_apply_adjustment()
1955 * xtime_nsec -= offset in timekeeping_apply_adjustment()
1957 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) { in timekeeping_apply_adjustment()
1958 /* NTP adjustment caused clocksource mult overflow */ in timekeeping_apply_adjustment()
1963 tk->tkr_mono.mult += mult_adj; in timekeeping_apply_adjustment()
1964 tk->xtime_interval += interval; in timekeeping_apply_adjustment()
1965 tk->tkr_mono.xtime_nsec -= offset; in timekeeping_apply_adjustment()
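
The final xtime_nsec -= offset keeps "now" continuous across the frequency change: the offset cycles not yet accumulated will from here on be multiplied by the new mult, so their extra contribution (offset * mult_adj in shifted ns) is subtracted up front. A worked check with toy values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mult = 100, mult_adj = 1;	/* assumed values */
		uint64_t offset = 7;		/* unaccumulated cycles */
		uint64_t xtime_nsec = 1000;	/* shifted ns */

		uint64_t before = offset * mult + xtime_nsec;

		mult += mult_adj;
		xtime_nsec -= offset * mult_adj; /* the subtraction above */

		uint64_t after = offset * mult + xtime_nsec;

		/* prints before=1700 after=1700: readers see no jump */
		printf("before=%llu after=%llu\n",
		       (unsigned long long)before, (unsigned long long)after);
		return 0;
	}
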
1974 u32 mult; in timekeeping_adjust() local
1980 if (likely(tk->ntp_tick == ntp_tick_length())) { in timekeeping_adjust()
1981 mult = tk->tkr_mono.mult - tk->ntp_err_mult; in timekeeping_adjust()
1983 tk->ntp_tick = ntp_tick_length(); in timekeeping_adjust()
1984 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) - in timekeeping_adjust()
1985 tk->xtime_remainder, tk->cycle_interval); in timekeeping_adjust()
1989 * If the clock is behind the NTP time, increase the multiplier by 1 in timekeeping_adjust()
1991 * tick division, the clock will slow down. Otherwise it will stay in timekeeping_adjust()
1992 * ahead until the tick length changes to a non-divisible value. in timekeeping_adjust()
1994 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0; in timekeeping_adjust()
1995 mult += tk->ntp_err_mult; in timekeeping_adjust()
1997 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult); in timekeeping_adjust()
1999 if (unlikely(tk->tkr_mono.clock->maxadj && in timekeeping_adjust()
2000 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult) in timekeeping_adjust()
2001 > tk->tkr_mono.clock->maxadj))) { in timekeeping_adjust()
2004 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult, in timekeeping_adjust()
2005 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj); in timekeeping_adjust()
2018 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) { in timekeeping_adjust()
2019 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC << in timekeeping_adjust()
2020 tk->tkr_mono.shift; in timekeeping_adjust()
2021 tk->xtime_sec--; in timekeeping_adjust()
2022 tk->skip_second_overflow = 1; in timekeeping_adjust()
2027 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2036 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in accumulate_nsecs_to_secs()
2039 while (tk->tkr_mono.xtime_nsec >= nsecps) { in accumulate_nsecs_to_secs()
2042 tk->tkr_mono.xtime_nsec -= nsecps; in accumulate_nsecs_to_secs()
2043 tk->xtime_sec++; in accumulate_nsecs_to_secs()
2049 if (unlikely(tk->skip_second_overflow)) { in accumulate_nsecs_to_secs()
2050 tk->skip_second_overflow = 0; in accumulate_nsecs_to_secs()
2055 leap = second_overflow(tk->xtime_sec); in accumulate_nsecs_to_secs()
2059 tk->xtime_sec += leap; in accumulate_nsecs_to_secs()
2064 timespec64_sub(tk->wall_to_monotonic, ts)); in accumulate_nsecs_to_secs()
2066 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); in accumulate_nsecs_to_secs()
2075 * logarithmic_accumulation - shifted accumulation of cycles
2086 u64 interval = tk->cycle_interval << shift; in logarithmic_accumulation()
2094 offset -= interval; in logarithmic_accumulation()
2095 tk->tkr_mono.cycle_last += interval; in logarithmic_accumulation()
2096 tk->tkr_raw.cycle_last += interval; in logarithmic_accumulation()
2098 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift; in logarithmic_accumulation()
2102 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; in logarithmic_accumulation()
2103 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in logarithmic_accumulation()
2104 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { in logarithmic_accumulation()
2105 tk->tkr_raw.xtime_nsec -= snsec_per_sec; in logarithmic_accumulation()
2106 tk->raw_sec++; in logarithmic_accumulation()
2109 /* Accumulate error between NTP and clock interval */ in logarithmic_accumulation()
2110 tk->ntp_error += tk->ntp_tick << shift; in logarithmic_accumulation()
2111 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << in logarithmic_accumulation()
2112 (tk->ntp_error_shift + shift); in logarithmic_accumulation()
2118 * timekeeping_advance - Updates the timekeeper to the current time and
2137 offset = real_tk->cycle_interval; in timekeeping_advance()
2142 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), in timekeeping_advance()
2143 tk->tkr_mono.cycle_last, tk->tkr_mono.mask); in timekeeping_advance()
2146 if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK) in timekeeping_advance()
2161 shift = ilog2(offset) - ilog2(tk->cycle_interval); in timekeeping_advance()
2164 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; in timekeeping_advance()
2166 while (offset >= tk->cycle_interval) { in timekeeping_advance()
2169 if (offset < tk->cycle_interval<<shift) in timekeeping_advance()
2170 shift--; in timekeeping_advance()
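
The loop above consumes the offset in power-of-two multiples of cycle_interval, starting at the largest chunk that fits and halving as the remainder shrinks, so a large backlog is accumulated in O(log n) passes rather than one tick at a time. A user-space sketch with assumed values (the maxshift clamp derived from ntp_tick_length() is elided):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t cycle_interval = 10000, offset = 170000;
		/* ilog2(offset) - ilog2(cycle_interval) == 4 here */
		int shift = __builtin_clzll(cycle_interval)
			  - __builtin_clzll(offset);

		while (offset >= cycle_interval) {
			uint64_t chunk = cycle_interval << shift;

			if (offset >= chunk) {	/* accumulate one chunk */
				offset -= chunk;
				printf("accumulated %llu cycles\n",
				       (unsigned long long)chunk);
			}
			if (offset < (cycle_interval << shift))
				shift--;
		}
		/* prints chunks of 160000 and 10000 cycles */
		return 0;
	}
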
2205 * update_wall_time - Uses the current clocksource to increment the wall time
2214 * getboottime64 - Return the real time of system boot.
2217 * Returns the wall-time of boot in a timespec64.
2221 * basically means that however wrong your real time clock is at boot time,
2227 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); in getboottime64()
2256 mono = tk->wall_to_monotonic; in ktime_get_coarse_ts64()
2274 * ktime_get_update_offsets_now - hrtimer helper
2275 * @cwsseq: pointer to check and store the clock was set sequence number
2276 * @offs_real: pointer to storage for monotonic -> realtime offset
2277 * @offs_boot: pointer to storage for monotonic -> boottime offset
2278 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2297 base = tk->tkr_mono.base; in ktime_get_update_offsets_now()
2298 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_update_offsets_now()
2301 if (*cwsseq != tk->clock_was_set_seq) { in ktime_get_update_offsets_now()
2302 *cwsseq = tk->clock_was_set_seq; in ktime_get_update_offsets_now()
2303 *offs_real = tk->offs_real; in ktime_get_update_offsets_now()
2304 *offs_boot = tk->offs_boot; in ktime_get_update_offsets_now()
2305 *offs_tai = tk->offs_tai; in ktime_get_update_offsets_now()
2309 if (unlikely(base >= tk->next_leap_ktime)) in ktime_get_update_offsets_now()
2310 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0)); in ktime_get_update_offsets_now()
2318 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2322 if (txc->modes & ADJ_ADJTIME) { in timekeeping_validate_timex()
2324 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT)) in timekeeping_validate_timex()
2325 return -EINVAL; in timekeeping_validate_timex()
2326 if (!(txc->modes & ADJ_OFFSET_READONLY) && in timekeeping_validate_timex()
2328 return -EPERM; in timekeeping_validate_timex()
2330 /* In order to modify anything, you gotta be super-user! */ in timekeeping_validate_timex()
2331 if (txc->modes && !capable(CAP_SYS_TIME)) in timekeeping_validate_timex()
2332 return -EPERM; in timekeeping_validate_timex()
2337 if (txc->modes & ADJ_TICK && in timekeeping_validate_timex()
2338 (txc->tick < 900000/USER_HZ || in timekeeping_validate_timex()
2339 txc->tick > 1100000/USER_HZ)) in timekeeping_validate_timex()
2340 return -EINVAL; in timekeeping_validate_timex()
2343 if (txc->modes & ADJ_SETOFFSET) { in timekeeping_validate_timex()
2344 /* In order to inject time, you gotta be super-user! */ in timekeeping_validate_timex()
2346 return -EPERM; in timekeeping_validate_timex()
2353 * The field tv_usec/tv_nsec must always be non-negative and in timekeeping_validate_timex()
2356 if (txc->time.tv_usec < 0) in timekeeping_validate_timex()
2357 return -EINVAL; in timekeeping_validate_timex()
2359 if (txc->modes & ADJ_NANO) { in timekeeping_validate_timex()
2360 if (txc->time.tv_usec >= NSEC_PER_SEC) in timekeeping_validate_timex()
2361 return -EINVAL; in timekeeping_validate_timex()
2363 if (txc->time.tv_usec >= USEC_PER_SEC) in timekeeping_validate_timex()
2364 return -EINVAL; in timekeeping_validate_timex()
2370 * only happen on 64-bit systems: in timekeeping_validate_timex()
2372 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) { in timekeeping_validate_timex()
2373 if (LLONG_MIN / PPM_SCALE > txc->freq) in timekeeping_validate_timex()
2374 return -EINVAL; in timekeeping_validate_timex()
2375 if (LLONG_MAX / PPM_SCALE < txc->freq) in timekeeping_validate_timex()
2376 return -EINVAL; in timekeeping_validate_timex()
2384 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2400 if (txc->modes & ADJ_SETOFFSET) { in do_adjtimex()
2402 delta.tv_sec = txc->time.tv_sec; in do_adjtimex()
2403 delta.tv_nsec = txc->time.tv_usec; in do_adjtimex()
2404 if (!(txc->modes & ADJ_NANO)) in do_adjtimex()
2420 orig_tai = tai = tk->tai_offset; in do_adjtimex()
2435 if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK)) in do_adjtimex()
2448 * hardpps() - Accessor function to NTP __hardpps function
2466 * xtime_update() - advances the timekeeping infrastructure