Lines Matching +full:timebase-frequency

1 // SPDX-License-Identifier: GPL-2.0-or-later
8 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
11 * to make clock more stable (2.4.0-test5). The only thing
20 * - improve precision and reproducibility of timebase frequency
22 * - for astronomical applications: add a new function to get
26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
49 #include <linux/posix-timers.h>
71 #include <asm/asm-prototypes.h>
80 .name = "timebase",
144 * Factor for converting from cputime_t (timebase ticks) to
145 * microseconds. This is stored as 0.64 fixed-point binary fraction.
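The 0.64 fixed-point conversion described above is worth a worked example. Below is a minimal user-space sketch, not code from this file: mulhdu() emulates the powerpc instruction of the same name (high 64 bits of a 64x64-bit multiply), and the 512 MHz timebase is an assumed value.

#include <stdint.h>
#include <stdio.h>

/* High 64 bits of an unsigned 64x64 -> 128-bit multiply, which is what
 * the powerpc mulhdu instruction computes in one step. */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	/* Assumed 512 MHz timebase.  The 0.64 factor is
	 * 2^64 * (microseconds per second) / (ticks per second),
	 * i.e. microseconds-per-tick as a pure binary fraction. */
	uint64_t tb_freq = 512000000;
	uint64_t tb_to_us =
		(uint64_t)(((unsigned __int128)1000000 << 64) / tb_freq);

	/* Multiplying ticks by a 0.64 fraction and keeping the high half
	 * yields the integer quotient: one second of ticks converts to
	 * exactly 1000000 us. */
	printf("%llu us\n", (unsigned long long)mulhdu(tb_freq, tb_to_us));
	return 0;
}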
164 * or if that doesn't exist return the timebase value passed in.
185 u64 i = local_paca->dtl_ridx; in scan_dispatch_log()
186 struct dtl_entry *dtl = local_paca->dtl_curr; in scan_dispatch_log()
187 struct dtl_entry *dtl_end = local_paca->dispatch_log_end; in scan_dispatch_log()
188 struct lppaca *vpa = local_paca->lppaca_ptr; in scan_dispatch_log()
196 if (i == be64_to_cpu(vpa->dtl_idx)) in scan_dispatch_log()
198 while (i < be64_to_cpu(vpa->dtl_idx)) { in scan_dispatch_log()
199 dtb = be64_to_cpu(dtl->timebase); in scan_dispatch_log()
200 tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + in scan_dispatch_log()
201 be32_to_cpu(dtl->ready_to_enqueue_time); in scan_dispatch_log()
203 if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { in scan_dispatch_log()
205 i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; in scan_dispatch_log()
206 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); in scan_dispatch_log()
217 dtl = local_paca->dispatch_log; in scan_dispatch_log()
219 local_paca->dtl_ridx = i; in scan_dispatch_log()
220 local_paca->dtl_curr = dtl; in scan_dispatch_log()
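The skip-ahead in the middle of scan_dispatch_log() is the subtle part: the dispatch log is a ring of N_DISPATCH_LOG entries indexed by ever-increasing counters, so a reader that has fallen a full ring or more behind the hypervisor's write index must jump forward to the oldest entry that still survives. A standalone sketch of just that catch-up rule, with an illustrative ring size and indices (none of these values are from the file):

#include <stdint.h>
#include <stdio.h>

#define N_LOG 4	/* illustrative ring size; the kernel's is N_DISPATCH_LOG */

int main(void)
{
	uint64_t ridx = 2;	/* reader's running index (dtl_ridx) */
	uint64_t widx = 11;	/* writer's running index (vpa->dtl_idx) */

	/* Entries more than one full ring behind the writer have been
	 * overwritten; resume at the oldest one that still exists. */
	if (ridx + N_LOG < widx)
		ridx = widx - N_LOG;

	/* The slot to read is the running index modulo the ring size. */
	printf("resume at index %llu, slot %llu\n",
	       (unsigned long long)ridx,
	       (unsigned long long)(ridx % N_LOG));
	return 0;
}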
232 struct cpu_accounting_data *acct = &local_paca->accounting; in accumulate_stolen_time()
242 sst = scan_dispatch_log(acct->starttime_user); in accumulate_stolen_time()
243 ust = scan_dispatch_log(acct->starttime); in accumulate_stolen_time()
244 acct->stime -= sst; in accumulate_stolen_time()
245 acct->utime -= ust; in accumulate_stolen_time()
246 acct->steal_time += ust + sst; in accumulate_stolen_time()
256 if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) in calculate_stolen_time()
283 deltascaled = nowscaled - acct->startspurr; in vtime_delta_scaled()
284 acct->startspurr = nowscaled; in vtime_delta_scaled()
285 utime = acct->utime - acct->utime_sspurr; in vtime_delta_scaled()
286 acct->utime_sspurr = acct->utime; in vtime_delta_scaled()
293 * and user time (udelta) values obtained from the timebase in vtime_delta_scaled()
295 * the user ticks get saved up in paca->user_time_scaled to be in vtime_delta_scaled()
303 utime_scaled = deltascaled - stime_scaled; in vtime_delta_scaled()
308 acct->utime_scaled += utime_scaled; in vtime_delta_scaled()
324 stime = now - acct->starttime; in vtime_delta()
325 acct->starttime = now; in vtime_delta()
341 stime -= min(stime, steal_time); in vtime_account_kernel()
342 acct->steal_time += steal_time; in vtime_account_kernel()
344 if ((tsk->flags & PF_VCPU) && !irq_count()) { in vtime_account_kernel()
345 acct->gtime += stime; in vtime_account_kernel()
347 acct->utime_scaled += stime_scaled; in vtime_account_kernel()
351 acct->hardirq_time += stime; in vtime_account_kernel()
353 acct->softirq_time += stime; in vtime_account_kernel()
355 acct->stime += stime; in vtime_account_kernel()
358 acct->stime_scaled += stime_scaled; in vtime_account_kernel()
370 acct->idle_time += stime + steal_time; in vtime_account_idle()
377 if (acct->utime_scaled) in vtime_flush_scaled()
378 tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled); in vtime_flush_scaled()
379 if (acct->stime_scaled) in vtime_flush_scaled()
380 tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled); in vtime_flush_scaled()
382 acct->utime_scaled = 0; in vtime_flush_scaled()
383 acct->utime_sspurr = 0; in vtime_flush_scaled()
384 acct->stime_scaled = 0; in vtime_flush_scaled()
393 * get_paca()->user_time_scaled is up to date.
399 if (acct->utime) in vtime_flush()
400 account_user_time(tsk, cputime_to_nsecs(acct->utime)); in vtime_flush()
402 if (acct->gtime) in vtime_flush()
403 account_guest_time(tsk, cputime_to_nsecs(acct->gtime)); in vtime_flush()
405 if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) { in vtime_flush()
406 account_steal_time(cputime_to_nsecs(acct->steal_time)); in vtime_flush()
407 acct->steal_time = 0; in vtime_flush()
410 if (acct->idle_time) in vtime_flush()
411 account_idle_time(cputime_to_nsecs(acct->idle_time)); in vtime_flush()
413 if (acct->stime) in vtime_flush()
414 account_system_index_time(tsk, cputime_to_nsecs(acct->stime), in vtime_flush()
417 if (acct->hardirq_time) in vtime_flush()
418 account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time), in vtime_flush()
420 if (acct->softirq_time) in vtime_flush()
421 account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time), in vtime_flush()
426 acct->utime = 0; in vtime_flush()
427 acct->gtime = 0; in vtime_flush()
428 acct->idle_time = 0; in vtime_flush()
429 acct->stime = 0; in vtime_flush()
430 acct->hardirq_time = 0; in vtime_flush()
431 acct->softirq_time = 0; in vtime_flush()
452 while (mftb() - start < loops) in __delay()
471 return regs->link; in profile_pc()
481 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
508 #else /* 32-bit */
521 * 64-bit code that uses irq soft-mask can just cause an immediate in arch_irq_work_raise()
527 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on) in arch_irq_work_raise()
545 * timer_interrupt - gets called when the decrementer overflows,
578 /* Conditionally hard-enable interrupts now that the DEC has been in timer_interrupt()
601 if (evt->event_handler) in timer_interrupt()
602 evt->event_handler(evt); in timer_interrupt()
605 now = *next_tb - now; in timer_interrupt()
672 * Scheduler clock - returns current time in nanosec units.
676 * are 64-bit unsigned numbers.
680 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; in sched_clock()
687 * Running clock - attempts to give a view of time passing for a virtualised
695 * timebase into the VTB when it takes a guest off the CPU, reading the in running_clock()
703 return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; in running_clock()
711 return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL]; in running_clock()
721 /* The cpu node should have timebase and clock frequency properties */ in get_freq()
747 * The watchdog may have already been enabled by u-boot. So leave in start_cpu_decrementer()
760 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && in generic_calibrate_decr()
761 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { in generic_calibrate_decr()
763 printk(KERN_ERR "WARNING: Estimating decrementer frequency " in generic_calibrate_decr()
769 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && in generic_calibrate_decr()
770 !get_freq("clock-frequency", 1, &ppc_proc_freq)) { in generic_calibrate_decr()
772 printk(KERN_ERR "WARNING: Estimating processor frequency " in generic_calibrate_decr()
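Both lookups above try an extended two-cell property before the plain one-cell one, because a frequency can exceed 32 bits and a device-tree value is stored as consecutive 32-bit cells. A runnable sketch of the cell-folding step (the kernel does this, plus a big-endian byte swap per cell, in of_read_ulong(); the buffer contents here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Fold n consecutive 32-bit cells into one value, most significant
 * cell first.  (The kernel additionally byte-swaps each be32 cell;
 * that step is omitted here since these are host-order integers.) */
static uint64_t read_cells(const uint32_t *cells, int n)
{
	uint64_t v = 0;

	while (n--)
		v = (v << 32) | *cells++;
	return v;
}

int main(void)
{
	/* Illustrative contents: "timebase-frequency" fits one cell,
	 * a hypothetical 5 GHz "ibm,extended-clock-frequency" needs two. */
	uint32_t tb[1] = { 512000000 };
	uint32_t clk[2] = { 0x1, 0x2a05f200 };	/* 0x12a05f200 = 5e9 */

	printf("timebase: %llu Hz\n", (unsigned long long)read_cells(tb, 1));
	printf("clock:    %llu Hz\n", (unsigned long long)read_cells(clk, 2));
	return 0;
}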
782 return -ENODEV; in update_persistent_clock64()
794 ts->tv_nsec = 0; in __read_persistent_clock()
803 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; in __read_persistent_clock()
808 ts->tv_sec = 0; in __read_persistent_clock()
813 ts->tv_sec = rtc_tm_to_time64(&tm); in __read_persistent_clock()
821 if (ts->tv_sec < 0) { in read_persistent_clock64()
822 ts->tv_sec = 0; in read_persistent_clock64()
823 ts->tv_nsec = 0; in read_persistent_clock64()
838 struct clocksource *clock = tk->tkr_mono.clock; in update_vsyscall()
839 u32 mult = tk->tkr_mono.mult; in update_vsyscall()
840 u32 shift = tk->tkr_mono.shift; in update_vsyscall()
841 u64 cycle_last = tk->tkr_mono.cycle_last; in update_vsyscall()
848 xt.tv_sec = tk->xtime_sec; in update_vsyscall()
849 xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in update_vsyscall()
852 ++vdso_data->tb_update_count; in update_vsyscall()
857 * 0.64 fixed-point fraction. in update_vsyscall()
859 * (as long as the timebase frequency is >= 1.049 MHz) in update_vsyscall()
862 * For a shift of 24 the error is about 0.5e-9, or about 0.5ns in update_vsyscall()
864 * For high frequency clocks such as the 512MHz timebase clock in update_vsyscall()
867 * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the in update_vsyscall()
870 * the error is only about 1.2e-12, or 0.7ns over 10 minutes). in update_vsyscall()
872 if (mult <= 62500000 && clock->shift >= 16) in update_vsyscall()
873 new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16); in update_vsyscall()
875 new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift); in update_vsyscall()
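The two magic constants in that branch are easy to sanity-check with 128-bit arithmetic; a throwaway check (round-to-nearest explains the trailing ...067):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 295147905179      ~= 2^(20+64-16) / 1e9 = 2^68 / 1e9
	 * 19342813113834067 ~= 2^(20+64)    / 1e9 = 2^84 / 1e9 */
	unsigned __int128 half = 500000000;	/* round to nearest */
	unsigned __int128 c68 =
		(((unsigned __int128)1 << 68) + half) / 1000000000;
	unsigned __int128 c84 =
		(((unsigned __int128)1 << 84) + half) / 1000000000;

	printf("2^68/1e9 ~= %llu\n", (unsigned long long)c68);	/* 295147905179 */
	printf("2^84/1e9 ~= %llu\n", (unsigned long long)c84);	/* 19342813113834067 */
	return 0;
}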
878 * Compute the fractional second in units of 2^-32 seconds. in update_vsyscall()
879 * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift in update_vsyscall()
881 * it in units of 2^-32 seconds. in update_vsyscall()
883 * generates shift values in the range 0 - 32. in update_vsyscall()
885 frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift); in update_vsyscall()
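Worked through with shift = 24 and half a second of accumulated fraction: xtime_nsec holds 5e8 << 24, so the shift above turns it into 5e8 * 2^32, i.e. nanoseconds scaled by 2^32; the following step (not among these matches, but presumably a division by NSEC_PER_SEC) then leaves 0.5 * 2^32 = 2^31, which is half a second in 2^-32 units.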
890 * stamp_xsec is in units of 2^-20 seconds. in update_vsyscall()
893 new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC; in update_vsyscall()
904 vdso_data->tb_orig_stamp = cycle_last; in update_vsyscall()
905 vdso_data->stamp_xsec = new_stamp_xsec; in update_vsyscall()
906 vdso_data->tb_to_xs = new_tb_to_xs; in update_vsyscall()
907 vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec; in update_vsyscall()
908 vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec; in update_vsyscall()
909 vdso_data->stamp_xtime_sec = xt.tv_sec; in update_vsyscall()
910 vdso_data->stamp_xtime_nsec = xt.tv_nsec; in update_vsyscall()
911 vdso_data->stamp_sec_fraction = frac_sec; in update_vsyscall()
912 vdso_data->hrtimer_res = hrtimer_resolution; in update_vsyscall()
914 ++(vdso_data->tb_update_count); in update_vsyscall()
919 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; in update_vsyscall_tz()
920 vdso_data->tz_dsttime = sys_tz.tz_dsttime; in update_vsyscall_tz()
929 clock->name); in clocksource_init()
934 clock->name, clock->mult, clock->shift); in clocksource_init()
961 dec->cpumask = cpumask_of(cpu); in register_decrementer_clockevent()
966 dec->name, dec->mult, dec->shift, cpu); in register_decrementer_clockevent()
969 decrementer_clockevent.mult = dec->mult; in register_decrementer_clockevent()
970 decrementer_clockevent.shift = dec->shift; in register_decrementer_clockevent()
1000 if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) { in set_decrementer_max()
1002 pr_warn("time_init: firmware supplied invalid ibm,dec-bits"); in set_decrementer_max()
1007 decrementer_max = (1ul << (bits - 1)) - 1; in set_decrementer_max()
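For the common bits = 32 case this yields decrementer_max = 2^31 - 1 = 0x7fffffff: the decrementer typically raises its exception when its top bit changes from 0 to 1, so that is the largest count that can be programmed without firing immediately.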
1043 /* Normal PowerPC with timebase register */ in time_init()
1045 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", in time_init()
1047 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", in time_init()
1058 * which is the timebase frequency. in time_init()
1060 * the 128-bit result as a 64.64 fixed-point number. in time_init()
1073 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ in time_init()
1078 sys_tz.tz_minuteswest = -timezone_offset / 60; in time_init()
1082 vdso_data->tb_update_count = 0; in time_init()
1083 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; in time_init()
1104 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1120 ra = ((u64)(a - (w * divisor)) << 32) + b; in div128_by_32()
1131 dr->result_high = ((u64)w << 32) + x; in div128_by_32()
1132 dr->result_low = ((u64)y << 32) + z; in div128_by_32()
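div128_by_32() and the 64.64 comment in time_init() fit together: dividing 1e9 * 2^64 by the tick rate gives nanoseconds-per-tick as a 64.64 fixed-point number, which is then shifted down until its integer part vanishes so that sched_clock() can use a single mulhdu plus a shift. A user-space sketch of that derivation, with __int128 standing in for the schoolbook long division and an assumed 512 MHz timebase:

#include <stdint.h>
#include <stdio.h>

static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	uint64_t tb_freq = 512000000;	/* assumed timebase frequency */
	int shift = 0;
	uint64_t scale;

	/* ns-per-tick = 1e9 / tb_freq, kept as a 64.64 fixed-point
	 * number; __int128 stands in for div128_by_32(). */
	unsigned __int128 res =
		((unsigned __int128)1000000000 << 64) / tb_freq;

	/* Halve until the integer part is gone, remembering how far:
	 * the same reduction time_init() applies to produce
	 * tb_to_ns_scale and tb_to_ns_shift. */
	while (res >> 64) {
		res >>= 1;
		shift++;
	}
	scale = (uint64_t)res;

	/* sched_clock()'s formula: one second of ticks ~ 1e9 ns. */
	printf("scale=%llu shift=%d -> %llu ns\n",
	       (unsigned long long)scale, shift,
	       (unsigned long long)(mulhdu(tb_freq, scale) << shift));
	return 0;
}

At 512 MHz the loop runs once, giving shift = 1, and one second of ticks converts back to exactly 1e9 ns.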
1136 /* We don't need to calibrate delay, we use the CPU timebase for that */
1155 return -EOPNOTSUPP; in rtc_generic_set_time()
1158 return -EOPNOTSUPP; in rtc_generic_set_time()
1173 return -ENODEV; in rtc_init()
1175 pdev = platform_device_register_data(NULL, "rtc-generic", -1, in rtc_init()