Lines Matching +full:fixed +full:factor +full:clock

7  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
10 * to make clock more stable (2.4.0-test5). The only thing
19 * - improve precision and reproducibility of timebase frequency
21 * against the Titan chip's clock.)
22 * - for astronomical applications: add a new function to get
26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
53 #include <linux/posix-timers.h>
162 * These are all stored as 0.64 fixed-point binary fractions.
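The "0.64 fixed-point binary fractions" above are 64-bit values interpreted as numerator / 2^64, so each factor encodes a ratio below 1.0 (for example, jiffies per timebase tick). A minimal user-space sketch of how such a factor is built and applied, assuming GCC/Clang's unsigned __int128 (the kernel uses div128_by_32() and mulhdu() instead), with illustrative HZ and timebase numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* High 64 bits of a 64x64-bit multiply: applies a 0.64 fraction. */
    static uint64_t mul_0_64(uint64_t ticks, uint64_t factor)
    {
        return (uint64_t)(((unsigned __int128)ticks * factor) >> 64);
    }

    int main(void)
    {
        const uint64_t hz = 250;                      /* illustrative HZ */
        const uint64_t tb_ticks_per_sec = 512000000;  /* illustrative timebase freq */

        /* factor / 2^64 == HZ / tb_ticks_per_sec, i.e. jiffies per tick (< 1.0) */
        uint64_t factor =
            (uint64_t)(((unsigned __int128)hz << 64) / tb_ticks_per_sec);

        /* One second's worth of ticks converts back to (about) HZ jiffies. */
        printf("jiffies in one second: %llu\n",
               (unsigned long long)mul_0_64(tb_ticks_per_sec, factor));
        return 0;
    }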
214 u64 i = local_paca->dtl_ridx; in scan_dispatch_log()
215 struct dtl_entry *dtl = local_paca->dtl_curr; in scan_dispatch_log()
216 struct dtl_entry *dtl_end = local_paca->dispatch_log_end; in scan_dispatch_log()
217 struct lppaca *vpa = local_paca->lppaca_ptr; in scan_dispatch_log()
225 if (i == vpa->dtl_idx) in scan_dispatch_log()
227 while (i < vpa->dtl_idx) { in scan_dispatch_log()
230 dtb = dtl->timebase; in scan_dispatch_log()
231 tb_delta = dtl->enqueue_to_dispatch_time + in scan_dispatch_log()
232 dtl->ready_to_enqueue_time; in scan_dispatch_log()
234 if (i + N_DISPATCH_LOG < vpa->dtl_idx) { in scan_dispatch_log()
236 i = vpa->dtl_idx - N_DISPATCH_LOG; in scan_dispatch_log()
237 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); in scan_dispatch_log()
246 dtl = local_paca->dispatch_log; in scan_dispatch_log()
248 local_paca->dtl_ridx = i; in scan_dispatch_log()
249 local_paca->dtl_curr = dtl; in scan_dispatch_log()
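The scan_dispatch_log() fragments above walk the hypervisor-written dispatch trace log: entries no newer than the stop timebase are summed as stolen time, and if the hypervisor's write index has run more than N_DISPATCH_LOG ahead, the reader jumps forward to the oldest surviving entry. A simplified user-space model of that ring walk (the struct fields, ring size, and test values are stand-ins; the real code also maintains the dtl_curr/dispatch_log_end wrap pointers and uses barriers):

    #include <stdint.h>
    #include <stdio.h>

    #define N_DISPATCH_LOG 4                /* tiny ring just for the sketch */

    struct dtl_entry {                      /* trimmed stand-in for the real entry */
        uint64_t timebase;
        uint32_t enqueue_to_dispatch_time;
        uint32_t ready_to_enqueue_time;
    };

    static struct dtl_entry log[N_DISPATCH_LOG];
    static uint64_t dtl_widx;               /* producer index (vpa->dtl_idx) */
    static uint64_t dtl_ridx;               /* consumer index (paca->dtl_ridx) */

    /* Sum stolen time for entries no newer than stop_tb. */
    static uint64_t scan_log(uint64_t stop_tb)
    {
        uint64_t i = dtl_ridx, stolen = 0;

        while (i < dtl_widx) {
            /* Producer lapped us: only the last N_DISPATCH_LOG entries survive. */
            if (i + N_DISPATCH_LOG < dtl_widx) {
                i = dtl_widx - N_DISPATCH_LOG;
                continue;
            }
            struct dtl_entry *e = &log[i % N_DISPATCH_LOG];
            if (e->timebase > stop_tb)
                break;
            stolen += e->enqueue_to_dispatch_time + e->ready_to_enqueue_time;
            ++i;
        }
        dtl_ridx = i;
        return stolen;
    }

    int main(void)
    {
        /* Pretend the hypervisor logged two dispatches before timebase 100. */
        log[0] = (struct dtl_entry){ .timebase = 10,
            .enqueue_to_dispatch_time = 5, .ready_to_enqueue_time = 2 };
        log[1] = (struct dtl_entry){ .timebase = 50,
            .enqueue_to_dispatch_time = 3, .ready_to_enqueue_time = 1 };
        dtl_widx = 2;
        printf("stolen ticks: %llu\n", (unsigned long long)scan_log(100));
        return 0;
    }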
261 u8 save_soft_enabled = local_paca->soft_enabled; in accumulate_stolen_time()
262 u8 save_hard_enabled = local_paca->hard_enabled; in accumulate_stolen_time()
270 local_paca->soft_enabled = 0; in accumulate_stolen_time()
271 local_paca->hard_enabled = 0; in accumulate_stolen_time()
273 sst = scan_dispatch_log(local_paca->starttime_user); in accumulate_stolen_time()
274 ust = scan_dispatch_log(local_paca->starttime); in accumulate_stolen_time()
275 local_paca->system_time -= sst; in accumulate_stolen_time()
276 local_paca->user_time -= ust; in accumulate_stolen_time()
277 local_paca->stolen_time += ust + sst; in accumulate_stolen_time()
279 local_paca->soft_enabled = save_soft_enabled; in accumulate_stolen_time()
280 local_paca->hard_enabled = save_hard_enabled; in accumulate_stolen_time()
287 if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { in calculate_stolen_time()
289 get_paca()->system_time -= stolen; in calculate_stolen_time()
292 stolen += get_paca()->stolen_time; in calculate_stolen_time()
293 get_paca()->stolen_time = 0; in calculate_stolen_time()
318 get_paca()->system_time += now - get_paca()->starttime; in account_system_vtime()
319 get_paca()->starttime = now; in account_system_vtime()
320 deltascaled = nowscaled - get_paca()->startspurr; in account_system_vtime()
321 get_paca()->startspurr = nowscaled; in account_system_vtime()
325 delta = get_paca()->system_time; in account_system_vtime()
326 get_paca()->system_time = 0; in account_system_vtime()
327 udelta = get_paca()->user_time - get_paca()->utime_sspurr; in account_system_vtime()
328 get_paca()->utime_sspurr = get_paca()->user_time; in account_system_vtime()
337 * the user ticks get saved up in paca->user_time_scaled to be in account_system_vtime()
345 user_scaled = deltascaled - sys_scaled; in account_system_vtime()
350 get_paca()->user_time_scaled += user_scaled; in account_system_vtime()
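The surrounding account_system_vtime() code apportions one SPURR-scaled delta across user and system time: the SPURR is only sampled on kernel entry/exit, so the scaled delta covers both, and (as the comment fragment about paca->user_time_scaled hints) it is split in the same ratio as the timebase-measured system and user deltas. A small arithmetic sketch of that split with illustrative numbers (the kernel also special-cases CPUs without a SPURR, where no split is needed):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Split a SPURR-scaled delta between system and user time in the same
     * ratio as the timebase-measured system (delta) and user (udelta) time.
     */
    static void apportion(uint64_t deltascaled, uint64_t delta, uint64_t udelta,
                          uint64_t *sys_scaled, uint64_t *user_scaled)
    {
        if (udelta == 0) {              /* all of it was system time */
            *sys_scaled = deltascaled;
            *user_scaled = 0;
            return;
        }
        *sys_scaled = deltascaled * delta / (delta + udelta);
        *user_scaled = deltascaled - *sys_scaled;
    }

    int main(void)
    {
        uint64_t sys, user;

        /* 900 scaled ticks covering 600 system + 300 user timebase ticks. */
        apportion(900, 600, 300, &sys, &user);
        printf("sys=%llu user=%llu\n",
               (unsigned long long)sys, (unsigned long long)user);
        return 0;
    }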
370 * get_paca()->user_time_scaled is up to date.
376 utime = get_paca()->user_time; in account_process_tick()
377 utimescaled = get_paca()->user_time_scaled; in account_process_tick()
378 get_paca()->user_time = 0; in account_process_tick()
379 get_paca()->user_time_scaled = 0; in account_process_tick()
380 get_paca()->utime_sspurr = 0; in account_process_tick()
397 diff = get_rtcl() - start; in __delay()
403 while (get_tbl() - start < loops) in __delay()
422 return regs->link; in profile_pc()
432 * This function recalibrates the timebase based on the 49-bit time-of-day
443 return -ENODEV; in iSeries_tb_recal()
448 unsigned long tb_ticks = tb - iSeries_recal_tb; in iSeries_tb_recal()
449 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12; in iSeries_tb_recal()
453 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy; in iSeries_tb_recal()
459 tick_diff = -tick_diff; in iSeries_tb_recal()
460 sign = '-'; in iSeries_tb_recal()
469 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; in iSeries_tb_recal()
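The iSeries_tb_recal() fragments compare elapsed timebase ticks against elapsed Titan microseconds over the same interval to re-derive tb_ticks_per_jiffy. A worked user-space sketch of that arithmetic (the sample values and exact rounding are illustrative; the kernel also sanity-checks the difference before applying it):

    #include <stdint.h>
    #include <stdio.h>

    #define HZ            100           /* illustrative tick rate */
    #define USEC_PER_SEC  1000000ULL

    int main(void)
    {
        /* Elapsed timebase ticks and elapsed Titan microseconds over one interval. */
        uint64_t tb_ticks   = 512512000;    /* made-up sample */
        uint64_t titan_usec = 1000000;      /* i.e. one second elapsed */

        uint64_t tb_ticks_per_jiffy = 5120000;   /* previously calibrated value */

        /* Observed ticks per second over the interval, then per jiffy. */
        uint64_t new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC) / titan_usec;
        uint64_t new_tb_ticks_per_jiffy = new_tb_ticks_per_sec / HZ;

        long tick_diff = (long)(new_tb_ticks_per_jiffy - tb_ticks_per_jiffy);

        printf("new ticks/jiffy = %llu (diff %ld)\n",
               (unsigned long long)new_tb_ticks_per_jiffy, tick_diff);
        return 0;
    }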
500 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
527 #else /* 32-bit */
563 * timer_interrupt - gets called when the decrementer overflows,
602 get_lppaca()->int_dword.fields.decr_int = 0; in timer_interrupt()
606 if (evt->event_handler) in timer_interrupt()
607 evt->event_handler(evt); in timer_interrupt()
618 cu->current_tb = mfspr(SPRN_PURR); in timer_interrupt()
663 * Scheduler clock - returns current time in nanosec units.
667 * are 64-bit unsigned numbers.
673 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; in sched_clock()
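sched_clock() converts a timebase delta to nanoseconds with the 64.64 scale prepared in time_init(): take the high 64 bits of a 64x64-bit multiply (mulhdu) and shift left by the saved count. A user-space equivalent, assuming unsigned __int128 and illustrative scale/shift values for a 512 MHz timebase:

    #include <stdint.h>
    #include <stdio.h>

    /* High 64 bits of a 64x64-bit multiply (user-space stand-in for mulhdu()). */
    static uint64_t mulhdu(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    int main(void)
    {
        /* Illustrative: (scale / 2^64) << shift == ns per tick for 512 MHz. */
        uint64_t tb_to_ns_scale = 0xfa00000000000000ULL;  /* (1e9/512e6)/2 as 0.64 */
        unsigned tb_to_ns_shift = 1;
        uint64_t ticks = 512000000;                       /* one second of ticks */

        printf("ns: %llu\n",
               (unsigned long long)(mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift));
        return 0;
    }

Here 0xfa00000000000000 / 2^64 is (1e9 / 512e6) / 2, so one second of ticks prints exactly 1000000000 ns.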
682 /* The cpu node should have timebase and clock frequency properties */ in get_freq()
714 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && in generic_calibrate_decr()
715 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { in generic_calibrate_decr()
723 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && in generic_calibrate_decr()
724 !get_freq("clock-frequency", 1, &ppc_proc_freq)) { in generic_calibrate_decr()
739 tm.tm_year -= 1900; in update_persistent_clock()
740 tm.tm_mon -= 1; in update_persistent_clock()
750 ts->tv_nsec = 0; in __read_persistent_clock()
759 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; in __read_persistent_clock()
764 ts->tv_sec = 0; in __read_persistent_clock()
769 ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, in __read_persistent_clock()
777 /* Sanitize it in case real time clock is set below EPOCH */ in read_persistent_clock()
778 if (ts->tv_sec < 0) { in read_persistent_clock()
779 ts->tv_sec = 0; in read_persistent_clock()
780 ts->tv_nsec = 0; in read_persistent_clock()
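The +/-1900 and +/-1 adjustments in the persistent-clock fragments exist because struct rtc_time stores years since 1900 and months 0-11, while to_tm() and mktime() here work with the full year and a 1-12 month. A trivial illustration of the convention (field values are made up):

    #include <stdio.h>

    /* rtc_time-style fields: years since 1900, months 0-11. */
    struct rtc_fields { int tm_year, tm_mon, tm_mday; };

    int main(void)
    {
        struct rtc_fields rtc = { .tm_year = 111, .tm_mon = 2, .tm_mday = 14 };

        /* RTC-style fields -> the calendar date mktime()-style helpers expect. */
        printf("%d-%02d-%02d\n", rtc.tm_year + 1900, rtc.tm_mon + 1, rtc.tm_mday);
        return 0;
    }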
797 struct clocksource *clock, u32 mult) in update_vsyscall() argument
802 if (clock != &clocksource_timebase) in update_vsyscall()
806 ++vdso_data->tb_update_count; in update_vsyscall()
810 new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift); in update_vsyscall()
811 new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC; in update_vsyscall()
813 new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC; in update_vsyscall()
815 BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC); in update_vsyscall()
817 frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32; in update_vsyscall()
828 * vdso_data->tb_update_count already. in update_vsyscall()
830 vdso_data->tb_orig_stamp = clock->cycle_last; in update_vsyscall()
831 vdso_data->stamp_xsec = new_stamp_xsec; in update_vsyscall()
832 vdso_data->tb_to_xs = new_tb_to_xs; in update_vsyscall()
833 vdso_data->wtom_clock_sec = wtm->tv_sec; in update_vsyscall()
834 vdso_data->wtom_clock_nsec = wtm->tv_nsec; in update_vsyscall()
835 vdso_data->stamp_xtime = *wall_time; in update_vsyscall()
836 vdso_data->stamp_sec_fraction = frac_sec; in update_vsyscall()
838 ++(vdso_data->tb_update_count); in update_vsyscall()
844 ++vdso_data->tb_update_count; in update_vsyscall_tz()
846 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; in update_vsyscall_tz()
847 vdso_data->tz_dsttime = sys_tz.tz_dsttime; in update_vsyscall_tz()
849 ++vdso_data->tb_update_count; in update_vsyscall_tz()
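update_vsyscall() and update_vsyscall_tz() bump vdso_data->tb_update_count before and after writing the new values, so user-space vdso code can treat an odd count, or a count that changed across its reads, as an in-progress update and retry. A minimal single-threaded model of that writer/reader pairing (the real code also needs memory barriers and a retry loop on the reader side):

    #include <stdint.h>
    #include <stdio.h>

    struct vdso_snapshot {
        uint32_t update_count;   /* odd while an update is in flight */
        uint64_t stamp_sec;
        uint64_t stamp_nsec;
    };

    static void writer_update(struct vdso_snapshot *v, uint64_t sec, uint64_t nsec)
    {
        ++v->update_count;       /* now odd: readers will retry */
        v->stamp_sec = sec;
        v->stamp_nsec = nsec;
        ++v->update_count;       /* even again: snapshot is consistent */
    }

    static int reader_read(const struct vdso_snapshot *v, uint64_t *sec, uint64_t *nsec)
    {
        uint32_t start = v->update_count;

        if (start & 1)           /* writer mid-update */
            return -1;
        *sec = v->stamp_sec;
        *nsec = v->stamp_nsec;
        return v->update_count == start ? 0 : -1;   /* changed underneath us? */
    }

    int main(void)
    {
        struct vdso_snapshot v = { 0 };
        uint64_t s, ns;

        writer_update(&v, 1234, 500000000);
        if (reader_read(&v, &s, &ns) == 0)
            printf("%llu.%09llu\n", (unsigned long long)s, (unsigned long long)ns);
        return 0;
    }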
854 struct clocksource *clock; in clocksource_init() local
857 clock = &clocksource_rtc; in clocksource_init()
859 clock = &clocksource_timebase; in clocksource_init()
861 if (clocksource_register_hz(clock, tb_ticks_per_sec)) { in clocksource_init()
863 clock->name); in clocksource_init()
868 clock->name, clock->mult, clock->shift); in clocksource_init()
891 dec->cpumask = cpumask_of(cpu); in register_decrementer_clockevent()
894 dec->name, dec->mult, dec->shift, cpu); in register_decrementer_clockevent()
951 * Compute scale factor for sched_clock. in time_init()
955 * the 128-bit result as a 64.64 fixed-point number. in time_init()
957 * giving us the scale factor and shift count to use in in time_init()
973 sys_tz.tz_minuteswest = -timezone_offset / 60; in time_init()
977 vdso_data->tb_update_count = 0; in time_init()
978 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; in time_init()
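The time_init() comment above describes the sched_clock scale computation: form 1e9 * 2^64 / tb_ticks_per_sec as a 64.64 fixed-point nanoseconds-per-tick value, then shift it right until it drops below 1.0, recording the shift count. A user-space sketch using unsigned __int128 in place of div128_by_32(), with an illustrative 512 MHz timebase:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tb_ticks_per_sec = 512000000;   /* illustrative timebase frequency */

        /* 1e9 * 2^64 / tb_ticks_per_sec: nanoseconds per tick as a 64.64 value. */
        unsigned __int128 ns_per_tick =
            ((unsigned __int128)1000000000 << 64) / tb_ticks_per_sec;

        /* Shift right until the integer part is gone, leaving a 0.64 fraction. */
        unsigned shift = 0;
        while ((uint64_t)(ns_per_tick >> 64) != 0) {
            ns_per_tick >>= 1;
            ++shift;
        }

        printf("tb_to_ns_scale = %#llx, tb_to_ns_shift = %u\n",
               (unsigned long long)(uint64_t)ns_per_tick, shift);
        return 0;
    }

With these values, the mulhdu-and-shift step shown after the sched_clock() fragments converts one second of ticks back to exactly 1000000000 ns.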
1000 #define days_in_month(a) (month_days[(a) - 1])
1007 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
1016 lastYear = tm->tm_year - 1; in GregorianDay()
1021 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; in GregorianDay()
1029 day = tm->tm_mon > 2 && leapyear(tm->tm_year); in GregorianDay()
1031 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + in GregorianDay()
1032 tm->tm_mday; in GregorianDay()
1034 tm->tm_wday = day % 7; in GregorianDay()
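GregorianDay() computes the day of week by counting days in all full years before the target year (with the /4 - /100 + /400 leap correction), adding the month offset plus one extra day if past February of a leap year, and reducing modulo 7. A self-contained user-space version of the same structure (0 = Sunday in this counting):

    #include <stdio.h>

    static int leapyear(int year)
    {
        return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
    }

    static int gregorian_wday(int year, int mon, int mday)   /* mon is 1-12 */
    {
        static const int month_offset[] =
            { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
        int last_year = year - 1;
        int leaps_to_date = last_year / 4 - last_year / 100 + last_year / 400;
        int day = (mon > 2 && leapyear(year)) ? 1 : 0;

        day += last_year * 365 + leaps_to_date + month_offset[mon - 1] + mday;
        return day % 7;          /* 0 = Sunday */
    }

    int main(void)
    {
        /* 1 January 2000 was a Saturday, so this prints 6. */
        printf("wday: %d\n", gregorian_wday(2000, 1, 1));
        return 0;
    }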
1046 tm->tm_hour = hms / 3600; in to_tm()
1047 tm->tm_min = (hms % 3600) / 60; in to_tm()
1048 tm->tm_sec = (hms % 3600) % 60; in to_tm()
1052 day -= days_in_year(i); in to_tm()
1053 tm->tm_year = i; in to_tm()
1056 if (leapyear(tm->tm_year)) in to_tm()
1059 day -= days_in_month(i); in to_tm()
1061 tm->tm_mon = i; in to_tm()
1064 tm->tm_mday = day + 1; in to_tm()
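to_tm() turns a seconds count into a broken-down date by splitting off the time of day, then peeling whole years and then whole months off the remaining day count. A compact user-space sketch of the same successive-subtraction approach, checked against a well-known timestamp:

    #include <stdio.h>

    #define SECS_PER_DAY  86400L
    #define EPOCH_YEAR    1970

    static int leapyear(int year)
    {
        return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
    }

    static int days_in_year(int year)  { return leapyear(year) ? 366 : 365; }

    static int days_in_month(int mon, int year)   /* mon is 1-12 */
    {
        static const int days[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
        return (mon == 2 && leapyear(year)) ? 29 : days[mon - 1];
    }

    int main(void)
    {
        long tim = 1000000000L;          /* 2001-09-09 01:46:40 UTC */
        long day = tim / SECS_PER_DAY;
        long hms = tim % SECS_PER_DAY;
        int year, mon;

        /* Peel off whole years, then whole months, from the day count. */
        for (year = EPOCH_YEAR; day >= days_in_year(year); year++)
            day -= days_in_year(year);
        for (mon = 1; day >= days_in_month(mon, year); mon++)
            day -= days_in_month(mon, year);

        printf("%d-%02d-%02ld %02ld:%02ld:%02ld\n", year, mon, day + 1,
               hms / 3600, (hms % 3600) / 60, hms % 60);
        return 0;
    }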
1073 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1089 ra = ((u64)(a - (w * divisor)) << 32) + b; in div128_by_32()
1100 dr->result_high = ((u64)w << 32) + x; in div128_by_32()
1101 dr->result_low = ((u64)y << 32) + z; in div128_by_32()
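div128_by_32() is schoolbook long division: the 128-bit dividend is processed as four 32-bit digits, most significant first, carrying each remainder into the next digit, and the quotient digits are packed back into result_high/result_low exactly as the last two fragments show. A user-space sketch of the same chunked division (the kernel version works from pre-split halves and do_div()):

    #include <stdint.h>
    #include <stdio.h>

    /* Divide the 128-bit value (hi:lo) by a 32-bit divisor, 32 bits at a time. */
    static void div128_by_32(uint64_t hi, uint64_t lo, uint32_t divisor,
                             uint64_t *res_hi, uint64_t *res_lo)
    {
        uint32_t chunks[4] = {           /* most-significant 32-bit digit first */
            (uint32_t)(hi >> 32), (uint32_t)hi,
            (uint32_t)(lo >> 32), (uint32_t)lo,
        };
        uint32_t q[4];
        uint64_t rem = 0;

        for (int i = 0; i < 4; i++) {
            uint64_t cur = (rem << 32) | chunks[i];   /* carry remainder down */
            q[i] = (uint32_t)(cur / divisor);
            rem = cur % divisor;
        }
        *res_hi = ((uint64_t)q[0] << 32) | q[1];
        *res_lo = ((uint64_t)q[2] << 32) | q[3];
    }

    int main(void)
    {
        uint64_t rh, rl;

        /* 1e9 * 2^64 / 512000000: the sched_clock scale division from time_init(). */
        div128_by_32(1000000000, 0, 512000000, &rh, &rl);
        printf("high=%llu low=%#llx\n", (unsigned long long)rh, (unsigned long long)rl);
        return 0;
    }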
1119 return -ENODEV; in rtc_init()
1121 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); in rtc_init()