Lines Matching full:cs (uses of the identifier cs in kernel/time/clocksource.c)
119 static void __clocksource_change_rating(struct clocksource *cs, int rating);
145 static void __clocksource_unstable(struct clocksource *cs) in __clocksource_unstable() argument
147 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); in __clocksource_unstable()
148 cs->flags |= CLOCK_SOURCE_UNSTABLE; in __clocksource_unstable()
154 if (list_empty(&cs->list)) { in __clocksource_unstable()
155 cs->rating = 0; in __clocksource_unstable()
159 if (cs->mark_unstable) in __clocksource_unstable()
160 cs->mark_unstable(cs); in __clocksource_unstable()
169 * @cs: clocksource to be marked unstable
174 void clocksource_mark_unstable(struct clocksource *cs) in clocksource_mark_unstable() argument
179 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { in clocksource_mark_unstable()
180 if (!list_empty(&cs->list) && list_empty(&cs->wd_list)) in clocksource_mark_unstable()
181 list_add(&cs->wd_list, &watchdog_list); in clocksource_mark_unstable()
182 __clocksource_unstable(cs); in clocksource_mark_unstable()
189 struct clocksource *cs; in clocksource_watchdog() local
200 list_for_each_entry(cs, &watchdog_list, wd_list) { in clocksource_watchdog()
203 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in clocksource_watchdog()
210 csnow = cs->read(cs); in clocksource_watchdog()
215 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || in clocksource_watchdog()
217 cs->flags |= CLOCK_SOURCE_WATCHDOG; in clocksource_watchdog()
218 cs->wd_last = wdnow; in clocksource_watchdog()
219 cs->cs_last = csnow; in clocksource_watchdog()
223 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask); in clocksource_watchdog()
227 delta = clocksource_delta(csnow, cs->cs_last, cs->mask); in clocksource_watchdog()
228 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); in clocksource_watchdog()
229 wdlast = cs->wd_last; /* save these in case we print them */ in clocksource_watchdog()
230 cslast = cs->cs_last; in clocksource_watchdog()
231 cs->cs_last = csnow; in clocksource_watchdog()
232 cs->wd_last = wdnow; in clocksource_watchdog()
240 smp_processor_id(), cs->name); in clocksource_watchdog()
244 cs->name, csnow, cslast, cs->mask); in clocksource_watchdog()
245 __clocksource_unstable(cs); in clocksource_watchdog()
249 if (cs == curr_clocksource && cs->tick_stable) in clocksource_watchdog()
250 cs->tick_stable(cs); in clocksource_watchdog()
252 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && in clocksource_watchdog()
253 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && in clocksource_watchdog()
256 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_watchdog()
273 if (cs != curr_clocksource) { in clocksource_watchdog()
274 cs->flags |= CLOCK_SOURCE_RESELECT; in clocksource_watchdog()
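The clocksource_watchdog() hits above (lines 189-274) revolve around turning raw counter deltas into nanoseconds so the watched clocksource can be compared against the watchdog. Below is a minimal, self-contained sketch of that conversion; the helper names mirror the kernel's clocksource_delta() and clocksource_cyc2ns(), but the mult/shift values, the 1 MHz counter, and the standalone main() are illustrative assumptions, not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel helpers of the same name. */
	static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
	{
		return (now - last) & mask;		/* mask handles counter wrap-around */
	}

	static uint64_t clocksource_cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
	{
		return (cycles * mult) >> shift;	/* ns = cycles * mult / 2^shift */
	}

	int main(void)
	{
		/* Hypothetical 1 MHz, 32-bit counter: one cycle is 1000 ns. */
		uint32_t mult = 1000u << 20, shift = 20;
		uint64_t mask = 0xffffffffULL;
		uint64_t wd_last = 100, wdnow = 1100;	/* 1000 cycles elapsed */

		printf("%llu ns\n", (unsigned long long)
		       clocksource_cyc2ns(clocksource_delta(wdnow, wd_last, mask),
					  mult, shift));
		return 0;				/* prints: 1000000 ns */
	}

In the kernel, the resulting cs_nsec/wd_nsec pair is checked against a divergence threshold, and only when the two clocks disagree by more than that is __clocksource_unstable() called (line 245 above).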
329 struct clocksource *cs; in clocksource_reset_watchdog() local
331 list_for_each_entry(cs, &watchdog_list, wd_list) in clocksource_reset_watchdog()
332 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_reset_watchdog()
340 static void clocksource_enqueue_watchdog(struct clocksource *cs) in clocksource_enqueue_watchdog() argument
342 INIT_LIST_HEAD(&cs->wd_list); in clocksource_enqueue_watchdog()
344 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_enqueue_watchdog()
345 /* cs is a clocksource to be watched. */ in clocksource_enqueue_watchdog()
346 list_add(&cs->wd_list, &watchdog_list); in clocksource_enqueue_watchdog()
347 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_enqueue_watchdog()
349 /* cs is a watchdog. */ in clocksource_enqueue_watchdog()
350 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
351 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
357 struct clocksource *cs, *old_wd; in clocksource_select_watchdog() local
366 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_select_watchdog()
367 /* cs is a clocksource to be watched. */ in clocksource_select_watchdog()
368 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) in clocksource_select_watchdog()
372 if (fallback && cs == old_wd) in clocksource_select_watchdog()
376 if (!watchdog || cs->rating > watchdog->rating) in clocksource_select_watchdog()
377 watchdog = cs; in clocksource_select_watchdog()
392 static void clocksource_dequeue_watchdog(struct clocksource *cs) in clocksource_dequeue_watchdog() argument
394 if (cs != watchdog) { in clocksource_dequeue_watchdog()
395 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_dequeue_watchdog()
396 /* cs is a watched clocksource. */ in clocksource_dequeue_watchdog()
397 list_del_init(&cs->wd_list); in clocksource_dequeue_watchdog()
406 struct clocksource *cs, *tmp; in __clocksource_watchdog_kthread() local
411 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { in __clocksource_watchdog_kthread()
412 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_watchdog_kthread()
413 list_del_init(&cs->wd_list); in __clocksource_watchdog_kthread()
414 __clocksource_change_rating(cs, 0); in __clocksource_watchdog_kthread()
417 if (cs->flags & CLOCK_SOURCE_RESELECT) { in __clocksource_watchdog_kthread()
418 cs->flags &= ~CLOCK_SOURCE_RESELECT; in __clocksource_watchdog_kthread()
438 static bool clocksource_is_watchdog(struct clocksource *cs) in clocksource_is_watchdog() argument
440 return cs == watchdog; in clocksource_is_watchdog()
445 static void clocksource_enqueue_watchdog(struct clocksource *cs) in clocksource_enqueue_watchdog() argument
447 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
448 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
452 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } in clocksource_dequeue_watchdog() argument
455 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } in clocksource_is_watchdog() argument
456 void clocksource_mark_unstable(struct clocksource *cs) { } in clocksource_mark_unstable() argument
463 static bool clocksource_is_suspend(struct clocksource *cs) in clocksource_is_suspend() argument
465 return cs == suspend_clocksource; in clocksource_is_suspend()
468 static void __clocksource_suspend_select(struct clocksource *cs) in __clocksource_suspend_select() argument
473 if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP)) in __clocksource_suspend_select()
481 if (cs->suspend || cs->resume) { in __clocksource_suspend_select()
483 cs->name); in __clocksource_suspend_select()
487 if (!suspend_clocksource || cs->rating > suspend_clocksource->rating) in __clocksource_suspend_select()
488 suspend_clocksource = cs; in __clocksource_suspend_select()
497 struct clocksource *cs, *old_suspend; in clocksource_suspend_select() local
503 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_suspend_select()
505 if (fallback && cs == old_suspend) in clocksource_suspend_select()
508 __clocksource_suspend_select(cs); in clocksource_suspend_select()
514 * @cs: current clocksource from timekeeping
525 void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles) in clocksource_start_suspend_timing() argument
535 if (clocksource_is_suspend(cs)) { in clocksource_start_suspend_timing()
551 * @cs: current clocksource from timekeeping
563 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) in clocksource_stop_suspend_timing() argument
575 if (clocksource_is_suspend(cs)) in clocksource_stop_suspend_timing()
591 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable) in clocksource_stop_suspend_timing()
602 struct clocksource *cs; in clocksource_suspend() local
604 list_for_each_entry_reverse(cs, &clocksource_list, list) in clocksource_suspend()
605 if (cs->suspend) in clocksource_suspend()
606 cs->suspend(cs); in clocksource_suspend()
614 struct clocksource *cs; in clocksource_resume() local
616 list_for_each_entry(cs, &clocksource_list, list) in clocksource_resume()
617 if (cs->resume) in clocksource_resume()
618 cs->resume(cs); in clocksource_resume()
637 * @cs: Pointer to clocksource
640 static u32 clocksource_max_adjustment(struct clocksource *cs) in clocksource_max_adjustment() argument
646 ret = (u64)cs->mult * 11; in clocksource_max_adjustment()
698 * @cs: Pointer to clocksource to be updated
701 static inline void clocksource_update_max_deferment(struct clocksource *cs) in clocksource_update_max_deferment() argument
703 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift, in clocksource_update_max_deferment()
704 cs->maxadj, cs->mask, in clocksource_update_max_deferment()
705 &cs->max_cycles); in clocksource_update_max_deferment()
712 struct clocksource *cs; in clocksource_find_best() local
722 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_find_best()
723 if (skipcur && cs == curr_clocksource) in clocksource_find_best()
725 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) in clocksource_find_best()
727 return cs; in clocksource_find_best()
735 struct clocksource *best, *cs; in __clocksource_select() local
746 list_for_each_entry(cs, &clocksource_list, list) { in __clocksource_select()
747 if (skipcur && cs == curr_clocksource) in __clocksource_select()
749 if (strcmp(cs->name, override_name) != 0) in __clocksource_select()
756 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { in __clocksource_select()
758 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_select()
760 cs->name); in __clocksource_select()
768 cs->name); in __clocksource_select()
772 best = cs; in __clocksource_select()
832 static void clocksource_enqueue(struct clocksource *cs) in clocksource_enqueue() argument
839 if (tmp->rating < cs->rating) in clocksource_enqueue()
843 list_add(&cs->list, entry); in clocksource_enqueue()
848 * @cs: clocksource to be registered
858 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_update_freq_scale() argument
876 sec = cs->mask; in __clocksource_update_freq_scale()
881 else if (sec > 600 && cs->mask > UINT_MAX) in __clocksource_update_freq_scale()
884 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, in __clocksource_update_freq_scale()
891 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
892 while (freq && ((cs->mult + cs->maxadj < cs->mult) in __clocksource_update_freq_scale()
893 || (cs->mult - cs->maxadj > cs->mult))) { in __clocksource_update_freq_scale()
894 cs->mult >>= 1; in __clocksource_update_freq_scale()
895 cs->shift--; in __clocksource_update_freq_scale()
896 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
903 WARN_ONCE(cs->mult + cs->maxadj < cs->mult, in __clocksource_update_freq_scale()
905 cs->name); in __clocksource_update_freq_scale()
907 clocksource_update_max_deferment(cs); in __clocksource_update_freq_scale()
910 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); in __clocksource_update_freq_scale()
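__clocksource_update_freq_scale() (lines 858-910 above) derives mult and shift so that nanoseconds = (cycles * mult) >> shift, then repeatedly halves mult and decrements shift (lines 894-895) while mult plus the maximum adjustment would overflow (lines 892-893). Per clocksource_max_adjustment() (line 646), that adjustment is capped at roughly 11% of mult (the * 11 there is followed by a divide by 100 in the kernel source). As a hypothetical worked example: for mult = 4194304, maxadj = 4194304 * 11 / 100 = 461373.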
916 * @cs: clocksource to be registered
925 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_register_scale() argument
929 clocksource_arch_init(cs); in __clocksource_register_scale()
931 if (cs->vdso_clock_mode < 0 || in __clocksource_register_scale()
932 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { in __clocksource_register_scale()
934 cs->name, cs->vdso_clock_mode); in __clocksource_register_scale()
935 cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE; in __clocksource_register_scale()
939 __clocksource_update_freq_scale(cs, scale, freq); in __clocksource_register_scale()
945 clocksource_enqueue(cs); in __clocksource_register_scale()
946 clocksource_enqueue_watchdog(cs); in __clocksource_register_scale()
951 __clocksource_suspend_select(cs); in __clocksource_register_scale()
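__clocksource_register_scale() (lines 925-951 above) is the common registration path; drivers normally reach it through the clocksource_register_hz() or clocksource_register_khz() wrappers, which supply scale = 1 or 1000 respectively. A minimal driver sketch follows; the "example-counter" device, its MMIO base, and the 24 MHz rate are assumptions for illustration, not a real driver.

	#include <linux/clocksource.h>
	#include <linux/io.h>

	static void __iomem *example_counter_base;	/* hypothetical, ioremapped by probe code */

	static u64 example_cs_read(struct clocksource *cs)
	{
		return readl_relaxed(example_counter_base);	/* read the free-running counter */
	}

	static struct clocksource example_cs = {
		.name	= "example-counter",
		.rating	= 200,
		.read	= example_cs_read,
		.mask	= CLOCKSOURCE_MASK(32),
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static int example_cs_init(void)
	{
		/* Counter assumed to tick at 24 MHz; adjust for the real hardware. */
		return clocksource_register_hz(&example_cs, 24000000);
	}

Because the flags above do not include CLOCK_SOURCE_MUST_VERIFY, clocksource_enqueue_watchdog() (lines 344-351) marks the clocksource valid for high-resolution timekeeping without putting it on the watchdog list.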
957 static void __clocksource_change_rating(struct clocksource *cs, int rating) in __clocksource_change_rating() argument
959 list_del(&cs->list); in __clocksource_change_rating()
960 cs->rating = rating; in __clocksource_change_rating()
961 clocksource_enqueue(cs); in __clocksource_change_rating()
966 * @cs: clocksource to be changed
969 void clocksource_change_rating(struct clocksource *cs, int rating) in clocksource_change_rating() argument
975 __clocksource_change_rating(cs, rating); in clocksource_change_rating()
986 * Unbind clocksource @cs. Called with clocksource_mutex held
988 static int clocksource_unbind(struct clocksource *cs) in clocksource_unbind() argument
992 if (clocksource_is_watchdog(cs)) { in clocksource_unbind()
995 if (clocksource_is_watchdog(cs)) in clocksource_unbind()
999 if (cs == curr_clocksource) { in clocksource_unbind()
1002 if (curr_clocksource == cs) in clocksource_unbind()
1006 if (clocksource_is_suspend(cs)) { in clocksource_unbind()
1016 clocksource_dequeue_watchdog(cs); in clocksource_unbind()
1017 list_del_init(&cs->list); in clocksource_unbind()
1025 * @cs: clocksource to be unregistered
1027 int clocksource_unregister(struct clocksource *cs) in clocksource_unregister() argument
1032 if (!list_empty(&cs->list)) in clocksource_unregister()
1033 ret = clocksource_unbind(cs); in clocksource_unregister()
1119 struct clocksource *cs; in unbind_clocksource_store() local
1129 list_for_each_entry(cs, &clocksource_list, list) { in unbind_clocksource_store()
1130 if (strcmp(cs->name, name)) in unbind_clocksource_store()
1132 ret = clocksource_unbind(cs); in unbind_clocksource_store()
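unbind_clocksource_store() (lines 1119-1132 above) backs the sysfs "unbind" attribute: on typical systems, writing a registered clocksource's name to /sys/devices/system/clocksource/clocksource0/unbind (for example, echo acpi_pm into that file) drops it via clocksource_unbind(). If the named clocksource is currently the watchdog, the active timekeeping clocksource, or the suspend clocksource, clocksource_unbind() (lines 988-1017) first tries to select a fallback and only then dequeues it and deletes it from the list.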