xref: /linux/kernel/time/clocksource.c (revision c1fe867b5bf9c57ab7856486d342720e2b205eed)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * This file contains the functions which manage clocksource drivers.
4  *
5  * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/clocksource.h>
11 #include <linux/cpu.h>
12 #include <linux/delay.h>
13 #include <linux/device.h>
14 #include <linux/init.h>
15 #include <linux/kthread.h>
16 #include <linux/module.h>
17 #include <linux/prandom.h>
18 #include <linux/sched.h>
19 #include <linux/tick.h>
20 #include <linux/topology.h>
21 
22 #include "tick-internal.h"
23 #include "timekeeping_internal.h"
24 
25 static void clocksource_enqueue(struct clocksource *cs);
26 
27 static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
28 {
29 	u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);
30 
31 	if (likely(delta < cs->max_cycles))
32 		return clocksource_cyc2ns(delta, cs->mult, cs->shift);
33 
34 	return mul_u64_u32_shr(delta, cs->mult, cs->shift);
35 }
36 
37 /**
38  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
39  * @mult:	pointer to mult variable
40  * @shift:	pointer to shift variable
41  * @from:	frequency to convert from
42  * @to:		frequency to convert to
43  * @maxsec:	guaranteed runtime conversion range in seconds
44  *
45  * The function evaluates the shift/mult pair for the scaled math
46  * operations of clocksources and clockevents.
47  *
48  * @to and @from are frequency values in HZ. For clock sources @to is
49  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
50  * event @to is the counter frequency and @from is NSEC_PER_SEC.
51  *
52  * The @maxsec conversion range argument controls the time frame in
53  * seconds which must be covered by the runtime conversion with the
54  * calculated mult and shift factors. This guarantees that no 64bit
55  * overflow happens when the input value of the conversion is
56  * multiplied with the calculated mult factor. Larger ranges may
57  * reduce the conversion accuracy by choosing smaller mult and shift
58  * factors.
59  */
60 void
61 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
62 {
63 	u64 tmp;
64 	u32 sft, sftacc= 32;
65 
66 	/*
67 	 * Calculate the shift factor which is limiting the conversion
68 	 * range:
69 	 */
70 	tmp = ((u64)maxsec * from) >> 32;
71 	while (tmp) {
72 		tmp >>=1;
73 		sftacc--;
74 	}
75 
76 	/*
77 	 * Find the conversion shift/mult pair which has the best
78 	 * accuracy and fits the maxsec conversion range:
79 	 */
80 	for (sft = 32; sft > 0; sft--) {
81 		tmp = (u64) to << sft;
82 		tmp += from / 2;
83 		do_div(tmp, from);
84 		if ((tmp >> sftacc) == 0)
85 			break;
86 	}
87 	*mult = tmp;
88 	*shift = sft;
89 }
90 EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
91 
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
/* Set once booting finished; gates scheduling of the deferred watchdog work */
static int finished_booting;
/* Suspend clocksource cycle value captured when suspend timing starts */
static u64 suspend_start;
111 
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

/* List of clocksources which are subject to watchdog verification */
static LIST_HEAD(watchdog_list);
/* The clocksource which acts as the reference watchdog */
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
/* Protects watchdog_list and the watchdog related clocksource flags */
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

/* Watchdog interval: 0.5sec. */
#define WATCHDOG_INTERVAL		(HZ >> 1)
#define WATCHDOG_INTERVAL_NS		(WATCHDOG_INTERVAL * (NSEC_PER_SEC / HZ))

/* Maximum time between two reference watchdog readouts */
#define WATCHDOG_READOUT_MAX_NS		(50U * NSEC_PER_USEC)

/*
 * Maximum time between two remote readouts for NUMA=n. On NUMA enabled systems
 * the timeout is calculated from the numa distance.
 */
#define WATCHDOG_DEFAULT_TIMEOUT_NS	(50U * NSEC_PER_USEC)

/*
 * Remote timeout NUMA distance multiplier. The local distance is 10. The
 * default remote distance is 20. ACPI tables provide more accurate numbers
 * which are guaranteed to be greater than the local distance.
 *
 * This results in a 5us base value, which is equivalent to the above !NUMA
 * default.
 */
#define WATCHDOG_NUMA_MULTIPLIER_NS	((u64)(WATCHDOG_DEFAULT_TIMEOUT_NS / LOCAL_DISTANCE))

/* Limit the NUMA timeout in case the distance values are insanely big */
#define WATCHDOG_NUMA_MAX_TIMEOUT_NS	((u64)(500U * NSEC_PER_USEC))

/* Shift values to calculate the approximate $N ppm of a given delta. */
#define SHIFT_500PPM			11
#define SHIFT_4000PPM			8

/* Number of attempts to read the watchdog */
#define WATCHDOG_FREQ_RETRIES		3

/* Five reads each on the local and the remote CPU for inter CPU skew detection */
#define WATCHDOG_REMOTE_MAX_SEQ		10
159 
/* Serialize watchdog list/flag manipulation against the watchdog machinery */
static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}
169 
static int clocksource_watchdog_kthread(void *data);

/* Deferred work which spawns the re-rating/re-selection kthread */
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
189 
/* Re-rate @cs and re-sort it into the rating ordered clocksource_list */
static void clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}
196 
/*
 * Flag @cs unstable and kick off the deferred re-rating and re-selection.
 * Callers hold watchdog_lock.
 */
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is not on clocksource_list it is not (or no
	 * longer) registered: just zero the rating and return. Otherwise
	 * clocksource_watchdog_kthread() will re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}
218 
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		/* Registered but not yet watched? Put it on the watchdog list. */
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
238 
/*
 * Invalidate the reference timestamps of all watched clocksources so the
 * next watchdog cycle re-initializes them. Callers hold watchdog_lock.
 */
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
246 
/* Outcome of the per-cycle watchdog checks */
enum wd_result {
	WD_SUCCESS,
	WD_FREQ_NO_WATCHDOG,	/* No reference watchdog clocksource available */
	WD_FREQ_TIMEOUT,	/* Readout window exceeded on all retries */
	WD_FREQ_RESET,		/* Reference timestamps were (re)initialized */
	WD_FREQ_SKEWED,		/* Deviation from watchdog beyond the PPM limit */
	WD_CPU_TIMEOUT,		/* Remote CPU did not respond in time */
	WD_CPU_SKEWED,		/* Inter CPU skew detected */
};

/* Per CPU state for the inter CPU skew check */
struct watchdog_cpu_data {
	/* Keep first as it is 32 byte aligned */
	call_single_data_t	csd;
	atomic_t		remote_inprogress;
	enum wd_result		result;
	u64			cpu_ts[2];	/* [0] local, [1] remote timestamp */
	struct clocksource	*cs;
	/* Ensure that the sequence is in a separate cache line */
	atomic_t		seq ____cacheline_aligned;
	/* Set by the control CPU according to NUMA distance */
	u64			timeout_ns;
};

/* Check state shared across one watchdog timer cycle */
struct watchdog_data {
	raw_spinlock_t	lock;
	enum wd_result	result;

	u64		wd_seq;		/* Last watchdog readout window (ns) */
	u64		wd_delta;	/* Watchdog interval since last cycle (ns) */
	u64		cs_delta;	/* Clocksource interval since last cycle (ns) */
	u64		cpu_ts[2];	/* Timestamps copied from the skew check */

	unsigned int	curr_cpu;	/* Last remote CPU checked for skew */
} ____cacheline_aligned_in_smp;

static void watchdog_check_skew_remote(void *unused);

static DEFINE_PER_CPU_ALIGNED(struct watchdog_cpu_data, watchdog_cpu_data) = {
	.csd	= CSD_INIT(watchdog_check_skew_remote, NULL),
};

static struct watchdog_data watchdog_data = {
	.lock	= __RAW_SPIN_LOCK_UNLOCKED(watchdog_data.lock),
};
291 
/*
 * Record @result unless a result was already set (first error wins) and
 * force the sequence count to the maximum so both sides terminate their
 * wait loops.
 */
static inline void watchdog_set_result(struct watchdog_cpu_data *wd, enum wd_result result)
{
	guard(raw_spinlock)(&watchdog_data.lock);
	if (!wd->result) {
		atomic_set(&wd->seq, WATCHDOG_REMOTE_MAX_SEQ);
		WRITE_ONCE(wd->result, result);
	}
}
300 
301 /* Wait for the sequence number to hand over control. */
302 static bool watchdog_wait_seq(struct watchdog_cpu_data *wd, u64 start, int seq)
303 {
304 	for(int cnt = 0; atomic_read(&wd->seq) < seq; cnt++) {
305 		/* Bail if the other side set an error result */
306 		if (READ_ONCE(wd->result) != WD_SUCCESS)
307 			return false;
308 
309 		/* Prevent endless loops if the other CPU does not react. */
310 		if (cnt == 5000) {
311 			u64 nsecs = ktime_get_raw_fast_ns();
312 
313 			if (nsecs - start >=wd->timeout_ns) {
314 				watchdog_set_result(wd, WD_CPU_TIMEOUT);
315 				return false;
316 			}
317 			cnt = 0;
318 		}
319 		cpu_relax();
320 	}
321 	return seq < WATCHDOG_REMOTE_MAX_SEQ;
322 }
323 
/*
 * Lock-step timestamp exchange between two CPUs. Each side alternately
 * reads the clocksource, publishes its timestamp and hands control over
 * via the sequence counter. A masked delta between the local readout and
 * the previously published remote one which exceeds cs->max_raw_delta
 * flags inter CPU skew. @index: 0 on the control CPU, 1 on the remote.
 */
static void watchdog_check_skew(struct watchdog_cpu_data *wd, int index)
{
	u64 prev, now, delta, start = ktime_get_raw_fast_ns();
	int local = index, remote = (index + 1) & 0x1;
	struct clocksource *cs = wd->cs;

	/* Set the local timestamp so that the first iteration works correctly */
	wd->cpu_ts[local] = cs->read(cs);

	/* Signal arrival */
	atomic_inc(&wd->seq);

	for (int seq = local + 2; seq < WATCHDOG_REMOTE_MAX_SEQ; seq += 2) {
		if (!watchdog_wait_seq(wd, start, seq))
			return;

		/* Capture local timestamp before possible non-local coherency overhead */
		now = cs->read(cs);

		/* Store local timestamp before reading remote to limit coherency stalls */
		wd->cpu_ts[local] = now;

		prev = wd->cpu_ts[remote];
		delta = (now - prev) & cs->mask;

		/* A wrapped (negative) or oversized delta indicates skew */
		if (delta > cs->max_raw_delta) {
			watchdog_set_result(wd, WD_CPU_SKEWED);
			return;
		}

		/* Hand over to the remote CPU */
		atomic_inc(&wd->seq);
	}
}
358 
/* IPI callback on the remote CPU side of the skew check */
static void watchdog_check_skew_remote(void *unused)
{
	struct watchdog_cpu_data *wd = this_cpu_ptr(&watchdog_cpu_data);

	/* Let the control CPU observe that the remote side is busy */
	atomic_inc(&wd->remote_inprogress);
	watchdog_check_skew(wd, 1);
	atomic_dec(&wd->remote_inprogress);
}
367 
/* True if the call single data of @wd is still in flight from a previous IPI */
static inline bool wd_csd_locked(struct watchdog_cpu_data *wd)
{
	return READ_ONCE(wd->csd.node.u_flags) & CSD_FLAG_LOCK;
}
372 
373 /*
374  * This is only invoked for remote CPUs. See watchdog_check_cpu_skew().
375  */
376 static inline u64 wd_get_remote_timeout(unsigned int remote_cpu)
377 {
378 	unsigned int n1, n2;
379 	u64 ns;
380 
381 	if (nr_node_ids == 1)
382 		return WATCHDOG_DEFAULT_TIMEOUT_NS;
383 
384 	n1 = cpu_to_node(smp_processor_id());
385 	n2 = cpu_to_node(remote_cpu);
386 	ns = WATCHDOG_NUMA_MULTIPLIER_NS * node_distance(n1, n2);
387 	return min(ns, WATCHDOG_NUMA_MAX_TIMEOUT_NS);
388 }
389 
/*
 * Run the inter CPU skew check for @cs against @cpu: prime the remote
 * CPU's per-cpu data, kick it via async IPI, run the local half of the
 * exchange and publish the outcome in watchdog_data.
 */
static void __watchdog_check_cpu_skew(struct clocksource *cs, unsigned int cpu)
{
	struct watchdog_cpu_data *wd;

	wd = per_cpu_ptr(&watchdog_cpu_data, cpu);
	/* Previous IPI still pending or running? Treat as timeout and retry later. */
	if (atomic_read(&wd->remote_inprogress) || wd_csd_locked(wd)) {
		watchdog_data.result = WD_CPU_TIMEOUT;
		return;
	}

	atomic_set(&wd->seq, 0);
	wd->result = WD_SUCCESS;
	wd->cs = cs;
	/* Store the current CPU ID for the watchdog test unit */
	cs->wd_cpu = smp_processor_id();

	wd->timeout_ns = wd_get_remote_timeout(cpu);

	/* Kick the remote CPU into the watchdog function */
	if (WARN_ON_ONCE(smp_call_function_single_async(cpu, &wd->csd))) {
		watchdog_data.result = WD_CPU_TIMEOUT;
		return;
	}

	/* Run the local half with interrupts disabled */
	scoped_guard(irq)
		watchdog_check_skew(wd, 0);

	/* Copy the outcome for evaluation/printout under watchdog_data.lock */
	scoped_guard(raw_spinlock_irq, &watchdog_data.lock) {
		watchdog_data.result = wd->result;
		memcpy(watchdog_data.cpu_ts, wd->cpu_ts, sizeof(wd->cpu_ts));
	}
}
422 
/*
 * Pick the next online CPU in round-robin fashion and check @cs for
 * inter CPU skew against it.
 */
static void watchdog_check_cpu_skew(struct clocksource *cs)
{
	unsigned int cpu = watchdog_data.curr_cpu;

	cpu = cpumask_next_wrap(cpu, cpu_online_mask);
	watchdog_data.curr_cpu = cpu;

	/* Skip the current CPU. Handles num_online_cpus() == 1 as well */
	if (cpu == smp_processor_id())
		return;

	/* Don't interfere with the test mechanics */
	if ((cs->flags & CLOCK_SOURCE_WDTEST) && !(cs->flags & CLOCK_SOURCE_WDTEST_PERCPU))
		return;

	__watchdog_check_cpu_skew(cs, cpu);
}
440 
/*
 * Verify the frequency of @cs against the reference watchdog clocksource.
 *
 * Returns true when @cs passed (or is exempt from) the frequency check,
 * false otherwise; watchdog_data.result carries the detailed outcome for
 * watchdog_check_result().
 */
static bool watchdog_check_freq(struct clocksource *cs, bool reset_pending)
{
	unsigned int ppm_shift = SHIFT_4000PPM;
	u64 wd_ts0, wd_ts1, cs_ts;

	watchdog_data.result = WD_SUCCESS;
	if (!watchdog) {
		watchdog_data.result = WD_FREQ_NO_WATCHDOG;
		return false;
	}

	/* Per CPU test clocksources only get the skew check */
	if (cs->flags & CLOCK_SOURCE_WDTEST_PERCPU)
		return true;

	/*
	 * If both the clocksource and the watchdog claim they are
	 * calibrated use 500ppm limit. Uncalibrated clocksources need a
	 * larger allowance because the firmware supplied frequencies can be
	 * way off.
	 */
	if (watchdog->flags & CLOCK_SOURCE_CALIBRATED && cs->flags & CLOCK_SOURCE_CALIBRATED)
		ppm_shift = SHIFT_500PPM;

	for (int retries = 0; retries < WATCHDOG_FREQ_RETRIES; retries++) {
		s64 wd_last, cs_last, wd_seq, wd_delta, cs_delta, max_delta;

		/* Read the clocksource bracketed by two watchdog reads */
		scoped_guard(irq) {
			wd_ts0 = watchdog->read(watchdog);
			cs_ts = cs->read(cs);
			wd_ts1 = watchdog->read(watchdog);
		}

		wd_last = cs->wd_last;
		cs_last = cs->cs_last;

		/* Validate the watchdog readout window */
		wd_seq = cycles_to_nsec_safe(watchdog, wd_ts0, wd_ts1);
		if (wd_seq > WATCHDOG_READOUT_MAX_NS) {
			/* Store for printout in case all retries fail */
			watchdog_data.wd_seq = wd_seq;
			continue;
		}

		/* Store for subsequent processing */
		cs->wd_last = wd_ts0;
		cs->cs_last = cs_ts;

		/* First round or reset pending? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || reset_pending)
			goto reset;

		/* Calculate the nanosecond deltas from the last invocation */
		wd_delta = cycles_to_nsec_safe(watchdog, wd_last, wd_ts0);
		cs_delta = cycles_to_nsec_safe(cs, cs_last, cs_ts);

		watchdog_data.wd_delta = wd_delta;
		watchdog_data.cs_delta = cs_delta;

		/*
		 * Ensure that the deltas are within the readout limits of
		 * the clocksource and the watchdog. Long delays can cause
		 * clocksources to overflow.
		 */
		max_delta = max(wd_delta, cs_delta);
		if (max_delta > cs->max_idle_ns || max_delta > watchdog->max_idle_ns)
			goto reset;

		/*
		 * Calculate and validate the skew against the allowed PPM
		 * value of the maximum delta plus the watchdog readout
		 * time.
		 */
		if (abs(wd_delta - cs_delta) < (max_delta >> ppm_shift) + wd_seq)
			return true;

		watchdog_data.result = WD_FREQ_SKEWED;
		return false;
	}

	/* All retries exceeded the readout window */
	watchdog_data.result = WD_FREQ_TIMEOUT;
	return false;

reset:
	/* Reference timestamps (re)initialized; nothing to compare yet */
	cs->flags |= CLOCK_SOURCE_WATCHDOG;
	watchdog_data.result = WD_FREQ_RESET;
	return false;
}
528 
/* Synchronization for sched clock: notify a stable tick on the current clocksource */
static void clocksource_tick_stable(struct clocksource *cs)
{
	if (cs == curr_clocksource && cs->tick_stable)
		cs->tick_stable(cs);
}
535 
/* Conditionally mark a verified continuous clocksource valid for high resolution mode */
static void clocksource_enable_highres(struct clocksource *cs)
{
	/* Already valid, not continuous, or no continuous watchdog reference? */
	if ((cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) ||
	    !(cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) ||
	    !watchdog || !(watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS))
		return;

	/* Mark it valid for high-res. */
	cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

	/*
	 * Can't schedule work before finished_booting is
	 * true. clocksource_done_booting will take care of it.
	 */
	if (!finished_booting)
		return;

	/* Test clocksources never trigger reselection or tick notification */
	if (cs->flags & CLOCK_SOURCE_WDTEST)
		return;

	/*
	 * If this is not the current clocksource let the watchdog thread
	 * reselect it. Due to the change to high res this clocksource
	 * might be preferred now. If it is the current clocksource let the
	 * tick code know about that change.
	 */
	if (cs != curr_clocksource) {
		cs->flags |= CLOCK_SOURCE_RESELECT;
		schedule_work(&watchdog_work);
	} else {
		tick_clock_notify();
	}
}
570 
/* Rate limit: at most 2 messages per 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 2);

/* Rate limited printout when all watchdog readout retries exceeded the window */
static void watchdog_print_freq_timeout(struct clocksource *cs)
{
	if (!__ratelimit(&ratelimit_state))
		return;
	pr_info("Watchdog %s read timed out. Readout sequence took: %lluns\n",
		watchdog->name, watchdog_data.wd_seq);
}
580 
/* Printout for a clocksource which deviates from the watchdog frequency */
static void watchdog_print_freq_skew(struct clocksource *cs)
{
	pr_warn("Marking clocksource %s unstable due to frequency skew\n", cs->name);
	pr_warn("Watchdog    %20s interval: %16lluns\n", watchdog->name, watchdog_data.wd_delta);
	pr_warn("Clocksource %20s interval: %16lluns\n", cs->name, watchdog_data.cs_delta);
}
587 
/* One-time notice when a remote CPU skew check did not complete in time */
static void watchdog_handle_remote_timeout(struct clocksource *cs)
{
	pr_info_once("Watchdog remote CPU %u read timed out\n", watchdog_data.curr_cpu);
}
592 
/* Printout for detected inter CPU skew; orders the timestamps smaller first */
static void watchdog_print_remote_skew(struct clocksource *cs)
{
	pr_warn("Marking clocksource %s unstable due to inter CPU skew\n", cs->name);
	/* cpu_ts[0] is the local readout, cpu_ts[1] the remote one */
	if (watchdog_data.cpu_ts[0] < watchdog_data.cpu_ts[1]) {
		pr_warn("CPU%u %16llu < CPU%u %16llu (cycles)\n", smp_processor_id(),
			watchdog_data.cpu_ts[0], watchdog_data.curr_cpu, watchdog_data.cpu_ts[1]);
	} else {
		pr_warn("CPU%u %16llu < CPU%u %16llu (cycles)\n", watchdog_data.curr_cpu,
			watchdog_data.cpu_ts[1], smp_processor_id(), watchdog_data.cpu_ts[0]);
	}
}
604 
/*
 * Evaluate the result of the watchdog checks for @cs: report, retry on
 * timeouts, or mark the clocksource unstable on detected skew.
 */
static void watchdog_check_result(struct clocksource *cs)
{
	switch (watchdog_data.result) {
	case WD_SUCCESS:
		clocksource_tick_stable(cs);
		clocksource_enable_highres(cs);
		return;

	case WD_FREQ_TIMEOUT:
		watchdog_print_freq_timeout(cs);
		/* Try again later and invalidate the reference timestamps. */
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
		return;

	case WD_FREQ_NO_WATCHDOG:
	case WD_FREQ_RESET:
		/*
		 * Nothing to do when the reference timestamps were reset
		 * or no watchdog clocksource registered.
		 */
		return;

	case WD_FREQ_SKEWED:
		watchdog_print_freq_skew(cs);
		break;

	case WD_CPU_TIMEOUT:
		/* Remote check timed out. Try again next cycle. */
		watchdog_handle_remote_timeout(cs);
		return;

	case WD_CPU_SKEWED:
		watchdog_print_remote_skew(cs);
		break;
	}
	/* Only the skew cases fall through to here */
	__clocksource_unstable(cs);
}
642 
/*
 * Periodic watchdog timer function. Walks the list of watched
 * clocksources, runs the frequency and inter CPU skew checks and rearms
 * itself. Runs with watchdog_lock held via guard().
 */
static void clocksource_watchdog(struct timer_list *unused)
{
	struct clocksource *cs;
	bool reset_pending;

	guard(spinlock)(&watchdog_lock);
	if (!watchdog_running)
		return;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {
		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			/* Re-kick the deferred re-rating work */
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		/* Compare against watchdog clocksource if available */
		if (watchdog_check_freq(cs, reset_pending)) {
			/* Check for inter CPU skew */
			watchdog_check_cpu_skew(cs);
		}

		watchdog_check_result(cs);
	}

	/* Clear after the full clocksource walk */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/* Could have been rearmed by a stop/start cycle */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL;
		add_timer_local(&watchdog_timer);
	}
}
681 
/*
 * Start the watchdog timer, pinned to the boot CPU, if it is not already
 * running and there is something to watch. Callers hold watchdog_lock.
 */
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, TIMER_PINNED);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;

	add_timer_on(&watchdog_timer, get_boot_cpu_id());
	watchdog_running = 1;
}
692 
/*
 * Stop the watchdog timer once the watched list is empty. Callers hold
 * watchdog_lock.
 */
static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || !list_empty(&watchdog_list))
		return;
	timer_delete(&watchdog_timer);
	watchdog_running = 0;
}
700 
/* Request a reset of the reference timestamps on the next watchdog cycle */
static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}
705 
/*
 * Add a newly registered clocksource to the watchdog machinery: either
 * as a watched clocksource or, when no verification is required, as a
 * potential watchdog reference.
 */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}
720 
/*
 * (Re)select the best rated continuous non-verified clocksource as the
 * watchdog reference.
 * @fallback: skip the current watchdog and pick a replacement for it
 */
static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/*
		 * If it's not continuous, don't put the fox in charge of
		 * the henhouse.
		 */
		if (!(cs->flags & CLOCK_SOURCE_IS_CONTINUOUS))
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
764 
765 static void clocksource_dequeue_watchdog(struct clocksource *cs)
766 {
767 	if (cs != watchdog) {
768 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
769 			/* cs is a watched clocksource. */
770 			list_del_init(&cs->wd_list);
771 			/* Check if the watchdog timer needs to be stopped. */
772 			clocksource_stop_watchdog();
773 		}
774 	}
775 }
776 
/*
 * De-list and zero-rate all clocksources which were flagged unstable,
 * and collect pending reselect requests. Returns non-zero when a
 * clocksource re-selection is required.
 */
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}
801 
/* Kthread body: re-rate unstable clocksources and re-select if required */
static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
810 
/* True if @cs is the current watchdog reference clocksource */
static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}
815 
#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

/* Without a watchdog, continuous clocksources are trusted for high-res */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

/* No-op stubs when the clocksource watchdog is not configured */
static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
835 
/* True if @cs is the currently selected suspend timing clocksource */
static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}
840 
/* Consider @cs as suspend clocksource candidate; keep the best rated one */
static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}
863 
/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	if select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	/* A fallback request starts the selection from scratch */
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}
884 
/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * that means processes are frozen, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	/* Suspend clocksource may need enabling before it can be read */
	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}
921 
/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * that means there is only one cpu, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	/* A non-advancing counter yields 0 rather than a bogus huge delta */
	if (now > suspend_start)
		nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
965 
/**
 * clocksource_suspend - suspend the clocksource(s)
 *
 * Walks the list in reverse so clocksources are suspended in the
 * opposite order of their resume.
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}
977 
/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	/* Invalidate the watchdog reference timestamps after the time gap */
	clocksource_resume_watchdog();
}
991 
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}
1003 
1004 /**
1005  * clocksource_max_adjustment- Returns max adjustment amount
1006  * @cs:         Pointer to clocksource
1007  *
1008  */
1009 static u32 clocksource_max_adjustment(struct clocksource *cs)
1010 {
1011 	u64 ret;
1012 	/*
1013 	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
1014 	 */
1015 	ret = (u64)cs->mult * 11;
1016 	do_div(ret,100);
1017 	return (u32)ret;
1018 }
1019 
1020 /**
1021  * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
1022  * @mult:	cycle to nanosecond multiplier
1023  * @shift:	cycle to nanosecond divisor (power of two)
1024  * @maxadj:	maximum adjustment value to mult (~11%)
1025  * @mask:	bitmask for two's complement subtraction of non 64 bit counters
1026  * @max_cyc:	maximum cycle value before potential overflow (does not include
1027  *		any safety margin)
1028  *
1029  * NOTE: This function includes a safety margin of 50%, in other words, we
1030  * return half the number of nanoseconds the hardware counter can technically
1031  * cover. This is done so that we can potentially detect problems caused by
1032  * delayed timers or bad hardware, which might result in time intervals that
1033  * are larger than what the math used can handle without overflows.
1034  */
1035 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
1036 {
1037 	u64 max_nsecs, max_cycles;
1038 
1039 	/*
1040 	 * Calculate the maximum number of cycles that we can pass to the
1041 	 * cyc2ns() function without overflowing a 64-bit result.
1042 	 */
1043 	max_cycles = ULLONG_MAX;
1044 	do_div(max_cycles, mult+maxadj);
1045 
1046 	/*
1047 	 * The actual maximum number of cycles we can defer the clocksource is
1048 	 * determined by the minimum of max_cycles and mask.
1049 	 * Note: Here we subtract the maxadj to make sure we don't sleep for
1050 	 * too long if there's a large negative adjustment.
1051 	 */
1052 	max_cycles = min(max_cycles, mask);
1053 	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
1054 
1055 	/* return the max_cycles value as well if requested */
1056 	if (max_cyc)
1057 		*max_cyc = max_cycles;
1058 
1059 	/* Return 50% of the actual maximum, so we can detect bad values */
1060 	max_nsecs >>= 1;
1061 
1062 	return max_nsecs;
1063 }
1064 
1065 /**
1066  * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
1067  * @cs:         Pointer to clocksource to be updated
1068  *
1069  */
1070 static inline void clocksource_update_max_deferment(struct clocksource *cs)
1071 {
1072 	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
1073 						cs->maxadj, cs->mask,
1074 						&cs->max_cycles);
1075 
1076 	/*
1077 	 * Threshold for detecting negative motion in clocksource_delta().
1078 	 *
1079 	 * Allow for 0.875 of the counter width so that overly long idle
1080 	 * sleeps, which go slightly over mask/2, do not trigger the
1081 	 * negative motion detection.
1082 	 */
1083 	cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
1084 }
1085 
1086 static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
1087 {
1088 	struct clocksource *cs;
1089 
1090 	if (!finished_booting || list_empty(&clocksource_list))
1091 		return NULL;
1092 
1093 	/*
1094 	 * We pick the clocksource with the highest rating. If oneshot
1095 	 * mode is active, we pick the highres valid clocksource with
1096 	 * the best rating.
1097 	 */
1098 	list_for_each_entry(cs, &clocksource_list, list) {
1099 		if (skipcur && cs == curr_clocksource)
1100 			continue;
1101 		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
1102 			continue;
1103 		if (cs->flags & CLOCK_SOURCE_WDTEST)
1104 			continue;
1105 		return cs;
1106 	}
1107 	return NULL;
1108 }
1109 
/*
 * Core of the clocksource selection: pick the best rated clocksource,
 * then honor a user supplied override name if that clocksource is usable.
 * Caller must hold clocksource_mutex. @skipcur excludes the current
 * clocksource, which is used when searching for a replacement (fallback).
 */
static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	/* No override requested? Then the best rated one wins */
	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		if (cs->flags & CLOCK_SOURCE_WDTEST)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				/* An unstable override can never be used; forget it */
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	/* Switch only when timekeeping_notify() accepts it (returns 0) */
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
1162 
1163 /**
1164  * clocksource_select - Select the best clocksource available
1165  *
1166  * Private function. Must hold clocksource_mutex when called.
1167  *
1168  * Select the clocksource with the best rating, or the clocksource,
1169  * which is selected by userspace override.
1170  */
1171 static void clocksource_select(void)
1172 {
1173 	__clocksource_select(false);
1174 }
1175 
/*
 * Select the best clocksource while excluding the current one, to find a
 * replacement when the current clocksource is unbound. Caller must hold
 * clocksource_mutex.
 */
static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}
1180 
1181 /*
1182  * clocksource_done_booting - Called near the end of core bootup
1183  *
1184  * Hack to avoid lots of clocksource churn at boot time.
1185  * We use fs_initcall because we want this to start before
1186  * device_initcall but after subsys_initcall.
1187  */
1188 static int __init clocksource_done_booting(void)
1189 {
1190 	mutex_lock(&clocksource_mutex);
1191 	curr_clocksource = clocksource_default_clock();
1192 	finished_booting = 1;
1193 	/*
1194 	 * Run the watchdog first to eliminate unstable clock sources
1195 	 */
1196 	__clocksource_watchdog_kthread();
1197 	clocksource_select();
1198 	mutex_unlock(&clocksource_mutex);
1199 	return 0;
1200 }
1201 fs_initcall(clocksource_done_booting);
1202 
1203 /*
1204  * Enqueue the clocksource sorted by rating
1205  */
1206 static void clocksource_enqueue(struct clocksource *cs)
1207 {
1208 	struct list_head *entry = &clocksource_list;
1209 	struct clocksource *tmp;
1210 
1211 	list_for_each_entry(tmp, &clocksource_list, list) {
1212 		/* Keep track of the place, where to insert */
1213 		if (tmp->rating < cs->rating)
1214 			break;
1215 		entry = &tmp->list;
1216 	}
1217 	list_add(&cs->list, entry);
1218 }
1219 
1220 /**
1221  * __clocksource_update_freq_scale - Used update clocksource with new freq
1222  * @cs:		clocksource to be registered
1223  * @scale:	Scale factor multiplied against freq to get clocksource hz
1224  * @freq:	clocksource frequency (cycles per second) divided by scale
1225  *
1226  * This should only be called from the clocksource->enable() method.
1227  *
1228  * This *SHOULD NOT* be called directly! Please use the
1229  * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
1230  * functions.
1231  */
1232 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
1233 {
1234 	u64 sec;
1235 
1236 	/*
1237 	 * Default clocksources are *special* and self-define their mult/shift.
1238 	 * But, you're not special, so you should specify a freq value.
1239 	 */
1240 	if (freq) {
1241 		/*
1242 		 * Calc the maximum number of seconds which we can run before
1243 		 * wrapping around. For clocksources which have a mask > 32-bit
1244 		 * we need to limit the max sleep time to have a good
1245 		 * conversion precision. 10 minutes is still a reasonable
1246 		 * amount. That results in a shift value of 24 for a
1247 		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
1248 		 * ~ 0.06ppm granularity for NTP.
1249 		 */
1250 		sec = cs->mask;
1251 		do_div(sec, freq);
1252 		do_div(sec, scale);
1253 		if (!sec)
1254 			sec = 1;
1255 		else if (sec > 600 && cs->mask > UINT_MAX)
1256 			sec = 600;
1257 
1258 		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
1259 				       NSEC_PER_SEC / scale, sec * scale);
1260 
1261 		/* Update cs::freq_khz */
1262 		cs->freq_khz = div_u64((u64)freq * scale, 1000);
1263 	}
1264 
1265 	/*
1266 	 * Ensure clocksources that have large 'mult' values don't overflow
1267 	 * when adjusted.
1268 	 */
1269 	cs->maxadj = clocksource_max_adjustment(cs);
1270 	while (freq && ((cs->mult + cs->maxadj < cs->mult)
1271 		|| (cs->mult - cs->maxadj > cs->mult))) {
1272 		cs->mult >>= 1;
1273 		cs->shift--;
1274 		cs->maxadj = clocksource_max_adjustment(cs);
1275 	}
1276 
1277 	/*
1278 	 * Only warn for *special* clocksources that self-define
1279 	 * their mult/shift values and don't specify a freq.
1280 	 */
1281 	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
1282 		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
1283 		cs->name);
1284 
1285 	clocksource_update_max_deferment(cs);
1286 
1287 	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
1288 		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
1289 }
1290 EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
1291 
1292 /**
1293  * __clocksource_register_scale - Used to install new clocksources
1294  * @cs:		clocksource to be registered
1295  * @scale:	Scale factor multiplied against freq to get clocksource hz
1296  * @freq:	clocksource frequency (cycles per second) divided by scale
1297  *
1298  * Returns -EBUSY if registration fails, zero otherwise.
1299  *
1300  * This *SHOULD NOT* be called directly! Please use the
1301  * clocksource_register_hz() or clocksource_register_khz helper functions.
1302  */
1303 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
1304 {
1305 	unsigned long flags;
1306 
1307 	clocksource_arch_init(cs);
1308 
1309 	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
1310 		cs->id = CSID_GENERIC;
1311 
1312 	if (WARN_ON_ONCE(!freq && cs->flags & CLOCK_SOURCE_HAS_COUPLED_CLOCK_EVENT))
1313 		cs->flags &= ~CLOCK_SOURCE_HAS_COUPLED_CLOCK_EVENT;
1314 
1315 	if (cs->vdso_clock_mode < 0 ||
1316 	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
1317 		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
1318 			cs->name, cs->vdso_clock_mode);
1319 		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
1320 	}
1321 
1322 	/* Initialize mult/shift and max_idle_ns */
1323 	__clocksource_update_freq_scale(cs, scale, freq);
1324 
1325 	/* Add clocksource to the clocksource list */
1326 	mutex_lock(&clocksource_mutex);
1327 
1328 	clocksource_watchdog_lock(&flags);
1329 	clocksource_enqueue(cs);
1330 	clocksource_enqueue_watchdog(cs);
1331 	clocksource_watchdog_unlock(&flags);
1332 
1333 	clocksource_select();
1334 	clocksource_select_watchdog(false);
1335 	__clocksource_suspend_select(cs);
1336 	mutex_unlock(&clocksource_mutex);
1337 	return 0;
1338 }
1339 EXPORT_SYMBOL_GPL(__clocksource_register_scale);
1340 
1341 /*
1342  * Unbind clocksource @cs. Called with clocksource_mutex held
1343  */
1344 static int clocksource_unbind(struct clocksource *cs)
1345 {
1346 	unsigned long flags;
1347 
1348 	if (clocksource_is_watchdog(cs)) {
1349 		/* Select and try to install a replacement watchdog. */
1350 		clocksource_select_watchdog(true);
1351 		if (clocksource_is_watchdog(cs))
1352 			return -EBUSY;
1353 	}
1354 
1355 	if (cs == curr_clocksource) {
1356 		/* Select and try to install a replacement clock source */
1357 		clocksource_select_fallback();
1358 		if (curr_clocksource == cs)
1359 			return -EBUSY;
1360 	}
1361 
1362 	if (clocksource_is_suspend(cs)) {
1363 		/*
1364 		 * Select and try to install a replacement suspend clocksource.
1365 		 * If no replacement suspend clocksource, we will just let the
1366 		 * clocksource go and have no suspend clocksource.
1367 		 */
1368 		clocksource_suspend_select(true);
1369 	}
1370 
1371 	clocksource_watchdog_lock(&flags);
1372 	clocksource_dequeue_watchdog(cs);
1373 	list_del_init(&cs->list);
1374 	clocksource_watchdog_unlock(&flags);
1375 
1376 	return 0;
1377 }
1378 
1379 /**
1380  * clocksource_unregister - remove a registered clocksource
1381  * @cs:	clocksource to be unregistered
1382  */
1383 int clocksource_unregister(struct clocksource *cs)
1384 {
1385 	int ret = 0;
1386 
1387 	mutex_lock(&clocksource_mutex);
1388 	if (!list_empty(&cs->list))
1389 		ret = clocksource_unbind(cs);
1390 	mutex_unlock(&clocksource_mutex);
1391 	return ret;
1392 }
1393 EXPORT_SYMBOL(clocksource_unregister);
1394 
1395 #ifdef CONFIG_SYSFS
1396 /**
1397  * current_clocksource_show - sysfs interface for current clocksource
1398  * @dev:	unused
1399  * @attr:	unused
1400  * @buf:	char buffer to be filled with clocksource list
1401  *
1402  * Provides sysfs interface for listing current clocksource.
1403  */
1404 static ssize_t current_clocksource_show(struct device *dev,
1405 					struct device_attribute *attr,
1406 					char *buf)
1407 {
1408 	ssize_t count = 0;
1409 
1410 	mutex_lock(&clocksource_mutex);
1411 	count = sysfs_emit(buf, "%s\n", curr_clocksource->name);
1412 	mutex_unlock(&clocksource_mutex);
1413 
1414 	return count;
1415 }
1416 
1417 ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
1418 {
1419 	size_t ret = cnt;
1420 
1421 	/* strings from sysfs write are not 0 terminated! */
1422 	if (!cnt || cnt >= CS_NAME_LEN)
1423 		return -EINVAL;
1424 
1425 	/* strip of \n: */
1426 	if (buf[cnt-1] == '\n')
1427 		cnt--;
1428 	if (cnt > 0)
1429 		memcpy(dst, buf, cnt);
1430 	dst[cnt] = 0;
1431 	return ret;
1432 }
1433 
1434 /**
1435  * current_clocksource_store - interface for manually overriding clocksource
1436  * @dev:	unused
1437  * @attr:	unused
1438  * @buf:	name of override clocksource
1439  * @count:	length of buffer
1440  *
1441  * Takes input from sysfs interface for manually overriding the default
1442  * clocksource selection.
1443  */
1444 static ssize_t current_clocksource_store(struct device *dev,
1445 					 struct device_attribute *attr,
1446 					 const char *buf, size_t count)
1447 {
1448 	ssize_t ret;
1449 
1450 	mutex_lock(&clocksource_mutex);
1451 
1452 	ret = sysfs_get_uname(buf, override_name, count);
1453 	if (ret >= 0)
1454 		clocksource_select();
1455 
1456 	mutex_unlock(&clocksource_mutex);
1457 
1458 	return ret;
1459 }
1460 static DEVICE_ATTR_RW(current_clocksource);
1461 
1462 /**
1463  * unbind_clocksource_store - interface for manually unbinding clocksource
1464  * @dev:	unused
1465  * @attr:	unused
1466  * @buf:	unused
1467  * @count:	length of buffer
1468  *
1469  * Takes input from sysfs interface for manually unbinding a clocksource.
1470  */
1471 static ssize_t unbind_clocksource_store(struct device *dev,
1472 					struct device_attribute *attr,
1473 					const char *buf, size_t count)
1474 {
1475 	struct clocksource *cs;
1476 	char name[CS_NAME_LEN];
1477 	ssize_t ret;
1478 
1479 	ret = sysfs_get_uname(buf, name, count);
1480 	if (ret < 0)
1481 		return ret;
1482 
1483 	ret = -ENODEV;
1484 	mutex_lock(&clocksource_mutex);
1485 	list_for_each_entry(cs, &clocksource_list, list) {
1486 		if (strcmp(cs->name, name))
1487 			continue;
1488 		ret = clocksource_unbind(cs);
1489 		break;
1490 	}
1491 	mutex_unlock(&clocksource_mutex);
1492 
1493 	return ret ? ret : count;
1494 }
1495 static DEVICE_ATTR_WO(unbind_clocksource);
1496 
1497 /**
1498  * available_clocksource_show - sysfs interface for listing clocksource
1499  * @dev:	unused
1500  * @attr:	unused
1501  * @buf:	char buffer to be filled with clocksource list
1502  *
1503  * Provides sysfs interface for listing registered clocksources
1504  */
1505 static ssize_t available_clocksource_show(struct device *dev,
1506 					  struct device_attribute *attr,
1507 					  char *buf)
1508 {
1509 	struct clocksource *src;
1510 	ssize_t count = 0;
1511 
1512 	mutex_lock(&clocksource_mutex);
1513 	list_for_each_entry(src, &clocksource_list, list) {
1514 		/*
1515 		 * Don't show non-HRES clocksource if the tick code is
1516 		 * in one shot mode (highres=on or nohz=on)
1517 		 */
1518 		if (!tick_oneshot_mode_active() ||
1519 		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
1520 			count += snprintf(buf + count,
1521 				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
1522 				  "%s ", src->name);
1523 	}
1524 	mutex_unlock(&clocksource_mutex);
1525 
1526 	count += snprintf(buf + count,
1527 			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
1528 
1529 	return count;
1530 }
1531 static DEVICE_ATTR_RO(available_clocksource);
1532 
/* sysfs attributes of the clocksource subsystem device */
static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);
1540 
/* Bus type backing the clocksource sysfs subsystem */
static const struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};
1545 
/* The single device instance carrying the clocksource attribute groups */
static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};
1551 
1552 static int __init init_clocksource_sysfs(void)
1553 {
1554 	int error = subsys_system_register(&clocksource_subsys, NULL);
1555 
1556 	if (!error)
1557 		error = device_register(&device_clocksource);
1558 
1559 	return error;
1560 }
1561 
1562 device_initcall(init_clocksource_sysfs);
1563 #endif /* CONFIG_SYSFS */
1564 
1565 /**
1566  * boot_override_clocksource - boot clock override
1567  * @str:	override name
1568  *
1569  * Takes a clocksource= boot argument and uses it
1570  * as the clocksource override name.
1571  */
1572 static int __init boot_override_clocksource(char* str)
1573 {
1574 	mutex_lock(&clocksource_mutex);
1575 	if (str)
1576 		strscpy(override_name, str);
1577 	mutex_unlock(&clocksource_mutex);
1578 	return 1;
1579 }
1580 
1581 __setup("clocksource=", boot_override_clocksource);
1582 
1583 /**
1584  * boot_override_clock - Compatibility layer for deprecated boot option
1585  * @str:	override name
1586  *
1587  * DEPRECATED! Takes a clock= boot argument and uses it
1588  * as the clocksource override name
1589  */
1590 static int __init boot_override_clock(char* str)
1591 {
1592 	if (!strcmp(str, "pmtmr")) {
1593 		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
1594 		return boot_override_clocksource("acpi_pm");
1595 	}
1596 	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
1597 	return boot_override_clocksource(str);
1598 }
1599 
1600 __setup("clock=", boot_override_clock);
1601