1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4 #include <linux/kernel.h>
5 #include <linux/sched.h>
6 #include <linux/sched/clock.h>
7 #include <linux/init.h>
8 #include <linux/export.h>
9 #include <linux/timer.h>
10 #include <linux/acpi_pmtmr.h>
11 #include <linux/cpufreq.h>
12 #include <linux/delay.h>
13 #include <linux/clocksource.h>
14 #include <linux/kvm_types.h>
15 #include <linux/percpu.h>
16 #include <linux/timex.h>
17 #include <linux/static_key.h>
18 #include <linux/static_call.h>
19
20 #include <asm/cpuid/api.h>
21 #include <asm/hpet.h>
22 #include <asm/timer.h>
23 #include <asm/vgtod.h>
24 #include <asm/time.h>
25 #include <asm/delay.h>
26 #include <asm/hypervisor.h>
27 #include <asm/nmi.h>
28 #include <asm/x86_init.h>
29 #include <asm/geode.h>
30 #include <asm/apic.h>
31 #include <asm/cpu_device_id.h>
32 #include <asm/i8259.h>
33 #include <asm/msr.h>
34 #include <asm/topology.h>
35 #include <asm/uv/uv.h>
36 #include <asm/sev.h>
37
38 unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
39 EXPORT_SYMBOL(cpu_khz);
40
41 unsigned int __read_mostly tsc_khz;
42 EXPORT_SYMBOL(tsc_khz);
43
44 #define KHZ 1000
45
46 /*
47 * TSC can be unstable due to cpufreq or due to unsynced TSCs
48 */
49 static int __read_mostly tsc_unstable;
50 static unsigned int __initdata tsc_early_khz;
51
52 static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
53
54 int tsc_clocksource_reliable;
55
56 static int __read_mostly tsc_force_recalibrate;
57
58 static struct clocksource_base art_base_clk = {
59 .id = CSID_X86_ART,
60 };
61 static bool have_art;
62
63 struct cyc2ns {
64 struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
65 seqcount_latch_t seq; /* 32 + 4 = 36 */
66
67 }; /* fits one cacheline */
68
69 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
70
71 static int __init tsc_early_khz_setup(char *buf)
72 {
73 return kstrtouint(buf, 0, &tsc_early_khz);
74 }
75 early_param("tsc_early_khz", tsc_early_khz_setup);
76
77 __always_inline void __cyc2ns_read(struct cyc2ns_data *data)
78 {
79 int seq, idx;
80
81 do {
82 seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
83 idx = seq & 1;
84
85 data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
86 data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
87 data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
88
89 } while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
90 }
91
92 __always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
93 {
94 preempt_disable_notrace();
95 __cyc2ns_read(data);
96 }
97
98 __always_inline void cyc2ns_read_end(void)
99 {
100 preempt_enable_notrace();
101 }
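/*
 * Illustrative usage sketch (not taken from this file): a hypothetical
 * in-kernel caller that needs a consistent snapshot of the conversion
 * parameters, e.g. to turn a raw TSC value into nanoseconds itself, would
 * bracket the reads like this:
 *
 *	struct cyc2ns_data data;
 *	u64 ns;
 *
 *	cyc2ns_read_begin(&data);
 *	ns = data.cyc2ns_offset +
 *	     mul_u64_u32_shr(tsc, data.cyc2ns_mul, data.cyc2ns_shift);
 *	cyc2ns_read_end();
 *
 * A similar pattern is used when the mult/shift/offset triple is exported
 * to the perf user page.
 */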
102
103 /*
104 * Accelerators for sched_clock()
105 * convert from cycles(64bits) => nanoseconds (64bits)
106 * basic equation:
107 * ns = cycles / (freq / ns_per_sec)
108 * ns = cycles * (ns_per_sec / freq)
109 * ns = cycles * (10^9 / (cpu_khz * 10^3))
110 * ns = cycles * (10^6 / cpu_khz)
111 *
112 * Then we use scaling math (suggested by george@mvista.com) to get:
113 * ns = cycles * (10^6 * SC / cpu_khz) / SC
114 * ns = cycles * cyc2ns_scale / SC
115 *
116 * And since SC is a constant power of two, we can convert the div
117 * into a shift. The larger SC is, the more accurate the conversion, but
118 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
119 * (64-bit result) can be used.
120 *
121 * We can use khz divisor instead of mhz to keep a better precision.
122 * (mathieu.desnoyers@polymtl.ca)
123 *
124 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
125 */
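/*
 * Worked example (illustrative numbers only): assume a 2 GHz TSC, i.e.
 * cpu_khz = 2,000,000, so the ideal factor is 10^6 / 2,000,000 = 0.5 ns
 * per cycle. If the chosen shift is 31, cyc2ns_mul is about
 * 0.5 * 2^31 = 0x40000000, and converting 1,000,000 cycles gives
 * (1,000,000 * 0x40000000) >> 31 = 500,000 ns = 0.5 ms.
 */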
126
127 static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
128 {
129 struct cyc2ns_data data;
130 unsigned long long ns;
131
132 __cyc2ns_read(&data);
133
134 ns = data.cyc2ns_offset;
135 ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
136
137 return ns;
138 }
139
140 static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
141 {
142 unsigned long long ns;
143 preempt_disable_notrace();
144 ns = __cycles_2_ns(cyc);
145 preempt_enable_notrace();
146 return ns;
147 }
148
149 static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
150 {
151 unsigned long long ns_now;
152 struct cyc2ns_data data;
153 struct cyc2ns *c2n;
154
155 ns_now = cycles_2_ns(tsc_now);
156
157 /*
158 * Compute a new multiplier as per the above comment and ensure our
159 * time function is continuous; see the comment near struct
160 * cyc2ns_data.
161 */
162 clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
163 NSEC_PER_MSEC, 0);
164
165 /*
166 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
167 * not expected to be greater than 31 due to the original published
168 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
169 * value) - refer to the perf_event_mmap_page documentation in perf_event.h.
170 */
171 if (data.cyc2ns_shift == 32) {
172 data.cyc2ns_shift = 31;
173 data.cyc2ns_mul >>= 1;
174 }
175
176 data.cyc2ns_offset = ns_now -
177 mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
178
179 c2n = per_cpu_ptr(&cyc2ns, cpu);
180
181 write_seqcount_latch_begin(&c2n->seq);
182 c2n->data[0] = data;
183 write_seqcount_latch(&c2n->seq);
184 c2n->data[1] = data;
185 write_seqcount_latch_end(&c2n->seq);
186 }
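/*
 * Continuity check (illustrative): with the offset computed above,
 * re-converting tsc_now with the new pair yields the old clock value:
 *
 *	cyc2ns_offset + mul_u64_u32_shr(tsc_now, cyc2ns_mul, cyc2ns_shift)
 *		= ns_now
 *
 * so sched_clock() does not jump at the point where the scale factors
 * are rewritten.
 */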
187
188 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
189 {
190 unsigned long flags;
191
192 local_irq_save(flags);
193 sched_clock_idle_sleep_event();
194
195 if (khz)
196 __set_cyc2ns_scale(khz, cpu, tsc_now);
197
198 sched_clock_idle_wakeup_event();
199 local_irq_restore(flags);
200 }
201
202 /*
203 * Initialize cyc2ns for boot cpu
204 */
205 static void __init cyc2ns_init_boot_cpu(void)
206 {
207 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
208
209 seqcount_latch_init(&c2n->seq);
210 __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
211 }
212
213 /*
214 * Secondary CPUs do not run through tsc_init(), so set up
215 * all the scale factors for all CPUs, assuming the same
216 * speed as the bootup CPU.
217 */
218 static void __init cyc2ns_init_secondary_cpus(void)
219 {
220 unsigned int cpu, this_cpu = smp_processor_id();
221 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
222 struct cyc2ns_data *data = c2n->data;
223
224 for_each_possible_cpu(cpu) {
225 if (cpu != this_cpu) {
226 seqcount_latch_init(&c2n->seq);
227 c2n = per_cpu_ptr(&cyc2ns, cpu);
228 c2n->data[0] = data[0];
229 c2n->data[1] = data[1];
230 }
231 }
232 }
233
234 /*
235 * Scheduler clock - returns current time in nanosec units.
236 */
237 noinstr u64 native_sched_clock(void)
238 {
239 if (static_branch_likely(&__use_tsc)) {
240 u64 tsc_now = rdtsc();
241
242 /* return the value in ns */
243 return __cycles_2_ns(tsc_now);
244 }
245
246 /*
247 * Fall back to jiffies if there's no TSC available:
248 * ( But note that we still use it if the TSC is marked
249 * unstable. We do this because unlike Time Of Day,
250 * the scheduler clock tolerates small errors and it's
251 * very important for it to be as fast as the platform
252 * can achieve it. )
253 */
254
255 /* No locking but a rare wrong value is not a big deal: */
256 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
257 }
258
259 /*
260 * Generate a sched_clock if you already have a TSC value.
261 */
262 u64 native_sched_clock_from_tsc(u64 tsc)
263 {
264 return cycles_2_ns(tsc);
265 }
266
267 /* We need to define a real function for sched_clock, to override the
268 weak default version */
269 #ifdef CONFIG_PARAVIRT
270 DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
271
272 noinstr u64 sched_clock_noinstr(void)
273 {
274 return static_call(pv_sched_clock)();
275 }
276
277 bool using_native_sched_clock(void)
278 {
279 return static_call_query(pv_sched_clock) == native_sched_clock;
280 }
281
282 void paravirt_set_sched_clock(u64 (*func)(void))
283 {
284 static_call_update(pv_sched_clock, func);
285 }
286 #else
287 u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
288
289 bool using_native_sched_clock(void) { return true; }
290 void paravirt_set_sched_clock(u64 (*func)(void)) { }
291 #endif
292
293 notrace u64 sched_clock(void)
294 {
295 u64 now;
296 preempt_disable_notrace();
297 now = sched_clock_noinstr();
298 preempt_enable_notrace();
299 return now;
300 }
301
302 int check_tsc_unstable(void)
303 {
304 return tsc_unstable;
305 }
306 EXPORT_SYMBOL_GPL(check_tsc_unstable);
307
308 #ifdef CONFIG_X86_TSC
309 int __init notsc_setup(char *str)
310 {
311 mark_tsc_unstable("boot parameter notsc");
312 return 1;
313 }
314 #else
315 /*
316 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
317 * in cpu/common.c
318 */
319 int __init notsc_setup(char *str)
320 {
321 setup_clear_cpu_cap(X86_FEATURE_TSC);
322 return 1;
323 }
324 #endif
325
326 __setup("notsc", notsc_setup);
327
328 static int no_sched_irq_time;
329 static int no_tsc_watchdog;
330 static int tsc_as_watchdog;
331
332 static int __init tsc_setup(char *str)
333 {
334 if (!strcmp(str, "reliable"))
335 tsc_clocksource_reliable = 1;
336 if (!strncmp(str, "noirqtime", 9))
337 no_sched_irq_time = 1;
338 if (!strcmp(str, "unstable"))
339 mark_tsc_unstable("boot parameter");
340 if (!strcmp(str, "nowatchdog")) {
341 no_tsc_watchdog = 1;
342 if (tsc_as_watchdog)
343 pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n",
344 __func__);
345 tsc_as_watchdog = 0;
346 }
347 if (!strcmp(str, "recalibrate"))
348 tsc_force_recalibrate = 1;
349 if (!strcmp(str, "watchdog")) {
350 if (no_tsc_watchdog)
351 pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n",
352 __func__);
353 else
354 tsc_as_watchdog = 1;
355 }
356 return 1;
357 }
358
359 __setup("tsc=", tsc_setup);
360
361 #define MAX_RETRIES 5
362 #define TSC_DEFAULT_THRESHOLD 0x20000
363
364 /*
365 * Read TSC and the reference counters. Take care of any disturbances
366 */
367 static u64 tsc_read_refs(u64 *p, int hpet)
368 {
369 u64 t1, t2;
370 u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
371 int i;
372
373 for (i = 0; i < MAX_RETRIES; i++) {
374 t1 = get_cycles();
375 if (hpet)
376 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
377 else
378 *p = acpi_pm_read_early();
379 t2 = get_cycles();
380 if ((t2 - t1) < thresh)
381 return t2;
382 }
383 return ULLONG_MAX;
384 }
385
386 /*
387 * Calculate the TSC frequency from HPET reference
388 */
389 static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
390 {
391 u64 tmp;
392
393 if (hpet2 < hpet1)
394 hpet2 += 0x100000000ULL;
395 hpet2 -= hpet1;
396 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
397 do_div(tmp, 1000000);
398 deltatsc = div64_u64(deltatsc, tmp);
399
400 return (unsigned long) deltatsc;
401 }
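/*
 * Unit check (illustrative): hpet2 - hpet1 is in HPET ticks and
 * HPET_PERIOD is the tick period in femtoseconds, so after the division
 * by 10^6 'tmp' is the elapsed time in nanoseconds. The callers pass
 * deltatsc already multiplied by 10^6, hence the result is
 * TSC cycles * 10^6 / ns, i.e. the TSC frequency in kHz.
 */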
402
403 /*
404 * Calculate the TSC frequency from PMTimer reference
405 */
406 static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
407 {
408 u64 tmp;
409
410 if (!pm1 && !pm2)
411 return ULONG_MAX;
412
413 if (pm2 < pm1)
414 pm2 += (u64)ACPI_PM_OVRRUN;
415 pm2 -= pm1;
416 tmp = pm2 * 1000000000LL;
417 do_div(tmp, PMTMR_TICKS_PER_SEC);
418 do_div(deltatsc, tmp);
419
420 return (unsigned long) deltatsc;
421 }
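/*
 * Unit check (illustrative): pm2 - pm1 is in ACPI PM timer ticks at
 * PMTMR_TICKS_PER_SEC (~3.58 MHz), so 'tmp' is again the elapsed time in
 * nanoseconds and the final division yields the TSC frequency in kHz.
 */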
422
423 #define CAL_MS 10
424 #define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
425 #define CAL_PIT_LOOPS 1000
426
427 #define CAL2_MS 50
428 #define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
429 #define CAL2_PIT_LOOPS 5000
430
431
432 /*
433 * Try to calibrate the TSC against the Programmable
434 * Interrupt Timer and return the frequency of the TSC
435 * in kHz.
436 *
437 * Return ULONG_MAX on failure to calibrate.
438 */
439 static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
440 {
441 u64 tsc, t1, t2, delta;
442 unsigned long tscmin, tscmax;
443 int pitcnt;
444
445 if (!has_legacy_pic()) {
446 /*
447 * Relies on tsc_early_delay_calibrate() to have given us a semi-
448 * usable udelay(); wait for the same 50ms we would have spent in
449 * the PIT loop below.
450 */
451 udelay(10 * USEC_PER_MSEC);
452 udelay(10 * USEC_PER_MSEC);
453 udelay(10 * USEC_PER_MSEC);
454 udelay(10 * USEC_PER_MSEC);
455 udelay(10 * USEC_PER_MSEC);
456 return ULONG_MAX;
457 }
458
459 /* Set the Gate high, disable speaker */
460 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
461
462 /*
463 * Set up CTC channel 2 for mode 0 (interrupt on terminal
464 * count mode), binary count. Set the latch register
465 * (LSB then MSB) to begin the countdown.
466 */
467 outb(0xb0, 0x43);
468 outb(latch & 0xff, 0x42);
469 outb(latch >> 8, 0x42);
470
471 tsc = t1 = t2 = get_cycles();
472
473 pitcnt = 0;
474 tscmax = 0;
475 tscmin = ULONG_MAX;
476 while ((inb(0x61) & 0x20) == 0) {
477 t2 = get_cycles();
478 delta = t2 - tsc;
479 tsc = t2;
480 if ((unsigned long) delta < tscmin)
481 tscmin = (unsigned int) delta;
482 if ((unsigned long) delta > tscmax)
483 tscmax = (unsigned int) delta;
484 pitcnt++;
485 }
486
487 /*
488 * Sanity checks:
489 *
490 * If we were not able to read the PIT more than loopmin
491 * times, then we have been hit by a massive SMI
492 *
493 * If the maximum is 10 times larger than the minimum,
494 * then we got hit by an SMI as well.
495 */
496 if (pitcnt < loopmin || tscmax > 10 * tscmin)
497 return ULONG_MAX;
498
499 /* Calculate the PIT value */
500 delta = t2 - t1;
501 do_div(delta, ms);
502 return delta;
503 }
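/*
 * Note on units (illustrative): 'delta' is the number of TSC cycles that
 * elapsed while the PIT counted down for 'ms' milliseconds, so
 * delta / ms is cycles per millisecond, i.e. the TSC frequency in kHz.
 */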
504
505 /*
506 * This reads the current MSB of the PIT counter, and
507 * checks if we are running on sufficiently fast and
508 * non-virtualized hardware.
509 *
510 * Our expectations are:
511 *
512 * - the PIT is running at roughly 1.19MHz
513 *
514 * - each IO is going to take about 1us on real hardware,
515 * but we allow it to be much faster (by a factor of 10) or
516 * _slightly_ slower (i.e. we allow up to a 2us read+counter
517 * update - anything else implies an unacceptably slow CPU
518 * or PIT for the fast calibration to work).
519 *
520 * - with 256 PIT ticks to read the value, we have 214us to
521 * see the same MSB (and overhead like doing a single TSC
522 * read per MSB value etc).
523 *
524 * - We're doing 2 reads per loop (LSB, MSB), and we expect
525 * them each to take about a microsecond on real hardware.
526 * So we expect a count value of around 100. But we'll be
527 * generous, and accept anything over 50.
528 *
529 * - if the PIT is stuck, and we see *many* more reads, we
530 * return early (and the next caller of pit_expect_msb()
531 * will then consider it a failure when they don't see the
532 * next expected value).
533 *
534 * These expectations mean that we know that we have seen the
535 * transition from one expected value to another with a fairly
536 * high accuracy, and we didn't miss any events. We can thus
537 * use the TSC value at the transitions to calculate a pretty
538 * good value for the TSC frequency.
539 */
540 static inline int pit_verify_msb(unsigned char val)
541 {
542 /* Ignore LSB */
543 inb(0x42);
544 return inb(0x42) == val;
545 }
546
547 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
548 {
549 int count;
550 u64 tsc = 0, prev_tsc = 0;
551
552 for (count = 0; count < 50000; count++) {
553 if (!pit_verify_msb(val))
554 break;
555 prev_tsc = tsc;
556 tsc = get_cycles();
557 }
558 *deltap = get_cycles() - prev_tsc;
559 *tscp = tsc;
560
561 /*
562 * We require _some_ success, but the quality control
563 * will be based on the error terms on the TSC values.
564 */
565 return count > 5;
566 }
567
568 /*
569 * How many MSB values do we want to see? We aim for
570 * a maximum error rate of 500ppm (in practice the
571 * real error is much smaller), but refuse to spend
572 * more than 50ms on it.
573 */
574 #define MAX_QUICK_PIT_MS 50
575 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
576
577 static unsigned long quick_pit_calibrate(void)
578 {
579 int i;
580 u64 tsc, delta;
581 unsigned long d1, d2;
582
583 if (!has_legacy_pic())
584 return 0;
585
586 /* Set the Gate high, disable speaker */
587 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
588
589 /*
590 * Counter 2, mode 0 (one-shot), binary count
591 *
592 * NOTE! Mode 2 decrements by two (and then the
593 * output is flipped each time, giving the same
594 * final output frequency as a decrement-by-one),
595 * so mode 0 is much better when looking at the
596 * individual counts.
597 */
598 outb(0xb0, 0x43);
599
600 /* Start at 0xffff */
601 outb(0xff, 0x42);
602 outb(0xff, 0x42);
603
604 /*
605 * The PIT starts counting at the next edge, so we
606 * need to delay for a microsecond. The easiest way
607 * to do that is to just read back the 16-bit counter
608 * once from the PIT.
609 */
610 pit_verify_msb(0);
611
612 if (pit_expect_msb(0xff, &tsc, &d1)) {
613 for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
614 if (!pit_expect_msb(0xff-i, &delta, &d2))
615 break;
616
617 delta -= tsc;
618
619 /*
620 * Extrapolate the error and fail fast if the error will
621 * never be below 500 ppm.
622 */
623 if (i == 1 &&
624 d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
625 return 0;
626
627 /*
628 * Iterate until the error is less than 500 ppm
629 */
630 if (d1+d2 >= delta >> 11)
631 continue;
632
633 /*
634 * Check the PIT one more time to verify that
635 * all TSC reads were stable wrt the PIT.
636 *
637 * This also guarantees serialization of the
638 * last cycle read ('d2') in pit_expect_msb.
639 */
640 if (!pit_verify_msb(0xfe - i))
641 break;
642 goto success;
643 }
644 }
645 pr_info("Fast TSC calibration failed\n");
646 return 0;
647
648 success:
649 /*
650 * Ok, if we get here, then we've seen the
651 * MSB of the PIT decrement 'i' times, and the
652 * error has shrunk to less than 500 ppm.
653 *
654 * As a result, we can depend on there not being
655 * any odd delays anywhere, and the TSC reads are
656 * reliable (within the error).
657 *
658 * kHz = ticks / time-in-seconds / 1000;
659 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
660 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
661 */
662 delta *= PIT_TICK_RATE;
663 do_div(delta, i*256*1000);
664 pr_info("Fast TSC calibration using PIT\n");
665 return delta;
666 }
667
668 /**
669 * native_calibrate_tsc - determine TSC frequency
670 * Determine TSC frequency via CPUID, else return 0.
671 */
672 unsigned long native_calibrate_tsc(void)
673 {
674 unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
675 unsigned int crystal_khz;
676
677 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
678 return 0;
679
680 if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
681 return 0;
682
683 eax_denominator = ebx_numerator = ecx_hz = edx = 0;
684
685 /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
686 cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
687
688 if (ebx_numerator == 0 || eax_denominator == 0)
689 return 0;
690
691 crystal_khz = ecx_hz / 1000;
692
693 /*
694 * Denverton SoCs don't report crystal clock, and also don't support
695 * CPUID_LEAF_FREQ for the calculation below, so hardcode the 25MHz
696 * crystal clock.
697 */
698 if (crystal_khz == 0 &&
699 boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
700 crystal_khz = 25000;
701
702 /*
703 * The TSC frequency reported directly by CPUID is a "hardware reported"
704 * frequency and is the most accurate one we have so far. This
705 * is considered a known frequency.
706 */
707 if (crystal_khz != 0)
708 setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
709
710 /*
711 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
712 * clock, but we can easily calculate it to a high degree of accuracy
713 * by considering the crystal ratio and the CPU speed.
714 */
715 if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= CPUID_LEAF_FREQ) {
716 unsigned int eax_base_mhz, ebx, ecx, edx;
717
718 cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx, &ecx, &edx);
719 crystal_khz = eax_base_mhz * 1000 *
720 eax_denominator / ebx_numerator;
721 }
722
723 if (crystal_khz == 0)
724 return 0;
725
726 /*
727 * For Atom SoCs TSC is the only reliable clocksource.
728 * Mark TSC reliable so no watchdog on it.
729 */
730 if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
731 setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
732
733 #ifdef CONFIG_X86_LOCAL_APIC
734 /*
735 * The local APIC appears to be fed by the core crystal clock
736 * (which sounds entirely sensible). We can set the global
737 * lapic_timer_period here to avoid having to calibrate the APIC
738 * timer later.
739 */
740 lapic_timer_period = crystal_khz * 1000 / HZ;
741 #endif
742
743 return crystal_khz * ebx_numerator / eax_denominator;
744 }
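/*
 * Worked example (hypothetical CPUID values): if leaf 0x15 returns
 * eax (denominator) = 2, ebx (numerator) = 188 and ecx = 38,400,000 Hz
 * (a 38.4 MHz crystal), then crystal_khz = 38,400 and the TSC frequency
 * is 38,400 * 188 / 2 = 3,609,600 kHz, i.e. roughly 3.6 GHz.
 */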
745
746 static unsigned long cpu_khz_from_cpuid(void)
747 {
748 unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
749
750 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
751 return 0;
752
753 if (boot_cpu_data.cpuid_level < CPUID_LEAF_FREQ)
754 return 0;
755
756 eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
757
758 cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
759
760 return eax_base_mhz * 1000;
761 }
762
763 /*
764 * Calibrate the CPU using the PIT, HPET and ACPI PM timer methods. They
765 * become available later in boot, after ACPI has been initialized.
766 */
767 static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
768 {
769 u64 tsc1, tsc2, delta, ref1, ref2;
770 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
771 unsigned long flags, latch, ms;
772 int hpet = is_hpet_enabled(), i, loopmin;
773
774 /*
775 * Run 5 calibration loops to get the lowest frequency value
776 * (the best estimate). We use two different calibration modes
777 * here:
778 *
779 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
780 * load a timeout of 50ms. We read the time right after we
781 * started the timer and wait until the PIT count down reaches
782 * zero. In each wait loop iteration we read the TSC and check
783 * the delta to the previous read. We keep track of the min
784 * and max values of that delta. The delta is mostly defined
785 * by the IO time of the PIT access, so we can detect when
786 * any disturbance happened between the two reads. If the
787 * maximum time is significantly larger than the minimum time,
788 * then we discard the result and have another try.
789 *
790 * 2) Reference counter. If available we use the HPET or the
791 * PMTIMER as a reference to check the sanity of that value.
792 * We use separate TSC readouts and check inside of the
793 * reference read for any possible disturbance. We discard
794 * disturbed values here as well. We do that around the PIT
795 * calibration delay loop as we have to wait for a certain
796 * amount of time anyway.
797 */
798
799 /* Preset PIT loop values */
800 latch = CAL_LATCH;
801 ms = CAL_MS;
802 loopmin = CAL_PIT_LOOPS;
803
804 for (i = 0; i < 3; i++) {
805 unsigned long tsc_pit_khz;
806
807 /*
808 * Read the start value and the reference count of
809 * hpet/pmtimer when available. Then do the PIT
810 * calibration, which will take at least 50ms, and
811 * read the end value.
812 */
813 local_irq_save(flags);
814 tsc1 = tsc_read_refs(&ref1, hpet);
815 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
816 tsc2 = tsc_read_refs(&ref2, hpet);
817 local_irq_restore(flags);
818
819 /* Pick the lowest PIT TSC calibration so far */
820 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
821
822 /* hpet or pmtimer available ? */
823 if (ref1 == ref2)
824 continue;
825
826 /* Check, whether the sampling was disturbed */
827 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
828 continue;
829
830 tsc2 = (tsc2 - tsc1) * 1000000LL;
831 if (hpet)
832 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
833 else
834 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
835
836 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
837
838 /* Check the reference deviation */
839 delta = ((u64) tsc_pit_min) * 100;
840 do_div(delta, tsc_ref_min);
841
842 /*
843 * If both calibration results are inside a 10% window
844 * then we can be sure that the calibration
845 * succeeded. We break out of the loop right away. We
846 * use the reference value, as it is more precise.
847 */
848 if (delta >= 90 && delta <= 110) {
849 pr_info("PIT calibration matches %s. %d loops\n",
850 hpet ? "HPET" : "PMTIMER", i + 1);
851 return tsc_ref_min;
852 }
853
854 /*
855 * Check whether PIT failed more than once. This
856 * happens in virtualized environments. We need to
857 * give the virtual PC a slightly longer timeframe for
858 * the HPET/PMTIMER to make the result precise.
859 */
860 if (i == 1 && tsc_pit_min == ULONG_MAX) {
861 latch = CAL2_LATCH;
862 ms = CAL2_MS;
863 loopmin = CAL2_PIT_LOOPS;
864 }
865 }
866
867 /*
868 * Now check the results.
869 */
870 if (tsc_pit_min == ULONG_MAX) {
871 /* PIT gave no useful value */
872 pr_warn("Unable to calibrate against PIT\n");
873
874 /* We don't have an alternative source, disable TSC */
875 if (!hpet && !ref1 && !ref2) {
876 pr_notice("No reference (HPET/PMTIMER) available\n");
877 return 0;
878 }
879
880 /* The alternative source failed as well, disable TSC */
881 if (tsc_ref_min == ULONG_MAX) {
882 pr_warn("HPET/PMTIMER calibration failed\n");
883 return 0;
884 }
885
886 /* Use the alternative source */
887 pr_info("using %s reference calibration\n",
888 hpet ? "HPET" : "PMTIMER");
889
890 return tsc_ref_min;
891 }
892
893 /* We don't have an alternative source, use the PIT calibration value */
894 if (!hpet && !ref1 && !ref2) {
895 pr_info("Using PIT calibration value\n");
896 return tsc_pit_min;
897 }
898
899 /* The alternative source failed, use the PIT calibration value */
900 if (tsc_ref_min == ULONG_MAX) {
901 pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
902 return tsc_pit_min;
903 }
904
905 /*
906 * The calibration values differ too much. When in doubt, we use
907 * the PIT value as we know that there are PMTIMERs around
908 * running at double speed. At least we let the user know:
909 */
910 pr_warn("PIT calibration deviates from %s: %lu %lu\n",
911 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
912 pr_info("Using PIT calibration value\n");
913 return tsc_pit_min;
914 }
915
916 /**
917 * native_calibrate_cpu_early - can calibrate the cpu early in boot
918 */
919 unsigned long native_calibrate_cpu_early(void)
920 {
921 unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
922
923 if (!fast_calibrate)
924 fast_calibrate = cpu_khz_from_msr();
925 if (!fast_calibrate) {
926 local_irq_save(flags);
927 fast_calibrate = quick_pit_calibrate();
928 local_irq_restore(flags);
929 }
930 return fast_calibrate;
931 }
932
933
934 /**
935 * native_calibrate_cpu - calibrate the cpu
936 */
937 static unsigned long native_calibrate_cpu(void)
938 {
939 unsigned long tsc_freq = native_calibrate_cpu_early();
940
941 if (!tsc_freq)
942 tsc_freq = pit_hpet_ptimer_calibrate_cpu();
943
944 return tsc_freq;
945 }
946
947 void recalibrate_cpu_khz(void)
948 {
949 #ifndef CONFIG_SMP
950 unsigned long cpu_khz_old = cpu_khz;
951
952 if (!boot_cpu_has(X86_FEATURE_TSC))
953 return;
954
955 cpu_khz = x86_platform.calibrate_cpu();
956 tsc_khz = x86_platform.calibrate_tsc();
957 if (tsc_khz == 0)
958 tsc_khz = cpu_khz;
959 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
960 cpu_khz = tsc_khz;
961 cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
962 cpu_khz_old, cpu_khz);
963 #endif
964 }
965 EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);
966
967
968 static unsigned long long cyc2ns_suspend;
969
970 void tsc_save_sched_clock_state(void)
971 {
972 if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
973 return;
974
975 cyc2ns_suspend = sched_clock();
976 }
977
978 /*
979 * Even on processors with invariant TSC, the TSC gets reset in some of the
980 * ACPI system sleep states. And on some systems the BIOS seems to reinit the
981 * TSC to an arbitrary value (still sync'd across CPUs) during resume from such
982 * sleep states. To cope with this, recompute the cyc2ns_offset for each CPU so
983 * that sched_clock() continues from the point where it left off during
984 * suspend.
985 */
986 void tsc_restore_sched_clock_state(void)
987 {
988 unsigned long long offset;
989 unsigned long flags;
990 int cpu;
991
992 if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
993 return;
994
995 local_irq_save(flags);
996
997 /*
998 * We're coming out of suspend, there's no concurrency yet; don't
999 * bother being nice about the RCU stuff, just write to both
1000 * data fields.
1001 */
1002
1003 this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
1004 this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
1005
1006 offset = cyc2ns_suspend - sched_clock();
1007
1008 for_each_possible_cpu(cpu) {
1009 per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
1010 per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
1011 }
1012
1013 local_irq_restore(flags);
1014 }
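/*
 * Illustrative: with the offsets temporarily zeroed, the sched_clock()
 * call above returns the raw cyc2ns value of the post-resume TSC.
 * Installing 'cyc2ns_suspend - sched_clock()' as the new offset therefore
 * makes the next sched_clock() read resume at the pre-suspend value and
 * advance by the time elapsed since this point.
 */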
1015
1016 #ifdef CONFIG_CPU_FREQ
1017 /*
1018 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
1019 * changes.
1020 *
1021 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
1022 * as unstable and give up in those cases.
1023 *
1024 * Should fix up last_tsc too. Currently gettimeofday in the
1025 * first tick after the change will be slightly wrong.
1026 */
1027
1028 static unsigned int ref_freq;
1029 static unsigned long loops_per_jiffy_ref;
1030 static unsigned long tsc_khz_ref;
1031
1032 static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
1033 void *data)
1034 {
1035 struct cpufreq_freqs *freq = data;
1036
1037 if (num_online_cpus() > 1) {
1038 mark_tsc_unstable("cpufreq changes on SMP");
1039 return 0;
1040 }
1041
1042 if (!ref_freq) {
1043 ref_freq = freq->old;
1044 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
1045 tsc_khz_ref = tsc_khz;
1046 }
1047
1048 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
1049 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
1050 boot_cpu_data.loops_per_jiffy =
1051 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
1052
1053 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
1054 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
1055 mark_tsc_unstable("cpufreq changes");
1056
1057 set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
1058 }
1059
1060 return 0;
1061 }
1062
1063 static struct notifier_block time_cpufreq_notifier_block = {
1064 .notifier_call = time_cpufreq_notifier
1065 };
1066
1067 static int __init cpufreq_register_tsc_scaling(void)
1068 {
1069 if (!boot_cpu_has(X86_FEATURE_TSC))
1070 return 0;
1071 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1072 return 0;
1073 cpufreq_register_notifier(&time_cpufreq_notifier_block,
1074 CPUFREQ_TRANSITION_NOTIFIER);
1075 return 0;
1076 }
1077
1078 core_initcall(cpufreq_register_tsc_scaling);
1079
1080 #endif /* CONFIG_CPU_FREQ */
1081
1082 #define ART_MIN_DENOMINATOR (1)
1083
1084 /*
1085 * If ART is present detect the numerator:denominator to convert to TSC
1086 */
1087 static void __init detect_art(void)
1088 {
1089 unsigned int unused;
1090
1091 if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
1092 return;
1093
1094 /*
1095 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
1096 * and the TSC counter resets must not occur asynchronously.
1097 */
1098 if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
1099 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
1100 !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
1101 tsc_async_resets)
1102 return;
1103
1104 cpuid(CPUID_LEAF_TSC, &art_base_clk.denominator,
1105 &art_base_clk.numerator, &art_base_clk.freq_khz, &unused);
1106
1107 art_base_clk.freq_khz /= KHZ;
1108 if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
1109 return;
1110
1111 rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
1112
1113 /* Make this sticky over multiple CPU init calls */
1114 setup_force_cpu_cap(X86_FEATURE_ART);
1115 }
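/*
 * Relationship recorded above (a sketch of how CSID_X86_ART consumers are
 * expected to use it, per the ART definition): the TSC value corresponding
 * to an ART timestamp is approximately
 *
 *	tsc = art * art_base_clk.numerator / art_base_clk.denominator
 *	      + art_base_clk.offset
 *
 * with the offset taken from MSR_IA32_TSC_ADJUST as read above.
 */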
1116
1117
1118 /* clocksource code */
1119
1120 static void tsc_resume(struct clocksource *cs)
1121 {
1122 tsc_verify_tsc_adjust(true);
1123 }
1124
1125 /*
1126 * We used to compare the TSC to the cycle_last value in the clocksource
1127 * structure to avoid a nasty time-warp. This can be observed in a
1128 * very small window right after one CPU updated cycle_last under
1129 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
1130 * is smaller than the cycle_last reference value due to a TSC which
1131 * is slightly behind. This delta is nowhere else observable, but in
1132 * that case it results in a forward time jump in the range of hours
1133 * due to the unsigned delta calculation of the time keeping core
1134 * code, which is necessary to support wrapping clocksources like pm
1135 * timer.
1136 *
1137 * This sanity check is now done in the core timekeeping code,
1138 * checking the result of read_tsc() - cycle_last for being negative.
1139 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
1140 */
1141 static u64 read_tsc(struct clocksource *cs)
1142 {
1143 return (u64)rdtsc_ordered();
1144 }
1145
1146 static void tsc_cs_mark_unstable(struct clocksource *cs)
1147 {
1148 if (tsc_unstable)
1149 return;
1150
1151 tsc_unstable = 1;
1152 if (using_native_sched_clock())
1153 clear_sched_clock_stable();
1154 pr_info("Marking TSC unstable due to clocksource watchdog\n");
1155 }
1156
1157 static void tsc_cs_tick_stable(struct clocksource *cs)
1158 {
1159 if (tsc_unstable)
1160 return;
1161
1162 if (using_native_sched_clock())
1163 sched_clock_tick_stable();
1164 }
1165
1166 static int tsc_cs_enable(struct clocksource *cs)
1167 {
1168 vclocks_set_used(VDSO_CLOCKMODE_TSC);
1169 return 0;
1170 }
1171
1172 /*
1173 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
1174 */
1175 static struct clocksource clocksource_tsc_early = {
1176 .name = "tsc-early",
1177 .rating = 299,
1178 .uncertainty_margin = 32 * NSEC_PER_MSEC,
1179 .read = read_tsc,
1180 .mask = CLOCKSOURCE_MASK(64),
1181 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1182 CLOCK_SOURCE_MUST_VERIFY,
1183 .id = CSID_X86_TSC_EARLY,
1184 .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
1185 .enable = tsc_cs_enable,
1186 .resume = tsc_resume,
1187 .mark_unstable = tsc_cs_mark_unstable,
1188 .tick_stable = tsc_cs_tick_stable,
1189 .list = LIST_HEAD_INIT(clocksource_tsc_early.list),
1190 };
1191
1192 /*
1193 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
1194 * this one will immediately take over. We will only register if TSC has
1195 * been found good.
1196 */
1197 static struct clocksource clocksource_tsc = {
1198 .name = "tsc",
1199 .rating = 300,
1200 .read = read_tsc,
1201 .mask = CLOCKSOURCE_MASK(64),
1202 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1203 CLOCK_SOURCE_VALID_FOR_HRES |
1204 CLOCK_SOURCE_MUST_VERIFY |
1205 CLOCK_SOURCE_VERIFY_PERCPU,
1206 .id = CSID_X86_TSC,
1207 .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
1208 .enable = tsc_cs_enable,
1209 .resume = tsc_resume,
1210 .mark_unstable = tsc_cs_mark_unstable,
1211 .tick_stable = tsc_cs_tick_stable,
1212 .list = LIST_HEAD_INIT(clocksource_tsc.list),
1213 };
1214
1215 void mark_tsc_unstable(char *reason)
1216 {
1217 if (tsc_unstable)
1218 return;
1219
1220 tsc_unstable = 1;
1221 if (using_native_sched_clock())
1222 clear_sched_clock_stable();
1223 pr_info("Marking TSC unstable due to %s\n", reason);
1224
1225 clocksource_mark_unstable(&clocksource_tsc_early);
1226 clocksource_mark_unstable(&clocksource_tsc);
1227 }
1228
1229 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1230
1231 static void __init tsc_disable_clocksource_watchdog(void)
1232 {
1233 clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1234 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1235 }
1236
1237 bool tsc_clocksource_watchdog_disabled(void)
1238 {
1239 return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) &&
1240 tsc_as_watchdog && !no_tsc_watchdog;
1241 }
1242
1243 static void __init check_system_tsc_reliable(void)
1244 {
1245 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1246 if (is_geode_lx()) {
1247 /* RTSC counts during suspend */
1248 #define RTSC_SUSP 0x100
1249 unsigned long res_low, res_high;
1250
1251 rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1252 /* Geode_LX - the OLPC CPU has a very reliable TSC */
1253 if (res_low & RTSC_SUSP)
1254 tsc_clocksource_reliable = 1;
1255 }
1256 #endif
1257 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1258 tsc_clocksource_reliable = 1;
1259
1260 /*
1261 * Disable the clocksource watchdog when the system has:
1262 * - TSC running at constant frequency
1263 * - TSC which does not stop in C-States
1264 * - the TSC_ADJUST register which allows detecting even minimal
1265 * modifications
1266 * - not more than four packages
1267 */
1268 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
1269 boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
1270 boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
1271 topology_max_packages() <= 4)
1272 tsc_disable_clocksource_watchdog();
1273 }
1274
1275 /*
1276 * Make an educated guess if the TSC is trustworthy and synchronized
1277 * over all CPUs.
1278 */
1279 int unsynchronized_tsc(void)
1280 {
1281 if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
1282 return 1;
1283
1284 #ifdef CONFIG_SMP
1285 if (apic_is_clustered_box())
1286 return 1;
1287 #endif
1288
1289 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1290 return 0;
1291
1292 if (tsc_clocksource_reliable)
1293 return 0;
1294 /*
1295 * Intel systems are normally all synchronized.
1296 * Exceptions must mark TSC as unstable:
1297 */
1298 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1299 /* assume multi socket systems are not synchronized: */
1300 if (topology_max_packages() > 1)
1301 return 1;
1302 }
1303
1304 return 0;
1305 }
1306
1307 static void tsc_refine_calibration_work(struct work_struct *work);
1308 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1309 /**
1310 * tsc_refine_calibration_work - Further refine tsc freq calibration
1311 * @work: ignored.
1312 *
1313 * This function uses delayed work over a period of a
1314 * second to further refine the TSC freq value. Since this is
1315 * timer based, instead of loop based, we don't block the boot
1316 * process while this longer calibration is done.
1317 *
1318 * If there are any calibration anomalies (too many SMIs, etc),
1319 * or the refined calibration is off by more than 1% from the fast early
1320 * calibration, we throw out the new calibration and use the
1321 * early calibration.
1322 */
1323 static void tsc_refine_calibration_work(struct work_struct *work)
1324 {
1325 static u64 tsc_start = ULLONG_MAX, ref_start;
1326 static int hpet;
1327 u64 tsc_stop, ref_stop, delta;
1328 unsigned long freq;
1329 int cpu;
1330
1331 /* Don't bother refining TSC on unstable systems */
1332 if (tsc_unstable)
1333 goto unreg;
1334
1335 /*
1336 * Since the work is started early in boot, we may be
1337 * delayed the first time we expire. So set the workqueue
1338 * again once we know timers are working.
1339 */
1340 if (tsc_start == ULLONG_MAX) {
1341 restart:
1342 /*
1343 * Only set hpet once, to avoid mixing hardware
1344 * if the hpet becomes enabled later.
1345 */
1346 hpet = is_hpet_enabled();
1347 tsc_start = tsc_read_refs(&ref_start, hpet);
1348 schedule_delayed_work(&tsc_irqwork, HZ);
1349 return;
1350 }
1351
1352 tsc_stop = tsc_read_refs(&ref_stop, hpet);
1353
1354 /* hpet or pmtimer available ? */
1355 if (ref_start == ref_stop)
1356 goto out;
1357
1358 /* Check, whether the sampling was disturbed */
1359 if (tsc_stop == ULLONG_MAX)
1360 goto restart;
1361
1362 delta = tsc_stop - tsc_start;
1363 delta *= 1000000LL;
1364 if (hpet)
1365 freq = calc_hpet_ref(delta, ref_start, ref_stop);
1366 else
1367 freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
1368
1369 /* Will hit this only if tsc_force_recalibrate has been set */
1370 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1371
1372 /* Warn if the deviation exceeds 500 ppm */
1373 if (abs(tsc_khz - freq) > (tsc_khz >> 11)) {
1374 pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n");
1375 pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n",
1376 (unsigned long)tsc_khz / 1000,
1377 (unsigned long)tsc_khz % 1000);
1378 }
1379
1380 pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n",
1381 hpet ? "HPET" : "PM_TIMER",
1382 (unsigned long)freq / 1000,
1383 (unsigned long)freq % 1000);
1384
1385 return;
1386 }
1387
1388 /* Make sure we're within 1% */
1389 if (abs(tsc_khz - freq) > tsc_khz/100)
1390 goto out;
1391
1392 tsc_khz = freq;
1393 pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
1394 (unsigned long)tsc_khz / 1000,
1395 (unsigned long)tsc_khz % 1000);
1396
1397 /* Inform the TSC deadline clockevent devices about the recalibration */
1398 lapic_update_tsc_freq();
1399
1400 /* Update the sched_clock() rate to match the clocksource one */
1401 for_each_possible_cpu(cpu)
1402 set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
1403
1404 out:
1405 if (tsc_unstable)
1406 goto unreg;
1407
1408 if (boot_cpu_has(X86_FEATURE_ART)) {
1409 have_art = true;
1410 clocksource_tsc.base = &art_base_clk;
1411 }
1412 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1413 unreg:
1414 clocksource_unregister(&clocksource_tsc_early);
1415 }
1416
1417
1418 static int __init init_tsc_clocksource(void)
1419 {
1420 if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
1421 return 0;
1422
1423 if (tsc_unstable) {
1424 clocksource_unregister(&clocksource_tsc_early);
1425 return 0;
1426 }
1427
1428 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
1429 clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1430
1431 /*
1432 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
1433 * the refined calibration and directly register it as a clocksource.
1434 */
1435 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1436 if (boot_cpu_has(X86_FEATURE_ART)) {
1437 have_art = true;
1438 clocksource_tsc.base = &art_base_clk;
1439 }
1440 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1441 clocksource_unregister(&clocksource_tsc_early);
1442
1443 if (!tsc_force_recalibrate)
1444 return 0;
1445 }
1446
1447 schedule_delayed_work(&tsc_irqwork, 0);
1448 return 0;
1449 }
1450 /*
1451 * We use device_initcall here, to ensure we run after the hpet
1452 * is fully initialized, which may occur at fs_initcall time.
1453 */
1454 device_initcall(init_tsc_clocksource);
1455
1456 static bool __init determine_cpu_tsc_frequencies(bool early)
1457 {
1458 /* Make sure that cpu and tsc are not already calibrated */
1459 WARN_ON(cpu_khz || tsc_khz);
1460
1461 if (early) {
1462 cpu_khz = x86_platform.calibrate_cpu();
1463 if (tsc_early_khz) {
1464 tsc_khz = tsc_early_khz;
1465 } else {
1466 tsc_khz = x86_platform.calibrate_tsc();
1467 clocksource_tsc.freq_khz = tsc_khz;
1468 }
1469 } else {
1470 /* We should not be here with non-native cpu calibration */
1471 WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
1472 cpu_khz = pit_hpet_ptimer_calibrate_cpu();
1473 }
1474
1475 /*
1476 * Trust non-zero tsc_khz as authoritative,
1477 * and use it to sanity check cpu_khz,
1478 * which will be off if system timer is off.
1479 */
1480 if (tsc_khz == 0)
1481 tsc_khz = cpu_khz;
1482 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
1483 cpu_khz = tsc_khz;
1484
1485 if (tsc_khz == 0)
1486 return false;
1487
1488 pr_info("Detected %lu.%03lu MHz processor\n",
1489 (unsigned long)cpu_khz / KHZ,
1490 (unsigned long)cpu_khz % KHZ);
1491
1492 if (cpu_khz != tsc_khz) {
1493 pr_info("Detected %lu.%03lu MHz TSC",
1494 (unsigned long)tsc_khz / KHZ,
1495 (unsigned long)tsc_khz % KHZ);
1496 }
1497 return true;
1498 }
1499
1500 static unsigned long __init get_loops_per_jiffy(void)
1501 {
1502 u64 lpj = (u64)tsc_khz * KHZ;
1503
1504 do_div(lpj, HZ);
1505 return lpj;
1506 }
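/*
 * Example (illustrative, assuming tsc_khz = 2,000,000 and HZ = 250):
 * lpj = 2,000,000 * 1000 / 250 = 8,000,000 TSC cycles per jiffy.
 */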
1507
1508 static void __init tsc_enable_sched_clock(void)
1509 {
1510 loops_per_jiffy = get_loops_per_jiffy();
1511 use_tsc_delay();
1512
1513 /* Sanitize TSC ADJUST before cyc2ns gets initialized */
1514 tsc_store_and_check_tsc_adjust(true);
1515 cyc2ns_init_boot_cpu();
1516 static_branch_enable(&__use_tsc);
1517 }
1518
1519 void __init tsc_early_init(void)
1520 {
1521 if (!boot_cpu_has(X86_FEATURE_TSC))
1522 return;
1523 /* Don't change UV TSC multi-chassis synchronization */
1524 if (is_early_uv_system())
1525 return;
1526
1527 snp_secure_tsc_init();
1528
1529 if (!determine_cpu_tsc_frequencies(true))
1530 return;
1531 tsc_enable_sched_clock();
1532 }
1533
1534 void __init tsc_init(void)
1535 {
1536 if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
1537 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1538 return;
1539 }
1540
1541 /*
1542 * native_calibrate_cpu_early can only calibrate using methods that are
1543 * available early in boot.
1544 */
1545 if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
1546 x86_platform.calibrate_cpu = native_calibrate_cpu;
1547
1548 if (!tsc_khz) {
1549 /* We failed to determine frequencies earlier, try again */
1550 if (!determine_cpu_tsc_frequencies(false)) {
1551 mark_tsc_unstable("could not calculate TSC khz");
1552 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1553 return;
1554 }
1555 tsc_enable_sched_clock();
1556 }
1557
1558 cyc2ns_init_secondary_cpus();
1559
1560 if (!no_sched_irq_time)
1561 enable_sched_clock_irqtime();
1562
1563 lpj_fine = get_loops_per_jiffy();
1564
1565 check_system_tsc_reliable();
1566
1567 if (unsynchronized_tsc()) {
1568 mark_tsc_unstable("TSCs unsynchronized");
1569 return;
1570 }
1571
1572 if (tsc_clocksource_reliable || no_tsc_watchdog)
1573 tsc_disable_clocksource_watchdog();
1574
1575 clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1576 detect_art();
1577 }
1578
1579 #ifdef CONFIG_SMP
1580 /*
1581 * Check whether existing calibration data can be reused.
1582 */
1583 unsigned long calibrate_delay_is_known(void)
1584 {
1585 int sibling, cpu = smp_processor_id();
1586 int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
1587 const struct cpumask *mask = topology_core_cpumask(cpu);
1588
1589 /*
1590 * If TSC has constant frequency and TSC is synchronized across
1591 * sockets then reuse CPU0 calibration.
1592 */
1593 if (constant_tsc && !tsc_unstable)
1594 return cpu_data(0).loops_per_jiffy;
1595
1596 /*
1597 * If TSC has constant frequency and TSC is not synchronized across
1598 * sockets and this is not the first CPU in the socket, then reuse
1599 * the calibration value of an already online CPU on that socket.
1600 *
1601 * This assumes that CONSTANT_TSC is consistent for all CPUs in a
1602 * socket.
1603 */
1604 if (!constant_tsc || !mask)
1605 return 0;
1606
1607 sibling = cpumask_any_but(mask, cpu);
1608 if (sibling < nr_cpu_ids)
1609 return cpu_data(sibling).loops_per_jiffy;
1610 return 0;
1611 }
1612 #endif
1613