// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>
unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);
/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;
struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
	seqcount_t	   seq;		/* 32 + 4    = 36 */

}; /* fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
static int __init tsc_early_khz_setup(char *buf)
{
	return kstrtouint(buf, 0, &tsc_early_khz);
}
early_param("tsc_early_khz", tsc_early_khz_setup);
__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
{
	int seq, idx;

	preempt_disable_notrace();

	do {
		seq = this_cpu_read(cyc2ns.seq.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}

__always_inline void cyc2ns_read_end(void)
{
	preempt_enable_notrace();
}
/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
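/*
 * Worked example of the scaling math above (illustrative numbers only,
 * not taken from any real part): with SC = 2^10 and a hypothetical
 * 2800 MHz TSC (cpu_khz = 2800000):
 *
 *	cyc2ns_scale = 10^6 * 2^10 / 2800000 ~= 365
 *	ns           = cycles * 365 >> 10
 *
 * so 2800000 cycles (one millisecond worth) yield 2800000 * 365 / 1024
 * ~= 998047 ns, within ~0.2% of the exact 1000000 ns; a larger SC
 * shrinks that rounding error. The actual mult/shift pair is picked at
 * runtime by clocks_calc_mult_shift() below.
 */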
static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	cyc2ns_read_begin(&data);

	ns = data.cyc2ns_offset;
	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

	cyc2ns_read_end();

	return ns;
}
static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}

	data.cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

	c2n = per_cpu_ptr(&cyc2ns, cpu);

	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[0] = data;
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;
}
static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (khz)
		__set_cyc2ns_scale(khz, cpu, tsc_now);

	sched_clock_idle_wakeup_event();
	local_irq_restore(flags);
}
/*
 * Initialize cyc2ns for boot cpu
 */
static void __init cyc2ns_init_boot_cpu(void)
{
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);

	seqcount_init(&c2n->seq);
	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}
/*
 * Secondary CPUs do not run through tsc_init(), so set up
 * all the scale factors for all CPUs, assuming the same
 * speed as the bootup CPU.
 */
static void __init cyc2ns_init_secondary_cpus(void)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
	struct cyc2ns_data *data = c2n->data;

	for_each_possible_cpu(cpu) {
		if (cpu != this_cpu) {
			seqcount_init(&c2n->seq);
			c2n = per_cpu_ptr(&cyc2ns, cpu);
			c2n->data[0] = data[0];
			c2n->data[1] = data[1];
		}
	}
}
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
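/*
 * Illustrative unit check for the fallback path above (assuming
 * HZ = 250, a common config value): each jiffy then accounts for
 * 1000000000 / 250 = 4000000 ns, so sched_clock() advances in 4 ms
 * steps until the TSC is usable - coarse, but monotonic enough for
 * the scheduler.
 */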
/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}
/*
 * We need to define a real function for sched_clock, to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}

bool using_native_sched_clock(void)
{
	return pv_ops.time.sched_clock == native_sched_clock;
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));

bool using_native_sched_clock(void) { return true; }
#endif
int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	mark_tsc_unstable("boot parameter notsc");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
static int no_sched_irq_time;
static int no_tsc_watchdog;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	if (!strcmp(str, "nowatchdog"))
		no_tsc_watchdog = 1;
	return 1;
}

__setup("tsc=", tsc_setup);
#define MAX_RETRIES		5
#define TSC_DEFAULT_THRESHOLD	0x20000
/*
 * Read TSC and the reference counters. Take care of any disturbances
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < thresh)
			return t2;
	}
	return ULLONG_MAX;
}
/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	deltatsc = div64_u64(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
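/*
 * Unit sketch for the conversion above (informal, assuming the caller
 * pre-scaled deltatsc by 10^6 as pit_hpet_ptimer_calibrate_cpu() does):
 * HPET_PERIOD is in femtoseconds, so
 *
 *	tmp      = hpet_ticks * period_fs / 10^6   -> elapsed time in ns
 *	deltatsc = (cycles * 10^6) / ns            -> frequency in kHz
 *
 * e.g. a 14.318 MHz HPET has a period of ~69841279 fs, so ~716000 HPET
 * ticks correspond to ~50 ms.
 */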
/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
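/*
 * Same idea for the ACPI PM timer (informal sketch): it ticks at
 * PMTMR_TICKS_PER_SEC = 3579545 Hz, so
 *
 *	tmp            = pm_ticks * 10^9 / 3579545  -> elapsed time in ns
 *	deltatsc / tmp                              -> kHz, given the 10^6
 *	                                               pre-scaling by the caller
 *
 * e.g. ~178977 PM timer ticks correspond to ~50 ms.
 */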
#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	if (!has_legacy_pic()) {
		/*
		 * Relies on tsc_early_delay_calibrate() to have given us semi
		 * usable udelay(), wait for the same 50ms we would have with
		 * the PIT loop below.
		 */
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		return ULONG_MAX;
	}

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
	 * count mode), binary count. Set the latch register to 50ms
	 * (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI.
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}
/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}
/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
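/*
 * Worked out (PIT_TICK_RATE is 1193182 Hz):
 *
 *	MAX_QUICK_PIT_ITERATIONS = 50 * 1193182 / 1000 / 256 = 233
 *
 * i.e. at most 233 MSB decrements are observed, each covering 256 PIT
 * ticks (~214.6 us), for a total budget of just under 50 ms.
 */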
static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	if (!has_legacy_pic())
		return 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count.
	 *
	 * NOTE! Mode 2 decrements by two (and then the output is
	 * flipped each time, giving the same final output frequency
	 * as a decrement-by-one), so mode 0 is much better when
	 * looking at the individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
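/*
 * Plugging illustrative numbers into the formula above (hypothetical,
 * not a measurement): if the MSB decremented i = 100 times and the TSC
 * advanced by delta = 60000000 cycles in that window, then
 *
 *	kHz = 60000000 * 1193182 / (100 * 256 * 1000) ~= 2796520
 *
 * i.e. a ~2.8 GHz TSC, with the quoted error bound of 500 ppm.
 */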
/**
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	/*
	 * Denverton SoCs don't report crystal clock, and also don't support
	 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
	 * clock.
	 */
	if (crystal_khz == 0 &&
	    boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
		crystal_khz = 25000;

	/*
	 * TSC frequency reported directly by CPUID is a "hardware reported"
	 * frequency and is the most accurate one so far we have. This
	 * is considered a known frequency.
	 */
	if (crystal_khz != 0)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
	 * clock, but we can easily calculate it to a high degree of accuracy
	 * by considering the crystal ratio and the CPU speed.
	 */
	if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
		unsigned int eax_base_mhz, ebx, ecx, edx;

		cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
		crystal_khz = eax_base_mhz * 1000 *
			eax_denominator / ebx_numerator;
	}

	if (crystal_khz == 0)
		return 0;

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * The local APIC appears to be fed by the core crystal clock
	 * (which sounds entirely sensible). We can set the global
	 * lapic_timer_period here to avoid having to calibrate the APIC
	 * timer later.
	 */
	lapic_timer_period = crystal_khz * 1000 / HZ;
#endif

	return crystal_khz * ebx_numerator / eax_denominator;
}
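/*
 * Illustrative CPUID.15H reading (hypothetical values, not from a real
 * SKU): with eax_denominator = 2, ebx_numerator = 226 and a reported
 * 24 MHz crystal (ecx_hz = 24000000, so crystal_khz = 24000), the TSC
 * runs at
 *
 *	24000 * 226 / 2 = 2712000 kHz  (~2.7 GHz)
 */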
static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}
/*
 * Calibrate the CPU using the PIT, HPET and PM timer methods. They are
 * available later in boot after ACPI is initialized.
 */
static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms;
	int hpet = is_hpet_enabled(), i, loopmin;
	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when
	 * any disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for any possible disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;
	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check, whether the sampling was disturbed */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure, that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}
	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}
/**
 * native_calibrate_cpu_early - can calibrate the cpu early in boot
 */
unsigned long native_calibrate_cpu_early(void)
{
	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();

	if (!fast_calibrate)
		fast_calibrate = cpu_khz_from_msr();
	if (!fast_calibrate) {
		local_irq_save(flags);
		fast_calibrate = quick_pit_calibrate();
		local_irq_restore(flags);
	}
	return fast_calibrate;
}
/**
 * native_calibrate_cpu - calibrate the cpu
 */
static unsigned long native_calibrate_cpu(void)
{
	unsigned long tsc_freq = native_calibrate_cpu_early();

	if (!tsc_freq)
		tsc_freq = pit_hpet_ptimer_calibrate_cpu();

	return tsc_freq;
}
void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}
/*
 * Even on processors with invariant TSC, the TSC gets reset in some of
 * the ACPI system sleep states. And in some systems the BIOS seems to
 * reinit the TSC to an arbitrary value (still sync'd across cpu's)
 * during resume from such sleep states. To cope with this, recompute
 * the cyc2ns_offset for each cpu so that sched_clock() continues from
 * the point where it was left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}
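/*
 * A small sketch of why the offset above works (informal): before
 * suspend, sched_clock() returned cyc2ns_suspend. After resume the TSC
 * may have been reset, so the raw sched_clock() reading drops. Writing
 *
 *	offset = cyc2ns_suspend - sched_clock()
 *
 * into every cyc2ns_offset makes the very next sched_clock() read
 * cyc2ns_suspend again, so the clock continues instead of jumping.
 */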
#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
 * changes.
 *
 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
 * as unstable and give up in those cases.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;

	if (num_online_cpus() > 1) {
		mark_tsc_unstable("cpufreq changes on SMP");
		return 0;
	}

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		boot_cpu_data.loops_per_jiffy =
			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
	}

	return 0;
}
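/*
 * Rescaling example (hypothetical frequencies): if the reference state
 * was captured at ref_freq = 1400000 kHz with tsc_khz_ref = 1400000, a
 * transition to freq->new = 700000 yields
 *
 *	tsc_khz = cpufreq_scale(1400000, 1400000, 700000) = 700000
 *
 * i.e. the TSC rate is assumed to scale linearly with the core clock,
 * which only holds on pre-constant-TSC parts.
 */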
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */
#define ART_CPUID_LEAF		(0x15)
#define ART_MIN_DENOMINATOR	(1)

/*
 * If ART is present detect the numerator:denominator to convert to TSC
 */
static void __init detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	/*
	 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
	 * and the TSC counter resets must not occur asynchronously.
	 */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
	    tsc_async_resets)
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}
/* clocksource code */

static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}
/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code,
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static u64 read_tsc(struct clocksource *cs)
{
	return (u64)rdtsc_ordered();
}
static void tsc_cs_mark_unstable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to clocksource watchdog\n");
}
static void tsc_cs_tick_stable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	if (using_native_sched_clock())
		sched_clock_tick_stable();
}
static int tsc_cs_enable(struct clocksource *cs)
{
	vclocks_set_used(VDSO_CLOCKMODE_TSC);
	return 0;
}
/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc_early = {
	.name			= "tsc-early",
	.rating			= 299,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
	.enable			= tsc_cs_enable,
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
};
/*
 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
 * this one will immediately take over. We will only register if TSC has
 * been found good.
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_VALID_FOR_HRES |
				  CLOCK_SOURCE_MUST_VERIFY,
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
	.enable			= tsc_cs_enable,
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
};
void mark_tsc_unstable(char *reason)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_tsc_early);
	clocksource_mark_unstable(&clocksource_tsc);
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}
/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
struct system_counterval_t convert_art_to_tsc(u64 art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
					     .cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);
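/*
 * The split above is just long division to avoid 64-bit overflow
 * (informal sketch): with art = q * den + rem,
 *
 *	TSC = art * num / den + offset
 *	    = q * num + (rem * num) / den + offset
 *
 * so the only multiplication that must not overflow is rem * num,
 * where rem < den.
 */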
/**
 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
 * @art_ns: ART (Always Running Timer) in unit of nanoseconds
 *
 * PTM requires all timestamps to be in units of nanoseconds. When user
 * software requests a cross-timestamp, this function converts system timestamp
 * to TSC.
 *
 * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
 * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
 * that this flag is set before conversion to TSC is attempted.
 *
 * Return:
 * struct system_counterval_t - system counter value with the pointer to the
 *	corresponding clocksource
 *	@cycles:	System counter value
 *	@cs:		Clocksource corresponding to system counter value. Used
 *			by timekeeping code to verify comparability of two cycle
 *			values.
 */
struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
{
	u64 tmp, res, rem;

	rem = do_div(art_ns, USEC_PER_SEC);

	res = art_ns * tsc_khz;
	tmp = rem * tsc_khz;

	do_div(tmp, USEC_PER_SEC);
	res += tmp;

	return (struct system_counterval_t) { .cs = art_related_clocksource,
					      .cycles = res};
}
EXPORT_SYMBOL(convert_art_ns_to_tsc);
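/*
 * Same long-division trick, now converting nanoseconds to cycles
 * (informal sketch): tsc_khz is cycles per millisecond, so
 *
 *	cycles = art_ns * tsc_khz / USEC_PER_SEC
 *
 * e.g. with a hypothetical tsc_khz = 2712000, 1000000 ns maps to
 * 2712000 cycles, computed as quotient and remainder to avoid
 * overflowing the 64-bit intermediate product.
 */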
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: delayed work
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by 1% of the fast early
 * calibration, we throw out the new calibration and use the
 * early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = ULLONG_MAX, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;
	int cpu;

	/* Don't bother refining TSC on unstable systems */
	if (tsc_unstable)
		goto unreg;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == ULLONG_MAX) {
restart:
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		tsc_start = tsc_read_refs(&ref_start, hpet);
		schedule_delayed_work(&tsc_irqwork, HZ);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check, whether the sampling was disturbed */
	if (tsc_stop == ULLONG_MAX)
		goto restart;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

	/* Inform the TSC deadline clockevent devices about the recalibration */
	lapic_update_tsc_freq();

	/* Update the sched_clock() rate to match the clocksource one */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);

out:
	if (tsc_unstable)
		goto unreg;

	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
	clocksource_unregister(&clocksource_tsc_early);
}
static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
		return 0;

	if (tsc_unstable)
		goto unreg;

	if (tsc_clocksource_reliable || no_tsc_watchdog)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
	 * the refined calibration and directly register it as a clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
		if (boot_cpu_has(X86_FEATURE_ART))
			art_related_clocksource = &clocksource_tsc;
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
		clocksource_unregister(&clocksource_tsc_early);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);
static bool __init determine_cpu_tsc_frequencies(bool early)
{
	/* Make sure that cpu and tsc are not already calibrated */
	WARN_ON(cpu_khz || tsc_khz);

	if (early) {
		cpu_khz = x86_platform.calibrate_cpu();
		if (tsc_early_khz)
			tsc_khz = tsc_early_khz;
		else
			tsc_khz = x86_platform.calibrate_tsc();
	} else {
		/* We should not be here with non-native cpu calibration */
		WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
		cpu_khz = pit_hpet_ptimer_calibrate_cpu();
	}

	/*
	 * Trust non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (cpu_khz == 0)
		return false;

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / KHZ,
		(unsigned long)cpu_khz % KHZ);

	if (cpu_khz != tsc_khz) {
		pr_info("Detected %lu.%03lu MHz TSC\n",
			(unsigned long)tsc_khz / KHZ,
			(unsigned long)tsc_khz % KHZ);
	}

	return true;
}
static unsigned long __init get_loops_per_jiffy(void)
{
	u64 lpj = (u64)tsc_khz * KHZ;

	do_div(lpj, HZ);
	return lpj;
}
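/*
 * Example with hypothetical numbers: tsc_khz = 2712000 and HZ = 250
 * give lpj = 2712000 * 1000 / 250 = 10848000 TSC cycles per jiffy.
 */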
static void __init tsc_enable_sched_clock(void)
{
	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
	tsc_store_and_check_tsc_adjust(true);
	cyc2ns_init_boot_cpu();
	static_branch_enable(&__use_tsc);
}
void __init tsc_early_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;
	/* Don't change UV TSC multi-chassis synchronization */
	if (is_early_uv_system())
		return;
	if (!determine_cpu_tsc_frequencies(true))
		return;
	loops_per_jiffy = get_loops_per_jiffy();

	tsc_enable_sched_clock();
}
void __init tsc_init(void)
{
	/*
	 * native_calibrate_cpu_early can only calibrate using methods that are
	 * available early in boot.
	 */
	if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
		x86_platform.calibrate_cpu = native_calibrate_cpu;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	if (!tsc_khz) {
		/* We failed to determine frequencies earlier, try again */
		if (!determine_cpu_tsc_frequencies(false)) {
			mark_tsc_unstable("could not calculate TSC khz");
			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
			return;
		}
		tsc_enable_sched_clock();
	}

	cyc2ns_init_secondary_cpus();

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj_fine = get_loops_per_jiffy();
	use_tsc_delay();

	check_system_tsc_reliable();

	if (unsynchronized_tsc()) {
		mark_tsc_unstable("TSCs unsynchronized");
		return;
	}

	if (tsc_clocksource_reliable || no_tsc_watchdog)
		clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;

	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
	detect_art();
}
#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
	const struct cpumask *mask = topology_core_cpumask(cpu);

	if (!constant_tsc || !mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif