#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/x86_init.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);
/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;
/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1;

int tsc_clocksource_reliable;
/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The down-side is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with), because while we compute
 * the intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * CPU-Freq updates messing with its times, and similarly an NMI reader will
 * no longer run the risk of hitting half-written state.
 */

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*24 = 48 */
	struct cyc2ns_data *head;	/* 48 + 8    = 56 */
	struct cyc2ns_data *tail;	/* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
struct cyc2ns_data *cyc2ns_read_begin(void)
{
	struct cyc2ns_data *head;

	preempt_disable();

	head = this_cpu_read(cyc2ns.head);
	/*
	 * Ensure we observe the entry when we observe the pointer to it.
	 * Matches the wmb from cyc2ns_write_end().
	 */
	smp_read_barrier_depends();
	head->__count++;
	barrier();

	return head;
}
void cyc2ns_read_end(struct cyc2ns_data *head)
{
	barrier();
	/*
	 * If we're the outermost nested read, update the tail pointer
	 * when we're done. This notifies possible pending writers
	 * that we've observed the head pointer and that the other
	 * entry is now free.
	 */
	if (!--head->__count) {
		/*
		 * x86-TSO does not reorder writes with older reads;
		 * therefore once this write becomes visible to another
		 * cpu, we must be finished reading the cyc2ns_data.
		 *
		 * Matches with cyc2ns_write_begin().
		 */
		this_cpu_write(cyc2ns.tail, head);
	}
	preempt_enable();
}
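/*
 * Example (illustrative, hypothetical helper; not part of the API): a reader
 * brackets its use of the {mul, shift, offset} triplet with
 * cyc2ns_read_begin()/cyc2ns_read_end(), as sketched below.
 */
static u64 __maybe_unused cyc2ns_read_example(u64 cycles)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	u64 ns;

	/* Convert cycles to ns with the entry pinned above. */
	ns = data->cyc2ns_offset +
	     mul_u64_u32_shr(cycles, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);

	return ns;
}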
/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the
 * assumption that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
	struct cyc2ns_data *data = c2n->data;

	if (data == c2n->head)
		data++;

	/* XXX send an IPI to @cpu in order to guarantee a read? */

	/*
	 * When we observe the tail write from cyc2ns_read_end(),
	 * the cpu must be done with that entry and it's safe
	 * to start writing to it.
	 */
	while (c2n->tail == data)
		cpu_relax();

	return data;
}
static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	/*
	 * Ensure the @data writes are visible before we publish the
	 * entry. Matches the data dependency in cyc2ns_read_begin().
	 */
	smp_wmb();

	ACCESS_ONCE(c2n->head) = data;
}
/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
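/*
 * Worked example (illustrative numbers): for a 2 GHz TSC, cpu_khz = 2000000,
 * so cyc2ns_scale = 10^6 * 2^10 / 2000000 = 512. Converting one second's
 * worth of cycles: 2000000000 * 512 >> 10 = 10^9 ns, as expected.
 */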
static void cyc2ns_data_init(struct cyc2ns_data *data)
{
	data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR;
	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
	data->cyc2ns_offset = 0;
	data->__count = 0;
}
static void cyc2ns_init(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	cyc2ns_data_init(&c2n->data[0]);
	cyc2ns_data_init(&c2n->data[1]);

	c2n->head = c2n->data;
	c2n->tail = c2n->data;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data, *tail;
	unsigned long long ns;

	/*
	 * See cyc2ns_read_*() for details; replicated in order to avoid
	 * an extra few instructions that came with the abstraction.
	 *
	 * Notably, it allows us to only do the __count and tail update
	 * dance when it's actually needed.
	 */
	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

	if (likely(data == tail)) {
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
	} else {
		data->__count++;
		barrier();
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
		barrier();
		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable_notrace();

	return ns;
}
/* XXX surely we already have this someplace in the kernel?! */
#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))
static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	struct cyc2ns_data *data;
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (!cpu_khz)
		goto done;

	data = cyc2ns_write_begin(cpu);

	rdtscll(tsc_now);
	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
	data->cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);

	cyc2ns_write_end(cpu, data);

done:
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}
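/*
 * Continuity check (illustrative): with the offset chosen above,
 * cycles_2_ns(tsc_now) under the new pair evaluates to
 * ns_now - (tsc_now * mul >> shift) + (tsc_now * mul >> shift) = ns_now,
 * so the clock is continuous at the switch point by construction.
 */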
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	u64 tsc_now;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(tsc_now);

	/* return the value in ns */
	return cycles_2_ns(tsc_now);
}
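/*
 * Worked example (illustrative): with HZ == 1000 the fallback advances by
 * 1000000000 / 1000 = 10^6 ns per jiffy, i.e. in 1 ms steps - coarse, but
 * within the scheduler clock's error tolerance.
 */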
/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

unsigned long long native_read_tsc(void)
{
	return __native_read_tsc();
}
EXPORT_SYMBOL(native_read_tsc);
int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

int check_tsc_disabled(void)
{
	return tsc_disabled;
}
EXPORT_SYMBOL_GPL(check_tsc_disabled);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	return 1;
}

__setup("tsc=", tsc_setup);
#define MAX_RETRIES	5
#define SMI_THRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance.
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_THRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}
/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
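/*
 * Worked example (illustrative numbers): a 2 GHz TSC sampled over a 50 ms
 * window gives deltatsc = 10^8 cycles, scaled to 10^8 * 10^6 by the caller.
 * Either reference path computes the elapsed time as 5 * 10^7 ns, so the
 * division yields 10^14 / (5 * 10^7) = 2 * 10^6 kHz, i.e. 2 GHz.
 */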
#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0, (interrupt on terminal
	 * count mode), binary count. Set the latch register
	 * (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI.
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}
/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}
/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
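/* With PIT_TICK_RATE == 1193182, the integer math above evaluates to 233. */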
static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			delta -= tsc;
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_err("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
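/*
 * Worked example (illustrative numbers): if the MSB decremented i = 20 times
 * and delta = 8582000 TSC cycles elapsed over those 20 * 256 PIT ticks
 * (~4.29 ms), then 8582000 * 1193182 / (20 * 256 * 1000) ~= 2000000 kHz,
 * i.e. a 2 GHz TSC.
 */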
/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;
	/*
	 * Run 5 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */
	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;
	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check, whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}
		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}
	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}
	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		tsc_khz = x86_platform.calibrate_tsc();
		cpu_khz = tsc_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}
/*
 * Even on processors with invariant TSC, TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit
 * TSC to an arbitrary value (still sync'd across cpu's) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for each
 * cpu so that sched_clock() continues from the point where it was left off
 * during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_CPU_FREQ

/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
			(val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz, freq->cpu);

	return 0;
}
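/*
 * Worked example (illustrative): with tsc_khz_ref = 2000000 recorded at
 * ref_freq = 2000000 kHz, a cpufreq transition to freq->new = 1000000 kHz
 * rescales tsc_khz to 2000000 * 1000000 / 2000000 = 1000000 kHz, and
 * set_cyc2ns_scale() installs a matching {mul, offset} pair so that
 * sched_clock() stays continuous across the frequency change.
 */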
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	if (!cpu_has_tsc)
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */
/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	cycle_t ret = (cycle_t)get_cycles();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}
static void resume_tsc(struct clocksource *cs)
{
	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.cycle_last = 0;
}
static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.resume		= resume_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.archdata	= { .vclock_mode = VCLOCK_TSC },
#endif
};
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		clear_sched_clock_stable();
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static void __init check_system_tsc_reliable(void)
{
#ifdef CONFIG_MGEODE_LX
	/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	/* Geode_LX - the OLPC CPU has a very reliable TSC */
	if (res_low & RTSC_SUSP)
		tsc_clocksource_reliable = 1;
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: workqueue task pointer
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by 1% of the fast early
 * calibration, we throw out the new calibration and use the
 * early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}
	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check, whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

out:
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}
static int __init init_tsc_clocksource(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the results of the earlier calibration on systems
	 * exporting a reliable TSC.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}

/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);
void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	x86_init.timers.tsc_pre_init();

	if (!cpu_has_tsc) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	tsc_khz = x86_platform.calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(cpu_khz, cpu);
	}

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();
}
#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int i, cpu = smp_processor_id();

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	for_each_online_cpu(i)
		if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
			return cpu_data(i).loops_per_jiffy;

	return 0;
}
#endif