/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
        /* Current clocksource used for timekeeping. */
        struct clocksource *clock;
        /* The shift value of the current clocksource. */
        int     shift;

        /* Number of clock cycles in one NTP interval. */
        cycle_t cycle_interval;
        /* Number of clock shifted nano seconds in one NTP interval. */
        u64     xtime_interval;
        /* shifted nano seconds left over when rounding cycle_interval */
        s64     xtime_remainder;
        /* Raw nano seconds accumulated per NTP interval. */
        u32     raw_interval;

        /* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
        u64     xtime_nsec;
        /* Difference between accumulated time and NTP time in ntp
         * shifted nano seconds. */
        s64     ntp_error;
        /* Shift conversion between clock shifted nano seconds and
         * ntp shifted nano seconds. */
        int     ntp_error_shift;
        /* NTP adjusted clock multiplier */
        u32     mult;

        /* The current time */
        struct timespec xtime;
        /*
         * wall_to_monotonic is what we need to add to xtime (or xtime corrected
         * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
         * at zero at system boot time, so wall_to_monotonic will be negative,
         * however, we will ALWAYS keep the tv_nsec part positive so we can use
         * the usual normalization.
         *
         * wall_to_monotonic is moved after resume from suspend for the
         * monotonic time not to jump. We need to add total_sleep_time to
         * wall_to_monotonic to get the real boot based time offset.
         *
         * - wall_to_monotonic is no longer the boot time, getboottime must be
         * used instead.
         */
        struct timespec wall_to_monotonic;
        /* time spent in suspend */
        struct timespec total_sleep_time;
        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
        struct timespec raw_time;
};

static struct timekeeper timekeeper;

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp, ntpinterval;

        timekeeper.clock = clock;
        clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        timekeeper.cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        timekeeper.xtime_interval = (u64) interval * clock->mult;
        timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
        timekeeper.raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

        timekeeper.xtime_nsec = 0;
        timekeeper.shift = clock->shift;

        timekeeper.ntp_error = 0;
        timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

        /*
         * The timekeeper keeps its own mult value for the currently
         * active clocksource. This value will be adjusted via NTP
         * to counteract clock drifting.
         */
        timekeeper.mult = clock->mult;
}

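/*
 * A worked example of the interval math above (illustrative numbers, not
 * taken from any particular clocksource): for a 1 MHz clocksource with
 * shift = 20, mult is about 1000 << 20, i.e. each cycle is worth 1000
 * shifted nanoseconds.  With an NTP_INTERVAL_LENGTH of 1000000ns (HZ=1000),
 * the ns -> cycle conversion yields cycle_interval = 1000 cycles per
 * interval, and going back, xtime_interval = 1000 * mult = 1000000 << 20
 * shifted nanoseconds, i.e. exactly one tick.
 */
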
/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;

        /* read clocksource: */
        clock = timekeeper.clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* return delta converted to nanoseconds using ntp adjusted mult. */
        return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                  timekeeper.shift);
}

static inline s64 timekeeping_get_ns_raw(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;

        /* read clocksource: */
        clock = timekeeper.clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* return delta converted to nanoseconds. */
        return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

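/*
 * Readers of the timekeeping state follow the usual seqlock retry
 * pattern; a minimal sketch of what the accessors below all do:
 *
 *	unsigned long seq;
 *
 *	do {
 *		seq = read_seqbegin(&xtime_lock);
 *		... copy out xtime and friends ...
 *	} while (read_seqretry(&xtime_lock, seq));
 */
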
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
        timekeeper.xtime.tv_sec += leapsecond;
        timekeeper.wall_to_monotonic.tv_sec -= leapsecond;
        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                        timekeeper.clock, timekeeper.mult);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = timekeeper.clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;

        nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                  timekeeper.shift);

        /* If arch requires, add in gettimeoffset() */
        nsec += arch_gettimeoffset();

        timespec_add_ns(&timekeeper.xtime, nsec);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&timekeeper.raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);

                *ts = timekeeper.xtime;
                nsecs = timekeeping_get_ns();

                /* If arch requires, add in gettimeoffset() */
                nsecs += arch_gettimeoffset();

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

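/*
 * Example use of getnstimeofday() (an illustrative sketch, not part of
 * this file; do_work() stands in for whatever is being timed):
 *
 *	struct timespec start, end, delta;
 *
 *	getnstimeofday(&start);
 *	do_work();
 *	getnstimeofday(&end);
 *	delta = timespec_sub(end, start);
 */
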
ktime_t ktime_get(void)
{
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);
                secs = timekeeper.xtime.tv_sec +
                                timekeeper.wall_to_monotonic.tv_sec;
                nsecs = timekeeper.xtime.tv_nsec +
                                timekeeper.wall_to_monotonic.tv_nsec;
                nsecs += timekeeping_get_ns();
                /* If arch requires, add in gettimeoffset() */
                nsecs += arch_gettimeoffset();

        } while (read_seqretry(&xtime_lock, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned int seq;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);
                *ts = timekeeper.xtime;
                tomono = timekeeper.wall_to_monotonic;
                nsecs = timekeeping_get_ns();
                /* If arch requires, add in gettimeoffset() */
                nsecs += arch_gettimeoffset();

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                u32 arch_offset;

                seq = read_seqbegin(&xtime_lock);

                *ts_raw = timekeeper.raw_time;
                *ts_real = timekeeper.xtime;

                nsecs_raw = timekeeping_get_ns_raw();
                nsecs_real = timekeeping_get_ns();

                /* If arch requires, add in gettimeoffset() */
                arch_offset = arch_gettimeoffset();
                nsecs_raw += arch_offset;
                nsecs_real += arch_offset;

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
        struct timespec ts_delta;
        unsigned long flags;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        timekeeping_forward_now();

        ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, ts_delta);

        timekeeper.xtime = *tv;

        timekeeper.ntp_error = 0;
        ntp_clear();

        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                        timekeeper.clock, timekeeper.mult);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);

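/*
 * Note the invariant preserved above: with
 *	monotonic = xtime + wall_to_monotonic
 * stepping xtime forward by ts_delta while stepping wall_to_monotonic
 * back by the same ts_delta leaves CLOCK_MONOTONIC readers unaffected;
 * only the wall clock jumps.
 */
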
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        unsigned long flags;

        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        timekeeping_forward_now();

        timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, *ts);

        timekeeper.ntp_error = 0;
        ntp_clear();

        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                        timekeeper.clock, timekeeper.mult);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct clocksource *new, *old;

        new = (struct clocksource *) data;

        timekeeping_forward_now();
        if (!new->enable || new->enable(new) == 0) {
                old = timekeeper.clock;
                timekeeper_setup_internals(new);
                if (old->disable)
                        old->disable(old);
        }
        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
        if (timekeeper.clock == clock)
                return;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&xtime_lock);
                nsecs = timekeeping_get_ns_raw();
                *ts = timekeeper.raw_time;

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&xtime_lock);

                ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&xtime_lock, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
        return timekeeper.clock->max_idle_ns;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct clocksource *clock;
        unsigned long flags;
        struct timespec now, boot;

        read_persistent_clock(&now);
        read_boot_clock(&boot);

        write_seqlock_irqsave(&xtime_lock, flags);

        ntp_init();

        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        timekeeper_setup_internals(clock);

        timekeeper.xtime.tv_sec = now.tv_sec;
        timekeeper.xtime.tv_nsec = now.tv_nsec;
        timekeeper.raw_time.tv_sec = 0;
        timekeeper.raw_time.tv_nsec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
                boot.tv_sec = timekeeper.xtime.tv_sec;
                boot.tv_nsec = timekeeper.xtime.tv_nsec;
        }
        set_normalized_timespec(&timekeeper.wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
        timekeeper.total_sleep_time.tv_sec = 0;
        timekeeper.total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
        if (!timespec_valid(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                                        "sleep delta value!\n");
                return;
        }

        timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, *delta);
        timekeeper.total_sleep_time = timespec_add(
                        timekeeper.total_sleep_time, *delta);
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
        unsigned long flags;
        struct timespec ts;

        /* Make sure we don't set the clock twice */
        read_persistent_clock(&ts);
        if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
                return;

        write_seqlock_irqsave(&xtime_lock, flags);
        timekeeping_forward_now();

        __timekeeping_inject_sleeptime(delta);

        timekeeper.ntp_error = 0;
        ntp_clear();
        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                        timekeeper.clock, timekeeper.mult);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
        unsigned long flags;
        struct timespec ts;

        read_persistent_clock(&ts);

        clocksource_resume();

        write_seqlock_irqsave(&xtime_lock, flags);

        if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
                ts = timespec_sub(ts, timekeeping_suspend_time);
                __timekeeping_inject_sleeptime(&ts);
        }
        /* re-base the last cycle value */
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hrtimers_resume();
}

static int timekeeping_suspend(void)
{
        unsigned long flags;
        struct timespec         delta, delta_delta;
        static struct timespec  old_delta;

        read_persistent_clock(&timekeeping_suspend_time);

        write_seqlock_irqsave(&xtime_lock, flags);
        timekeeping_forward_now();
        timekeeping_suspended = 1;

        /*
         * To avoid drift caused by repeated suspend/resumes,
         * which each can add ~1 second drift error,
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
        delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
        delta_delta = timespec_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec) >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
                 */
                old_delta = delta;
        } else {
                /* Otherwise try to adjust the suspend time to compensate */
                timekeeping_suspend_time =
                        timespec_add(timekeeping_suspend_time, delta_delta);
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();

        return 0;
}

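/*
 * Numerical example of the compensation above (illustrative): with a
 * persistent clock of one-second resolution, suppose system time read
 * 0.6s ahead of the persistent clock at the previous suspend and 0.9s
 * ahead at this one.  Then delta_delta is 0.3s, and nudging
 * timekeeping_suspend_time forward by 0.3s shortens the sleep length
 * injected at resume, keeping the system-to-persistent-clock difference
 * roughly constant instead of drifting ~1s per suspend/resume cycle.
 */
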
/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error.  The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
        tick_error -= timekeeper.xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

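/*
 * For example (illustrative values): a positive error of roughly eight
 * intervals halves three times before dropping to i, so the loop above
 * yields adj = 3; interval and offset are scaled by 1 << 3 and the
 * function returns mult << 3 = 8, letting the caller apply one
 * eight-step adjustment instead of eight single steps.
 */
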
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
        s64 error, interval = timekeeper.cycle_interval;
        int adj;

        /*
         * The point of this is to check if the error is greater than half
         * an interval.
         *
         * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
         *
         * Note we subtract one in the shift, so that error is really error*2.
         * This "saves" dividing(shifting) interval twice, but keeps the
         * (error > interval) comparison as still measuring if error is
         * larger than half an interval.
         *
         * Note: It does not "save" on aggravation when reading the code.
         */
        error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
        if (error > interval) {
                /*
                 * We now divide error by 4 (via shift), which checks if
                 * the error is greater than twice the interval.
                 * If it is greater, we need a bigadjust, if it's smaller,
                 * we can adjust by 1.
                 */
                error >>= 2;
                /*
                 * XXX - In update_wall_time, we round up to the next
                 * nanosecond, and store the amount rounded up into
                 * the error. This causes the likely below to be unlikely.
                 *
                 * The proper fix is to avoid rounding up by using
                 * the high precision timekeeper.xtime_nsec instead of
                 * xtime.tv_nsec everywhere. Fixing this will take some
                 * time.
                 */
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                /* See comment above, this is just switched for the negative */
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else /* No adjustment needed */
                return;

        WARN_ONCE(timekeeper.clock->maxadj &&
                        (timekeeper.mult + adj > timekeeper.clock->mult +
                                                timekeeper.clock->maxadj),
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                        timekeeper.clock->name, (long)timekeeper.mult + adj,
                        (long)timekeeper.clock->mult +
                                timekeeper.clock->maxadj);
        /*
         * So the following can be confusing.
         *
         * To keep things simple, lets assume adj == 1 for now.
         *
         * When adj != 1, remember that the interval and offset values
         * have been appropriately scaled so the math is the same.
         *
         * The basic idea here is that we're increasing the multiplier
         * by one, this causes the xtime_interval to be incremented by
         * one cycle_interval. This is because:
         *	xtime_interval = cycle_interval * mult
         * So if mult is being incremented by one:
         *	xtime_interval = cycle_interval * (mult + 1)
         * It's the same as:
         *	xtime_interval = (cycle_interval * mult) + cycle_interval
         * Which can be shortened to:
         *	xtime_interval += cycle_interval
         *
         * So offset stores the non-accumulated cycles. Thus the current
         * time (in shifted nanoseconds) is:
         *	now = (offset * adj) + xtime_nsec
         * Now, even though we're adjusting the clock frequency, we have
         * to keep time consistent. In other words, we can't jump back
         * in time, and we also want to avoid jumping forward in time.
         *
         * So given the same offset value, we need the time to be the same
         * both before and after the freq adjustment.
         *	now = (offset * adj_1) + xtime_nsec_1
         *	now = (offset * adj_2) + xtime_nsec_2
         * So:
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * adj_2) + xtime_nsec_2
         * And we know:
         *	adj_2 = adj_1 + 1
         * So:
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * (adj_1+1)) + xtime_nsec_2
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * adj_1) + offset + xtime_nsec_2
         * Canceling the sides:
         *	xtime_nsec_1 = offset + xtime_nsec_2
         * Which gives us:
         *	xtime_nsec_2 = xtime_nsec_1 - offset
         * Which simplifies to:
         *	xtime_nsec -= offset
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
        timekeeper.mult += adj;
        timekeeper.xtime_interval += interval;
        timekeeper.xtime_nsec -= offset;
        timekeeper.ntp_error -= (interval - offset) <<
                                timekeeper.ntp_error_shift;
}

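/*
 * A concrete instance of the derivation above (illustrative numbers):
 * with adj = 1 and offset = 3 unaccumulated cycles, bumping mult by one
 * makes each of those cycles worth one more shifted nanosecond, so
 * now = (offset * adj) + xtime_nsec would jump forward by 3 units were
 * xtime_nsec not reduced by offset, which is exactly what the
 * "timekeeper.xtime_nsec -= offset" statement above does.
 */
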
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for an O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
        u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
        u64 raw_nsecs;

        /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < timekeeper.cycle_interval << shift)
                return offset;

        /* Accumulate one shifted interval */
        offset -= timekeeper.cycle_interval << shift;
        timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

        timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
        while (timekeeper.xtime_nsec >= nsecps) {
                timekeeper.xtime_nsec -= nsecps;
                timekeeper.xtime.tv_sec++;
                second_overflow();
        }

        /* Accumulate raw time */
        raw_nsecs = timekeeper.raw_interval << shift;
        raw_nsecs += timekeeper.raw_time.tv_nsec;
        if (raw_nsecs >= NSEC_PER_SEC) {
                u64 raw_secs = raw_nsecs;
                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
                timekeeper.raw_time.tv_sec += raw_secs;
        }
        timekeeper.raw_time.tv_nsec = raw_nsecs;

        /* Accumulate error between NTP and clock interval */
        timekeeper.ntp_error += tick_length << shift;
        timekeeper.ntp_error -=
            (timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
                                (timekeeper.ntp_error_shift + shift);

        return offset;
}

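/*
 * For example (illustrative): if 70 cycle_intervals worth of cycles are
 * pending after a long NO_HZ idle period, update_wall_time() first calls
 * this with shift = 6, consuming 64 intervals at once; follow-up calls
 * with smaller shifts consume 4 and then 2, so the backlog is accumulated
 * in a handful of steps rather than 70 single-interval iterations.
 */
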
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
        struct clocksource *clock;
        cycle_t offset;
        int shift = 0, maxshift;

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                return;

        clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = timekeeper.cycle_interval;
#else
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
        timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
                                                timekeeper.shift;

        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
         * (think "ticks") worth of time at once. To do this efficiently,
         * we calculate the largest doubling multiple of cycle_intervals
         * that is smaller than the offset.  We then accumulate that
         * chunk in one go, and then try to consume the next smaller
         * doubled multiple.
         */
        shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
        shift = max(0, shift);
        /* Bound shift to one less than what overflows tick_length */
        maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= timekeeper.cycle_interval) {
                offset = logarithmic_accumulation(offset, shift);
                if (offset < timekeeper.cycle_interval << shift)
                        shift--;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(offset);

        /*
         * Since in the loop above, we accumulate any amount of time
         * in xtime_nsec over a second into xtime.tv_sec, it's possible for
         * xtime_nsec to be fairly small after the loop. Further, if we're
         * slightly speeding the clocksource up in timekeeping_adjust(),
         * it's possible the required corrective factor to xtime_nsec could
         * cause it to underflow.
         *
         * Now, we cannot simply roll the accumulated second back, since
         * the NTP subsystem has been notified via second_overflow. So
         * instead we push xtime_nsec forward by the amount we underflowed,
         * and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
                s64 neg = -(s64)timekeeper.xtime_nsec;
                timekeeper.xtime_nsec = 0;
                timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
        }

        /*
         * Store full nanoseconds into xtime after rounding it up and
         * add the remainder to the error difference.
         */
        timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
                                                timekeeper.shift) + 1;
        timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
                                                timekeeper.shift;
        timekeeper.ntp_error += timekeeper.xtime_nsec <<
                                timekeeper.ntp_error_shift;

        /*
         * Finally, make sure that after the rounding
         * xtime.tv_nsec isn't larger than NSEC_PER_SEC
         */
        if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
                timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
                timekeeper.xtime.tv_sec++;
                second_overflow();
        }

        /* check to see if there is a new clocksource to use */
        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                        timekeeper.clock, timekeeper.mult);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timespec boottime = {
                .tv_sec = timekeeper.wall_to_monotonic.tv_sec +
                                timekeeper.total_sleep_time.tv_sec,
                .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
                                timekeeper.total_sleep_time.tv_nsec
        };

        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

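/*
 * Sanity check of the relation used above (a sketch): since
 *	monotonic = wall + wall_to_monotonic
 * and boot-based time additionally counts total_sleep_time, the wall
 * time at which the boot-based clock read zero is
 *	-(wall_to_monotonic + total_sleep_time)
 * which is what the set_normalized_timespec() call computes.
 */
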
/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
        struct timespec tomono, sleep;
        unsigned int seq;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);
                *ts = timekeeper.xtime;
                tomono = timekeeper.wall_to_monotonic;
                sleep = timekeeper.total_sleep_time;
                nsecs = timekeeping_get_ns();

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
                ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
        struct timespec ts;

        get_monotonic_boottime(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        *ts = timespec_add(*ts, timekeeper.total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
        return timekeeper.xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        return timekeeper.xtime;
}

struct timespec current_kernel_time(void)
{
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);

                now = timekeeper.xtime;
        } while (read_seqretry(&xtime_lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
        struct timespec now, mono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);

                now = timekeeper.xtime;
                mono = timekeeper.wall_to_monotonic;
        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
        return now;
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_wall_time();
        calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
                                struct timespec *wtom, struct timespec *sleep)
{
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                *xtim = timekeeper.xtime;
                *wtom = timekeeper.wall_to_monotonic;
                *sleep = timekeeper.total_sleep_time;
        } while (read_seqretry(&xtime_lock, seq));
}

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
        unsigned long seq;
        struct timespec wtom;

        do {
                seq = read_seqbegin(&xtime_lock);
                wtom = timekeeper.wall_to_monotonic;
        } while (read_seqretry(&xtime_lock, seq));
        return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&xtime_lock);
        do_timer(ticks);
        write_sequnlock(&xtime_lock);
}