2 * linux/kernel/time/timekeeping.c
4 * Kernel timekeeping code and accessor functions
6 * This code was moved from linux/kernel/timer.c.
7 * Please see that file for copyright and history logs.
11 #include <linux/timekeeper_internal.h>
12 #include <linux/module.h>
13 #include <linux/interrupt.h>
14 #include <linux/percpu.h>
15 #include <linux/init.h>
17 #include <linux/sched.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/clocksource.h>
20 #include <linux/jiffies.h>
21 #include <linux/time.h>
22 #include <linux/tick.h>
23 #include <linux/stop_machine.h>
24 #include <linux/pvclock_gtod.h>
26 #include "tick-internal.h"
27 #include "ntp_internal.h"
28 #include "timekeeping_internal.h"
30 static struct timekeeper timekeeper;
31 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
32 static seqcount_t timekeeper_seq;
33 static struct timekeeper shadow_timekeeper;
35 /* flag for if timekeeping is suspended */
36 int __read_mostly timekeeping_suspended;
38 /* Flag for if there is a persistent clock on this platform */
39 bool __read_mostly persistent_clock_exist = false;
41 static inline void tk_normalize_xtime(struct timekeeper *tk)
43 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
44 tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
45 tk->xtime_sec++;
46 }
49 static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
51 tk->xtime_sec = ts->tv_sec;
52 tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
55 static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
57 tk->xtime_sec += ts->tv_sec;
58 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
59 tk_normalize_xtime(tk);
62 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
67 * Verify consistency of: offset_real = -wall_to_monotonic
68 * before modifying anything
70 set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
71 -tk->wall_to_monotonic.tv_nsec);
72 WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
73 tk->wall_to_monotonic = wtm;
74 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
75 tk->offs_real = timespec_to_ktime(tmp);
76 tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
79 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
81 /* Verify consistency before modifying */
82 WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
84 tk->total_sleep_time = t;
85 tk->offs_boot = timespec_to_ktime(t);
89 * tk_setup_internals - Set up internals to use clocksource clock.
91 * @clock: Pointer to clocksource.
93 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
94 * pair and interval request.
96 * Unless you're the timekeeping code, you should not be using this!
98 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
101 u64 tmp, ntpinterval;
102 struct clocksource *old_clock;
104 old_clock = tk->clock;
106 tk->cycle_last = clock->cycle_last = clock->read(clock);
108 /* Do the ns -> cycle conversion first, using original mult */
109 tmp = NTP_INTERVAL_LENGTH;
110 tmp <<= clock->shift;
112 tmp += clock->mult/2;
113 do_div(tmp, clock->mult);
117 interval = (cycle_t) tmp;
118 tk->cycle_interval = interval;
120 /* Go back from cycles -> shifted ns */
121 tk->xtime_interval = (u64) interval * clock->mult;
122 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
124 tk->raw_interval = ((u64) interval * clock->mult) >> clock->shift;
126 /* if changing clocks, convert xtime_nsec shift units */
127 if (old_clock) {
128 int shift_change = clock->shift - old_clock->shift;
129 if (shift_change < 0)
130 tk->xtime_nsec >>= -shift_change;
131 else
132 tk->xtime_nsec <<= shift_change;
133 }
134 tk->shift = clock->shift;
137 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
140 * The timekeeper keeps its own mult values for the currently
141 * active clocksource. These values will be adjusted via NTP
142 * to counteract clock drifting.
144 tk->mult = clock->mult;
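/*
 * Illustrative sketch (not part of this file): the ns -> cycles conversion
 * performed above, as standalone C. A clocksource reports time as
 * ns = (cycles * mult) >> shift, so the number of cycles in one NTP interval
 * is the inverse, rounded to the nearest cycle:
 * cycles = ((interval_ns << shift) + mult/2) / mult. All values are made up.
 */
#include <stdint.h>
#include <stdio.h>

uint64_t ns_to_cycles(uint64_t interval_ns, uint32_t mult, uint32_t shift)
{
	uint64_t tmp = interval_ns << shift;	/* shifted nanoseconds */

	tmp += mult / 2;			/* round to nearest cycle */
	return tmp / mult;			/* the kernel uses do_div() here */
}

int main(void)
{
	/* hypothetical clocksource where one cycle is roughly 0.4 ns (2.5 GHz) */
	uint32_t mult = 429496730, shift = 30;
	uint64_t ntp_interval_ns = 10000000;	/* 10 ms tick, example only */

	printf("cycles per interval: %llu\n",
	       (unsigned long long)ns_to_cycles(ntp_interval_ns, mult, shift));
	return 0;
}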
147 /* Timekeeper helper functions. */
149 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
150 u32 (*arch_gettimeoffset)(void);
152 u32 get_arch_timeoffset(void)
154 if (likely(arch_gettimeoffset))
155 return arch_gettimeoffset();
159 static inline u32 get_arch_timeoffset(void) { return 0; }
162 static inline s64 timekeeping_get_ns(struct timekeeper *tk)
164 cycle_t cycle_now, cycle_delta;
165 struct clocksource *clock;
168 /* read clocksource: */
170 cycle_now = clock->read(clock);
172 /* calculate the delta since the last update_wall_time: */
173 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
175 nsec = cycle_delta * tk->mult + tk->xtime_nsec;
178 /* If arch requires, add in get_arch_timeoffset() */
179 return nsec + get_arch_timeoffset();
182 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
184 cycle_t cycle_now, cycle_delta;
185 struct clocksource *clock;
188 /* read clocksource: */
190 cycle_now = clock->read(clock);
192 /* calculate the delta since the last update_wall_time: */
193 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
195 /* convert delta to nanoseconds. */
196 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
198 /* If arch requires, add in get_arch_timeoffset() */
199 return nsec + get_arch_timeoffset();
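/*
 * Illustrative sketch (not part of this file): the two steps used by
 * timekeeping_get_ns() and timekeeping_get_ns_raw() above. First the cycle
 * delta is taken modulo the counter width (the mask keeps the subtraction
 * correct even if the free-running counter wrapped), then it is scaled to
 * nanoseconds with the clocksource's mult/shift pair, as clocksource_cyc2ns()
 * does. Names are made up; standalone C.
 */
#include <stdint.h>

uint64_t example_cycle_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;		/* wrap-safe delta */
}

uint64_t example_cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;	/* ns = cycles * mult / 2^shift */
}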
202 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
204 static void update_pvclock_gtod(struct timekeeper *tk)
206 raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
210 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
212 int pvclock_gtod_register_notifier(struct notifier_block *nb)
214 struct timekeeper *tk = &timekeeper;
218 raw_spin_lock_irqsave(&timekeeper_lock, flags);
219 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
220 update_pvclock_gtod(tk);
221 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
225 EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
228 * pvclock_gtod_unregister_notifier - unregister a pvclock
229 * timedata update listener
231 int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
236 raw_spin_lock_irqsave(&timekeeper_lock, flags);
237 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
238 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
242 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
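/*
 * Illustrative sketch (not part of this file): how a hypothetical consumer,
 * e.g. a paravirt clock driver, might hook these updates. The callback is
 * invoked from timekeeping_update() with timekeeper_lock held and interrupts
 * disabled, so it must be short and must not sleep. All names are made up.
 */
static int example_gtod_notify(struct notifier_block *nb,
			       unsigned long unused, void *priv)
{
	struct timekeeper *tk = priv;	/* timekeeper passed by update_pvclock_gtod() */

	/* snapshot whatever fields the consumer needs, e.g. tk->clock->mult */
	(void)tk;
	return NOTIFY_OK;
}

static struct notifier_block example_gtod_nb = {
	.notifier_call = example_gtod_notify,
};

static int __init example_gtod_init(void)
{
	/* registration also replays the current state to all listeners */
	return pvclock_gtod_register_notifier(&example_gtod_nb);
}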
244 /* must hold timekeeper_lock */
245 static void timekeeping_update(struct timekeeper *tk, bool clearntp, bool mirror)
252 update_pvclock_gtod(tk);
255 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
259 * timekeeping_forward_now - update clock to the current time
261 * Forward the current clock to update its state since the last call to
262 * update_wall_time(). This is useful before significant clock changes,
263 * as it avoids having to deal with this time offset explicitly.
265 static void timekeeping_forward_now(struct timekeeper *tk)
267 cycle_t cycle_now, cycle_delta;
268 struct clocksource *clock;
272 cycle_now = clock->read(clock);
273 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
274 tk->cycle_last = clock->cycle_last = cycle_now;
276 tk->xtime_nsec += cycle_delta * tk->mult;
278 /* If arch requires, add in get_arch_timeoffset() */
279 tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;
281 tk_normalize_xtime(tk);
283 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
284 timespec_add_ns(&tk->raw_time, nsec);
288 * __getnstimeofday - Returns the time of day in a timespec.
289 * @ts: pointer to the timespec to be set
291 * Updates the time of day in the timespec.
292 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
294 int __getnstimeofday(struct timespec *ts)
296 struct timekeeper *tk = &timekeeper;
301 seq = read_seqcount_begin(&timekeeper_seq);
303 ts->tv_sec = tk->xtime_sec;
304 nsecs = timekeeping_get_ns(tk);
306 } while (read_seqcount_retry(&timekeeper_seq, seq));
309 timespec_add_ns(ts, nsecs);
312 * Do not bail out early, in case there were callers still using
313 * the value, even in the face of the WARN_ON.
315 if (unlikely(timekeeping_suspended))
319 EXPORT_SYMBOL(__getnstimeofday);
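/*
 * Illustrative sketch (not part of this file): the lockless read pattern used
 * by __getnstimeofday() above, reduced to a standalone seqcount analogue. An
 * even sequence number means the data is stable, an odd one means a writer is
 * mid-update; readers retry if the number changed while they were sampling.
 * Sequentially consistent atomics keep the sketch simple; the kernel's
 * seqcount_t uses finer-grained barriers.
 */
#include <stdatomic.h>
#include <stdint.h>

struct snapshot {
	atomic_uint seq;
	_Atomic uint64_t secs;
	_Atomic uint64_t nsecs;
};

void snapshot_write(struct snapshot *s, uint64_t secs, uint64_t nsecs)
{
	atomic_fetch_add(&s->seq, 1);		/* odd: write in progress */
	atomic_store(&s->secs, secs);
	atomic_store(&s->nsecs, nsecs);
	atomic_fetch_add(&s->seq, 1);		/* even again: stable */
}

void snapshot_read(struct snapshot *s, uint64_t *secs, uint64_t *nsecs)
{
	unsigned int seq;

	do {
		do {
			seq = atomic_load(&s->seq);
		} while (seq & 1);		/* writer active, wait */
		*secs = atomic_load(&s->secs);
		*nsecs = atomic_load(&s->nsecs);
	} while (atomic_load(&s->seq) != seq);	/* raced with a writer, retry */
}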
322 * getnstimeofday - Returns the time of day in a timespec.
323 * @ts: pointer to the timespec to be set
325 * Returns the time of day in a timespec (WARN if suspended).
327 void getnstimeofday(struct timespec *ts)
329 WARN_ON(__getnstimeofday(ts));
331 EXPORT_SYMBOL(getnstimeofday);
333 ktime_t ktime_get(void)
335 struct timekeeper *tk = &timekeeper;
339 WARN_ON(timekeeping_suspended);
342 seq = read_seqcount_begin(&timekeeper_seq);
343 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
344 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
346 } while (read_seqcount_retry(&timekeeper_seq, seq));
348 * Use ktime_set/ktime_add_ns to create a proper ktime on
349 * 32-bit architectures without CONFIG_KTIME_SCALAR.
351 return ktime_add_ns(ktime_set(secs, 0), nsecs);
353 EXPORT_SYMBOL_GPL(ktime_get);
356 * ktime_get_ts - get the monotonic clock in timespec format
357 * @ts: pointer to timespec variable
359 * The function calculates the monotonic clock from the realtime
360 * clock and the wall_to_monotonic offset and stores the result
361 * in normalized timespec format in the variable pointed to by @ts.
363 void ktime_get_ts(struct timespec *ts)
365 struct timekeeper *tk = &timekeeper;
366 struct timespec tomono;
370 WARN_ON(timekeeping_suspended);
373 seq = read_seqcount_begin(&timekeeper_seq);
374 ts->tv_sec = tk->xtime_sec;
375 nsec = timekeeping_get_ns(tk);
376 tomono = tk->wall_to_monotonic;
378 } while (read_seqcount_retry(&timekeeper_seq, seq));
380 ts->tv_sec += tomono.tv_sec;
382 timespec_add_ns(ts, nsec + tomono.tv_nsec);
384 EXPORT_SYMBOL_GPL(ktime_get_ts);
388 * timekeeping_clocktai - Returns the TAI time of day in a timespec
389 * @ts: pointer to the timespec to be set
391 * Returns the time of day in a timespec.
393 void timekeeping_clocktai(struct timespec *ts)
395 struct timekeeper *tk = &timekeeper;
399 WARN_ON(timekeeping_suspended);
402 seq = read_seqcount_begin(&timekeeper_seq);
404 ts->tv_sec = tk->xtime_sec + tk->tai_offset;
405 nsecs = timekeeping_get_ns(tk);
407 } while (read_seqcount_retry(&timekeeper_seq, seq));
410 timespec_add_ns(ts, nsecs);
413 EXPORT_SYMBOL(timekeeping_clocktai);
417 * ktime_get_clocktai - Returns the TAI time of day in a ktime
419 * Returns the time of day in a ktime.
421 ktime_t ktime_get_clocktai(void)
425 timekeeping_clocktai(&ts);
426 return timespec_to_ktime(ts);
428 EXPORT_SYMBOL(ktime_get_clocktai);
430 #ifdef CONFIG_NTP_PPS
433 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
434 * @ts_raw: pointer to the timespec to be set to raw monotonic time
435 * @ts_real: pointer to the timespec to be set to the time of day
437 * This function reads both the time of day and raw monotonic time at the
438 * same time atomically and stores the resulting timestamps in timespec format.
441 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
443 struct timekeeper *tk = &timekeeper;
445 s64 nsecs_raw, nsecs_real;
447 WARN_ON_ONCE(timekeeping_suspended);
450 seq = read_seqcount_begin(&timekeeper_seq);
452 *ts_raw = tk->raw_time;
453 ts_real->tv_sec = tk->xtime_sec;
454 ts_real->tv_nsec = 0;
456 nsecs_raw = timekeeping_get_ns_raw(tk);
457 nsecs_real = timekeeping_get_ns(tk);
459 } while (read_seqcount_retry(&timekeeper_seq, seq));
461 timespec_add_ns(ts_raw, nsecs_raw);
462 timespec_add_ns(ts_real, nsecs_real);
464 EXPORT_SYMBOL(getnstime_raw_and_real);
466 #endif /* CONFIG_NTP_PPS */
469 * do_gettimeofday - Returns the time of day in a timeval
470 * @tv: pointer to the timeval to be set
472 * NOTE: Users should be converted to using getnstimeofday()
474 void do_gettimeofday(struct timeval *tv)
478 getnstimeofday(&now);
479 tv->tv_sec = now.tv_sec;
480 tv->tv_usec = now.tv_nsec/1000;
482 EXPORT_SYMBOL(do_gettimeofday);
485 * do_settimeofday - Sets the time of day
486 * @tv: pointer to the timespec variable containing the new time
488 * Sets the time of day to the new time, updates NTP and notifies hrtimers
490 int do_settimeofday(const struct timespec *tv)
492 struct timekeeper *tk = &timekeeper;
493 struct timespec ts_delta, xt;
496 if (!timespec_valid_strict(tv))
499 raw_spin_lock_irqsave(&timekeeper_lock, flags);
500 write_seqcount_begin(&timekeeper_seq);
502 timekeeping_forward_now(tk);
505 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
506 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
508 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
510 tk_set_xtime(tk, tv);
512 timekeeping_update(tk, true, true);
514 write_seqcount_end(&timekeeper_seq);
515 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
517 /* signal hrtimers about time change */
522 EXPORT_SYMBOL(do_settimeofday);
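/*
 * Illustrative sketch (not part of this file): how a caller such as the
 * settimeofday() syscall path or an RTC driver might use do_settimeofday().
 * The value below is made up; invalid timespecs are rejected with -EINVAL.
 */
static int example_set_clock(void)
{
	struct timespec new_time = {
		.tv_sec	 = 1400000000,	/* some absolute CLOCK_REALTIME value */
		.tv_nsec = 0,
	};

	return do_settimeofday(&new_time);
}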
525 * timekeeping_inject_offset - Adds or subtracts from the current time.
526 * @ts: pointer to the timespec variable containing the offset
528 * Adds or subtracts an offset value from the current time.
530 int timekeeping_inject_offset(struct timespec *ts)
532 struct timekeeper *tk = &timekeeper;
537 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
540 raw_spin_lock_irqsave(&timekeeper_lock, flags);
541 write_seqcount_begin(&timekeeper_seq);
543 timekeeping_forward_now(tk);
545 /* Make sure the proposed value is valid */
546 tmp = timespec_add(tk_xtime(tk), *ts);
547 if (!timespec_valid_strict(&tmp)) {
552 tk_xtime_add(tk, ts);
553 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
555 error: /* even if we error out, we forwarded the time, so call update */
556 timekeeping_update(tk, true, true);
558 write_seqcount_end(&timekeeper_seq);
559 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
561 /* signal hrtimers about time change */
566 EXPORT_SYMBOL(timekeeping_inject_offset);
570 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
573 s32 timekeeping_get_tai_offset(void)
575 struct timekeeper *tk = &timekeeper;
580 seq = read_seqcount_begin(&timekeeper_seq);
581 ret = tk->tai_offset;
582 } while (read_seqcount_retry(&timekeeper_seq, seq));
588 * __timekeeping_set_tai_offset - Lock free worker function
591 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
593 tk->tai_offset = tai_offset;
594 tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
598 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
601 void timekeeping_set_tai_offset(s32 tai_offset)
603 struct timekeeper *tk = &timekeeper;
606 raw_spin_lock_irqsave(&timekeeper_lock, flags);
607 write_seqcount_begin(&timekeeper_seq);
608 __timekeeping_set_tai_offset(tk, tai_offset);
609 write_seqcount_end(&timekeeper_seq);
610 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
615 * change_clocksource - Swaps clocksources if a new one is available
617 * Accumulates current time interval and initializes new clocksource
619 static int change_clocksource(void *data)
621 struct timekeeper *tk = &timekeeper;
622 struct clocksource *new, *old;
625 new = (struct clocksource *) data;
627 raw_spin_lock_irqsave(&timekeeper_lock, flags);
628 write_seqcount_begin(&timekeeper_seq);
630 timekeeping_forward_now(tk);
632 * If the clocksource is in a module, get a module reference. Succeeds
633 * for built-in code (owner == NULL) as well.
635 if (try_module_get(new->owner)) {
636 if (!new->enable || new->enable(new) == 0) {
638 tk_setup_internals(tk, new);
641 module_put(old->owner);
643 module_put(new->owner);
646 timekeeping_update(tk, true, true);
648 write_seqcount_end(&timekeeper_seq);
649 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
655 * timekeeping_notify - Install a new clock source
656 * @clock: pointer to the clock source
658 * This function is called from clocksource.c after a new, better clock
659 * source has been registered. The caller holds the clocksource_mutex.
661 int timekeeping_notify(struct clocksource *clock)
663 struct timekeeper *tk = &timekeeper;
665 if (tk->clock == clock)
667 stop_machine(change_clocksource, clock, NULL);
669 return tk->clock == clock ? 0 : -1;
673 * ktime_get_real - get the real (wall-) time in ktime_t format
675 * returns the time in ktime_t format
677 ktime_t ktime_get_real(void)
681 getnstimeofday(&now);
683 return timespec_to_ktime(now);
685 EXPORT_SYMBOL_GPL(ktime_get_real);
688 * getrawmonotonic - Returns the raw monotonic time in a timespec
689 * @ts: pointer to the timespec to be set
691 * Returns the raw monotonic time (completely un-modified by ntp)
693 void getrawmonotonic(struct timespec *ts)
695 struct timekeeper *tk = &timekeeper;
700 seq = read_seqcount_begin(&timekeeper_seq);
701 nsecs = timekeeping_get_ns_raw(tk);
704 } while (read_seqcount_retry(&timekeeper_seq, seq));
706 timespec_add_ns(ts, nsecs);
708 EXPORT_SYMBOL(getrawmonotonic);
711 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
713 int timekeeping_valid_for_hres(void)
715 struct timekeeper *tk = &timekeeper;
720 seq = read_seqcount_begin(&timekeeper_seq);
722 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
724 } while (read_seqcount_retry(&timekeeper_seq, seq));
730 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
732 u64 timekeeping_max_deferment(void)
734 struct timekeeper *tk = &timekeeper;
739 seq = read_seqcount_begin(&timekeeper_seq);
741 ret = tk->clock->max_idle_ns;
743 } while (read_seqcount_retry(&timekeeper_seq, seq));
749 * read_persistent_clock - Return time from the persistent clock.
751 * Weak dummy function for arches that do not yet support it.
752 * Reads the time from the battery backed persistent clock.
753 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
755 * XXX - Do be sure to remove it once all arches implement it.
757 void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
764 * read_boot_clock - Return time of the system start.
766 * Weak dummy function for arches that do not yet support it.
767 * Function to read the exact time the system has been started.
768 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
770 * XXX - Do be sure to remove it once all arches implement it.
772 void __attribute__((weak)) read_boot_clock(struct timespec *ts)
779 * timekeeping_init - Initializes the clocksource and common timekeeping values
781 void __init timekeeping_init(void)
783 struct timekeeper *tk = &timekeeper;
784 struct clocksource *clock;
786 struct timespec now, boot, tmp;
788 read_persistent_clock(&now);
790 if (!timespec_valid_strict(&now)) {
791 pr_warn("WARNING: Persistent clock returned invalid value!\n"
792 " Check your CMOS/BIOS settings.\n");
795 } else if (now.tv_sec || now.tv_nsec)
796 persistent_clock_exist = true;
798 read_boot_clock(&boot);
799 if (!timespec_valid_strict(&boot)) {
800 pr_warn("WARNING: Boot clock returned invalid value!\n"
801 " Check your CMOS/BIOS settings.\n");
806 raw_spin_lock_irqsave(&timekeeper_lock, flags);
807 write_seqcount_begin(&timekeeper_seq);
810 clock = clocksource_default_clock();
812 clock->enable(clock);
813 tk_setup_internals(tk, clock);
815 tk_set_xtime(tk, &now);
816 tk->raw_time.tv_sec = 0;
817 tk->raw_time.tv_nsec = 0;
818 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
821 set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
822 tk_set_wall_to_mono(tk, tmp);
826 tk_set_sleep_time(tk, tmp);
828 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
830 write_seqcount_end(&timekeeper_seq);
831 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
834 /* time in seconds when suspend began */
835 static struct timespec timekeeping_suspend_time;
838 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
839 * @delta: pointer to a timespec delta value
841 * Takes a timespec offset measuring a suspend interval and properly
842 * adds the sleep offset to the timekeeping variables.
844 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
845 struct timespec *delta)
847 if (!timespec_valid_strict(delta)) {
848 printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
849 "sleep delta value!\n");
852 tk_xtime_add(tk, delta);
853 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
854 tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
855 tk_debug_account_sleep_time(delta);
859 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
860 * @delta: pointer to a timespec delta value
862 * This hook is for architectures that cannot support read_persistent_clock
863 * because their RTC/persistent clock is only accessible when irqs are enabled.
865 * This function should only be called by rtc_resume(), and allows
866 * a suspend offset to be injected into the timekeeping values.
868 void timekeeping_inject_sleeptime(struct timespec *delta)
870 struct timekeeper *tk = &timekeeper;
874 * Make sure we don't set the clock twice, as timekeeping_resume() already did it.
877 if (has_persistent_clock())
880 raw_spin_lock_irqsave(&timekeeper_lock, flags);
881 write_seqcount_begin(&timekeeper_seq);
883 timekeeping_forward_now(tk);
885 __timekeeping_inject_sleeptime(tk, delta);
887 timekeeping_update(tk, true, true);
889 write_seqcount_end(&timekeeper_seq);
890 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
892 /* signal hrtimers about time change */
897 * timekeeping_resume - Resumes the generic timekeeping subsystem.
899 * This is for the generic clocksource timekeeping.
900 * xtime/wall_to_monotonic/jiffies/etc are
901 * still managed by arch specific suspend/resume code.
903 static void timekeeping_resume(void)
905 struct timekeeper *tk = &timekeeper;
906 struct clocksource *clock = tk->clock;
908 struct timespec ts_new, ts_delta;
909 cycle_t cycle_now, cycle_delta;
910 bool suspendtime_found = false;
912 read_persistent_clock(&ts_new);
914 clockevents_resume();
915 clocksource_resume();
917 raw_spin_lock_irqsave(&timekeeper_lock, flags);
918 write_seqcount_begin(&timekeeper_seq);
921 * After system resumes, we need to calculate the suspended time and
922 * compensate it for the OS time. There are 3 sources that could be
923 * used: Nonstop clocksource during suspend, persistent clock and rtc
926 * One specific platform may have 1 or 2 or all of them, and the
927 * preference will be:
928 * suspend-nonstop clocksource -> persistent clock -> rtc
929 * The less preferred source will only be tried if there is no better
930 * usable source. The rtc part is handled separately in rtc core code.
932 cycle_now = clock->read(clock);
933 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
934 cycle_now > clock->cycle_last) {
935 u64 num, max = ULLONG_MAX;
936 u32 mult = clock->mult;
937 u32 shift = clock->shift;
940 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
943 * "cycle_delta * mult" may cause a 64-bit overflow if the
944 * suspended time is too long. In that case we need to do the
945 * 64-bit math carefully.
946 */
947 do_div(max, mult);
948 if (cycle_delta > max) {
949 num = div64_u64(cycle_delta, max);
950 nsec = (((u64) max * mult) >> shift) * num;
951 cycle_delta -= num * max;
953 nsec += ((u64) cycle_delta * mult) >> shift;
955 ts_delta = ns_to_timespec(nsec);
956 suspendtime_found = true;
957 } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
958 ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
959 suspendtime_found = true;
962 if (suspendtime_found)
963 __timekeeping_inject_sleeptime(tk, &ts_delta);
965 /* Re-base the last cycle value */
966 tk->cycle_last = clock->cycle_last = cycle_now;
968 timekeeping_suspended = 0;
969 timekeeping_update(tk, false, true);
970 write_seqcount_end(&timekeeper_seq);
971 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
973 touch_softlockup_watchdog();
975 clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
977 /* Resume hrtimers */
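/*
 * Illustrative sketch (not part of this file): the overflow-safe conversion
 * used above for long suspend intervals. (cycle_delta * mult) can exceed
 * 64 bits, so the delta is split into chunks of at most 'max' cycles, each of
 * which is converted separately. Standalone C with the same structure.
 */
#include <stdint.h>

uint64_t suspend_delta_to_ns(uint64_t cycle_delta, uint32_t mult, uint32_t shift)
{
	/* largest delta whose product with mult still fits in 64 bits */
	uint64_t max = UINT64_MAX / mult;
	uint64_t nsec = 0;

	if (cycle_delta > max) {
		uint64_t num = cycle_delta / max;

		nsec = ((max * mult) >> shift) * num;
		cycle_delta -= num * max;
	}
	nsec += (cycle_delta * mult) >> shift;
	return nsec;
}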
981 static int timekeeping_suspend(void)
983 struct timekeeper *tk = &timekeeper;
985 struct timespec delta, delta_delta;
986 static struct timespec old_delta;
988 read_persistent_clock(&timekeeping_suspend_time);
990 raw_spin_lock_irqsave(&timekeeper_lock, flags);
991 write_seqcount_begin(&timekeeper_seq);
992 timekeeping_forward_now(tk);
993 timekeeping_suspended = 1;
996 * To avoid drift caused by repeated suspend/resumes,
997 * each of which can add ~1 second of drift error,
998 * try to compensate so the difference in system time
999 * and persistent_clock time stays close to constant.
1001 delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
1002 delta_delta = timespec_sub(delta, old_delta);
1003 if (abs(delta_delta.tv_sec) >= 2) {
1005 * if delta_delta is too large, assume time correction
1006 * has occurred and set old_delta to the current delta.
1010 /* Otherwise try to adjust old_system to compensate */
1011 timekeeping_suspend_time =
1012 timespec_add(timekeeping_suspend_time, delta_delta);
1014 write_seqcount_end(&timekeeper_seq);
1015 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1017 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
1018 clocksource_suspend();
1019 clockevents_suspend();
1024 /* sysfs resume/suspend bits for timekeeping */
1025 static struct syscore_ops timekeeping_syscore_ops = {
1026 .resume = timekeeping_resume,
1027 .suspend = timekeeping_suspend,
1030 static int __init timekeeping_init_ops(void)
1032 register_syscore_ops(&timekeeping_syscore_ops);
1036 device_initcall(timekeeping_init_ops);
1039 * If the error is already larger, we look ahead even further
1040 * to compensate for late or lost adjustments.
1042 static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
1043 s64 error, s64 *interval,
1047 u32 look_ahead, adj;
1051 * Use the current error value to determine how much to look ahead.
1052 * The larger the error the slower we adjust for it to avoid problems
1053 * with losing too many ticks, otherwise we would overadjust and
1054 * produce an even larger error. The smaller the adjustment the
1055 * faster we try to adjust for it, as lost ticks can do less harm
1056 * here. This is tuned so that an error of about 1 msec is adjusted
1057 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
1059 error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
1060 error2 = abs(error2);
1061 for (look_ahead = 0; error2 > 0; look_ahead++)
1062 error2 >>= 2;
1065 * Now calculate the error in (1 << look_ahead) ticks, but first
1066 * remove the single look ahead already included in the error.
1068 tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
1069 tick_error -= tk->xtime_interval >> 1;
1070 error = ((error - tick_error) >> look_ahead) + tick_error;
1072 /* Finally calculate the adjustment shift value. */
1077 *interval = -*interval;
1081 for (adj = 0; error > i; adj++)
1090 * Adjust the multiplier to reduce the error value,
1091 * this is optimized for the most common adjustments of -1,0,1,
1092 * for other values we can do a bit more work.
1094 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1096 s64 error, interval = tk->cycle_interval;
1100 * The point of this is to check if the error is greater than half an interval.
1103 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
1105 * Note we subtract one in the shift, so that error is really error*2.
1106 * This "saves" dividing(shifting) interval twice, but keeps the
1107 * (error > interval) comparison as still measuring if error is
1108 * larger than half an interval.
1110 * Note: It does not "save" on aggravation when reading the code.
1112 error = tk->ntp_error >> (tk->ntp_error_shift - 1);
1113 if (error > interval) {
1115 * We now divide error by 4(via shift), which checks if
1116 * the error is greater than twice the interval.
1117 * If it is greater, we need a bigadjust, if its smaller,
1118 * we can adjust by 1.
1122 * XXX - In update_wall_time, we round up to the next
1123 * nanosecond, and store the amount rounded up into
1124 * the error. This causes the likely below to be unlikely.
1126 * The proper fix is to avoid rounding up by using
1127 * the high precision tk->xtime_nsec instead of
1128 * xtime.tv_nsec everywhere. Fixing this will take some time.
1131 if (likely(error <= interval))
1134 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1136 if (error < -interval) {
1137 /* See comment above, this is just switched for the negative */
1139 if (likely(error >= -interval)) {
1141 interval = -interval;
1144 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1151 if (unlikely(tk->clock->maxadj &&
1152 (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
1153 printk_once(KERN_WARNING
1154 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1155 tk->clock->name, (long)tk->mult + adj,
1156 (long)tk->clock->mult + tk->clock->maxadj);
1159 * So the following can be confusing.
1161 * To keep things simple, let's assume adj == 1 for now.
1163 * When adj != 1, remember that the interval and offset values
1164 * have been appropriately scaled so the math is the same.
1166 * The basic idea here is that we're increasing the multiplier
1167 * by one, this causes the xtime_interval to be incremented by
1168 * one cycle_interval. This is because:
1169 * xtime_interval = cycle_interval * mult
1170 * So if mult is being incremented by one:
1171 * xtime_interval = cycle_interval * (mult + 1)
1173 * xtime_interval = (cycle_interval * mult) + cycle_interval
1174 * Which can be shortened to:
1175 * xtime_interval += cycle_interval
1177 * So offset stores the non-accumulated cycles. Thus the current
1178 * time (in shifted nanoseconds) is:
1179 * now = (offset * adj) + xtime_nsec
1180 * Now, even though we're adjusting the clock frequency, we have
1181 * to keep time consistent. In other words, we can't jump back
1182 * in time, and we also want to avoid jumping forward in time.
1184 * So given the same offset value, we need the time to be the same
1185 * both before and after the freq adjustment.
1186 * now = (offset * adj_1) + xtime_nsec_1
1187 * now = (offset * adj_2) + xtime_nsec_2
1189 * (offset * adj_1) + xtime_nsec_1 =
1190 * (offset * adj_2) + xtime_nsec_2
1194 * (offset * adj_1) + xtime_nsec_1 =
1195 * (offset * (adj_1+1)) + xtime_nsec_2
1196 * (offset * adj_1) + xtime_nsec_1 =
1197 * (offset * adj_1) + offset + xtime_nsec_2
1198 * Canceling the sides:
1199 * xtime_nsec_1 = offset + xtime_nsec_2
1201 * xtime_nsec_2 = xtime_nsec_1 - offset
1202 * Which simplifies to:
1203 * xtime_nsec -= offset
1205 * XXX - TODO: Doc ntp_error calculation.
1208 tk->xtime_interval += interval;
1209 tk->xtime_nsec -= offset;
1210 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1214 * It may be possible that when we entered this function, xtime_nsec
1215 * was very small. Further, if we're slightly speeding the clocksource
1216 * in the code above, it's possible the required corrective factor to
1217 * xtime_nsec could cause it to underflow.
1219 * Now, since we already accumulated the second, we cannot simply roll
1220 * the accumulated second back, since the NTP subsystem has been
1221 * notified via second_overflow. So instead we push xtime_nsec forward
1222 * by the amount we underflowed, and add that amount into the error.
1224 * We'll correct this error next time through this function, when
1225 * xtime_nsec is not as small.
1227 if (unlikely((s64)tk->xtime_nsec < 0)) {
1228 s64 neg = -(s64)tk->xtime_nsec;
1230 tk->ntp_error += neg << tk->ntp_error_shift;
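/*
 * Illustrative sketch (not part of this file): the consistency argument from
 * the long comment in timekeeping_adjust() above, with concrete numbers.
 * Bumping the multiplier by one while subtracting 'offset' from xtime_nsec
 * leaves the instantaneous time unchanged:
 * offset * adj + nsec == offset * (adj + 1) + (nsec - offset).
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 12345;		/* unaccumulated cycles (made up) */
	uint64_t adj1 = 1000, nsec1 = 500000;	/* current mult and shifted nsec */
	uint64_t adj2 = adj1 + 1;		/* frequency bumped by one */
	uint64_t nsec2 = nsec1 - offset;	/* the xtime_nsec -= offset step */

	assert(offset * adj1 + nsec1 == offset * adj2 + nsec2);
	return 0;
}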
1236 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1238 * Helper function that accumulates the nsecs greater than a second
1239 * from the xtime_nsec field into the xtime_sec field.
1240 * It also calls into the NTP code to handle leapsecond processing.
1243 static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
1245 u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
1247 while (tk->xtime_nsec >= nsecps) {
1250 tk->xtime_nsec -= nsecps;
1253 /* Figure out if it's a leap second and apply it if needed */
1254 leap = second_overflow(tk->xtime_sec);
1255 if (unlikely(leap)) {
1258 tk->xtime_sec += leap;
1262 tk_set_wall_to_mono(tk,
1263 timespec_sub(tk->wall_to_monotonic, ts));
1265 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1267 clock_was_set_delayed();
1273 * logarithmic_accumulation - shifted accumulation of cycles
1275 * This function accumulates a shifted interval of cycles into
1276 * a shifted interval of nanoseconds. Allows for O(log) accumulation
1279 * Returns the unconsumed cycles.
1281 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1284 cycle_t interval = tk->cycle_interval << shift;
1287 /* If the offset is smaller than a shifted interval, do nothing */
1288 if (offset < interval)
1291 /* Accumulate one shifted interval */
1293 tk->cycle_last += interval;
1295 tk->xtime_nsec += tk->xtime_interval << shift;
1296 accumulate_nsecs_to_secs(tk);
1298 /* Accumulate raw time */
1299 raw_nsecs = (u64)tk->raw_interval << shift;
1300 raw_nsecs += tk->raw_time.tv_nsec;
1301 if (raw_nsecs >= NSEC_PER_SEC) {
1302 u64 raw_secs = raw_nsecs;
1303 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
1304 tk->raw_time.tv_sec += raw_secs;
1306 tk->raw_time.tv_nsec = raw_nsecs;
1308 /* Accumulate error between NTP and clock interval */
1309 tk->ntp_error += ntp_tick_length() << shift;
1310 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
1311 (tk->ntp_error_shift + shift);
1316 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
1317 static inline void old_vsyscall_fixup(struct timekeeper *tk)
1322 * Store only full nanoseconds into xtime_nsec after rounding
1323 * it up and add the remainder to the error difference.
1324 * XXX - This is necessary to avoid small 1ns inconsistencies caused
1325 * by truncating the remainder in vsyscalls. However, it causes
1326 * additional work to be done in timekeeping_adjust(). Once
1327 * the vsyscall implementations are converted to use xtime_nsec
1328 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
1329 * users are removed, this can be killed.
1331 remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
1332 tk->xtime_nsec -= remainder;
1333 tk->xtime_nsec += 1ULL << tk->shift;
1334 tk->ntp_error += remainder << tk->ntp_error_shift;
1338 #define old_vsyscall_fixup(tk)
1344 * update_wall_time - Uses the current clocksource to increment the wall time
1347 static void update_wall_time(void)
1349 struct clocksource *clock;
1350 struct timekeeper *real_tk = &timekeeper;
1351 struct timekeeper *tk = &shadow_timekeeper;
1353 int shift = 0, maxshift;
1354 unsigned long flags;
1356 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1358 /* Make sure we're fully resumed: */
1359 if (unlikely(timekeeping_suspended))
1362 clock = real_tk->clock;
1364 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1365 offset = real_tk->cycle_interval;
1367 offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
1370 /* Check if there's really nothing to do */
1371 if (offset < real_tk->cycle_interval)
1375 * With NO_HZ we may have to accumulate many cycle_intervals
1376 * (think "ticks") worth of time at once. To do this efficiently,
1377 * we calculate the largest doubling multiple of cycle_intervals
1378 * that is smaller than the offset. We then accumulate that
1379 * chunk in one go, and then try to consume the next smaller doubled multiple.
1382 shift = ilog2(offset) - ilog2(tk->cycle_interval);
1383 shift = max(0, shift);
1384 /* Bound shift to one less than what overflows tick_length */
1385 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1386 shift = min(shift, maxshift);
1387 while (offset >= tk->cycle_interval) {
1388 offset = logarithmic_accumulation(tk, offset, shift);
1389 if (offset < tk->cycle_interval<<shift)
1393 /* correct the clock when NTP error is too big */
1394 timekeeping_adjust(tk, offset);
1397 * XXX This can be killed once everyone converts
1398 * to the new update_vsyscall.
1400 old_vsyscall_fixup(tk);
1403 * Finally, make sure that after the rounding
1404 * xtime_nsec isn't larger than NSEC_PER_SEC
1406 accumulate_nsecs_to_secs(tk);
1408 write_seqcount_begin(&timekeeper_seq);
1409 /* Update clock->cycle_last with the new value */
1410 clock->cycle_last = tk->cycle_last;
1412 * Update the real timekeeper.
1414 * We could avoid this memcpy by switching pointers, but that
1415 * requires changes to all other timekeeper usage sites as
1416 * well, i.e. move the timekeeper pointer getter into the
1417 * spinlocked/seqcount protected sections. And we trade this
1418 * memcpy under the timekeeper_seq against one before we start
1418 * memcpy under the timekeeper_seq against one before we start updating.
1422 timekeeping_update(real_tk, false, false);
1423 write_seqcount_end(&timekeeper_seq);
1425 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
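/*
 * Illustrative sketch (not part of this file): the logarithmic accumulation
 * strategy of update_wall_time()/logarithmic_accumulation() above. With NO_HZ
 * the offset may span many tick intervals, so instead of looping once per
 * interval the code consumes the largest power-of-two multiple of the
 * interval that still fits, then progressively smaller ones. Standalone C
 * with made-up numbers.
 */
#include <stdint.h>
#include <stdio.h>

static int ilog2_u64(uint64_t x)
{
	int l = -1;

	while (x) {
		x >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint64_t interval = 1000;	/* cycles per tick, example only */
	uint64_t offset = 123456;	/* cycles elapsed since the last update */
	uint64_t accumulated = 0;
	int shift = ilog2_u64(offset) - ilog2_u64(interval);

	if (shift < 0)
		shift = 0;
	while (offset >= interval) {
		while (offset < (interval << shift))
			shift--;	/* fall back to the next smaller chunk */
		offset -= interval << shift;
		accumulated += interval << shift;
	}
	printf("accumulated %llu cycles, %llu left over\n",
	       (unsigned long long)accumulated, (unsigned long long)offset);
	return 0;
}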
1429 * getboottime - Return the real time of system boot.
1430 * @ts: pointer to the timespec to be set
1432 * Returns the wall-time of boot in a timespec.
1434 * This is based on the wall_to_monotonic offset and the total suspend
1435 * time. Calls to settimeofday will affect the value returned (which
1436 * basically means that however wrong your real time clock is at boot time,
1437 * you get the right time here).
1439 void getboottime(struct timespec *ts)
1441 struct timekeeper *tk = &timekeeper;
1442 struct timespec boottime = {
1443 .tv_sec = tk->wall_to_monotonic.tv_sec +
1444 tk->total_sleep_time.tv_sec,
1445 .tv_nsec = tk->wall_to_monotonic.tv_nsec +
1446 tk->total_sleep_time.tv_nsec
1449 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
1451 EXPORT_SYMBOL_GPL(getboottime);
1454 * get_monotonic_boottime - Returns monotonic time since boot
1455 * @ts: pointer to the timespec to be set
1457 * Returns the monotonic time since boot in a timespec.
1459 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
1460 * includes the time spent in suspend.
1462 void get_monotonic_boottime(struct timespec *ts)
1464 struct timekeeper *tk = &timekeeper;
1465 struct timespec tomono, sleep;
1469 WARN_ON(timekeeping_suspended);
1472 seq = read_seqcount_begin(&timekeeper_seq);
1473 ts->tv_sec = tk->xtime_sec;
1474 nsec = timekeeping_get_ns(tk);
1475 tomono = tk->wall_to_monotonic;
1476 sleep = tk->total_sleep_time;
1478 } while (read_seqcount_retry(&timekeeper_seq, seq));
1480 ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
1482 timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
1484 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
1487 * ktime_get_boottime - Returns monotonic time since boot in a ktime
1489 * Returns the monotonic time since boot in a ktime
1491 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
1492 * includes the time spent in suspend.
1494 ktime_t ktime_get_boottime(void)
1498 get_monotonic_boottime(&ts);
1499 return timespec_to_ktime(ts);
1501 EXPORT_SYMBOL_GPL(ktime_get_boottime);
1504 * monotonic_to_bootbased - Convert the monotonic time to boot based.
1505 * @ts: pointer to the timespec to be converted
1507 void monotonic_to_bootbased(struct timespec *ts)
1509 struct timekeeper *tk = &timekeeper;
1511 *ts = timespec_add(*ts, tk->total_sleep_time);
1513 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
1515 unsigned long get_seconds(void)
1517 struct timekeeper *tk = &timekeeper;
1519 return tk->xtime_sec;
1521 EXPORT_SYMBOL(get_seconds);
1523 struct timespec __current_kernel_time(void)
1525 struct timekeeper *tk = &timekeeper;
1527 return tk_xtime(tk);
1530 struct timespec current_kernel_time(void)
1532 struct timekeeper *tk = &timekeeper;
1533 struct timespec now;
1537 seq = read_seqcount_begin(&timekeeper_seq);
1540 } while (read_seqcount_retry(&timekeeper_seq, seq));
1544 EXPORT_SYMBOL(current_kernel_time);
1546 struct timespec get_monotonic_coarse(void)
1548 struct timekeeper *tk = &timekeeper;
1549 struct timespec now, mono;
1553 seq = read_seqcount_begin(&timekeeper_seq);
1556 mono = tk->wall_to_monotonic;
1557 } while (read_seqcount_retry(&timekeeper_seq, seq));
1559 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
1560 now.tv_nsec + mono.tv_nsec);
1565 * Must hold jiffies_lock
1567 void do_timer(unsigned long ticks)
1569 jiffies_64 += ticks;
1571 calc_global_load(ticks);
1575 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
1576 * and sleep offsets.
1577 * @xtim: pointer to timespec to be set with xtime
1578 * @wtom: pointer to timespec to be set with wall_to_monotonic
1579 * @sleep: pointer to timespec to be set with time in suspend
1581 void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1582 struct timespec *wtom, struct timespec *sleep)
1584 struct timekeeper *tk = &timekeeper;
1588 seq = read_seqcount_begin(&timekeeper_seq);
1589 *xtim = tk_xtime(tk);
1590 *wtom = tk->wall_to_monotonic;
1591 *sleep = tk->total_sleep_time;
1592 } while (read_seqcount_retry(&timekeeper_seq, seq));
1595 #ifdef CONFIG_HIGH_RES_TIMERS
1597 * ktime_get_update_offsets - hrtimer helper
1598 * @offs_real: pointer to storage for monotonic -> realtime offset
1599 * @offs_boot: pointer to storage for monotonic -> boottime offset
1601 * Returns current monotonic time and updates the offsets
1602 * Called from hrtimer_interrupt() or retrigger_next_event()
1604 ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
1607 struct timekeeper *tk = &timekeeper;
1613 seq = read_seqcount_begin(&timekeeper_seq);
1615 secs = tk->xtime_sec;
1616 nsecs = timekeeping_get_ns(tk);
1618 *offs_real = tk->offs_real;
1619 *offs_boot = tk->offs_boot;
1620 *offs_tai = tk->offs_tai;
1621 } while (read_seqcount_retry(&timekeeper_seq, seq));
1623 now = ktime_add_ns(ktime_set(secs, 0), nsecs);
1624 now = ktime_sub(now, *offs_real);
1630 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
1632 ktime_t ktime_get_monotonic_offset(void)
1634 struct timekeeper *tk = &timekeeper;
1636 struct timespec wtom;
1639 seq = read_seqcount_begin(&timekeeper_seq);
1640 wtom = tk->wall_to_monotonic;
1641 } while (read_seqcount_retry(&timekeeper_seq, seq));
1643 return timespec_to_ktime(wtom);
1645 EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
1648 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
1650 int do_adjtimex(struct timex *txc)
1652 struct timekeeper *tk = &timekeeper;
1653 unsigned long flags;
1658 /* Validate the data before disabling interrupts */
1659 ret = ntp_validate_timex(txc);
1663 if (txc->modes & ADJ_SETOFFSET) {
1664 struct timespec delta;
1665 delta.tv_sec = txc->time.tv_sec;
1666 delta.tv_nsec = txc->time.tv_usec;
1667 if (!(txc->modes & ADJ_NANO))
1668 delta.tv_nsec *= 1000;
1669 ret = timekeeping_inject_offset(&delta);
1674 getnstimeofday(&ts);
1676 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1677 write_seqcount_begin(&timekeeper_seq);
1679 orig_tai = tai = tk->tai_offset;
1680 ret = __do_adjtimex(txc, &ts, &tai);
1682 if (tai != orig_tai) {
1683 __timekeeping_set_tai_offset(tk, tai);
1684 clock_was_set_delayed();
1686 write_seqcount_end(&timekeeper_seq);
1687 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
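/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * ADJ_SETOFFSET branch handled above, stepping CLOCK_REALTIME by +0.5 s via
 * adjtimex(2). Needs CAP_SYS_TIME; the offset value is made up.
 */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_SETOFFSET | ADJ_NANO;	/* offset given in sec + nsec */
	tx.time.tv_sec = 0;
	tx.time.tv_usec = 500000000;		/* interpreted as ns due to ADJ_NANO */

	if (adjtimex(&tx) == -1)
		perror("adjtimex");
	return 0;
}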
1692 #ifdef CONFIG_NTP_PPS
1694 * hardpps() - Accessor function to NTP __hardpps function
1696 void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
1698 unsigned long flags;
1700 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1701 write_seqcount_begin(&timekeeper_seq);
1703 __hardpps(phase_ts, raw_ts);
1705 write_seqcount_end(&timekeeper_seq);
1706 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1708 EXPORT_SYMBOL(hardpps);
1712 * xtime_update() - advances the timekeeping infrastructure
1713 * @ticks: number of ticks that have elapsed since the last call.
1715 * Must be called with interrupts disabled.
1717 void xtime_update(unsigned long ticks)
1719 write_seqlock(&jiffies_lock);
1721 write_sequnlock(&jiffies_lock);