/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP            (1 << 0)
#define TK_MIRROR               (1 << 1)
#define TK_CLOCK_WAS_SET        (1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
        seqcount_t              seq;
        struct timekeeper       timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:        Sequence counter for protecting updates. The lowest bit
 *              is the index for the tk_read_base array
 * @base:       tk_read_base array. Access is indexed by the lowest bit of
 *              @seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
        seqcount_t              seq;
        struct tk_read_base     base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* Flag for whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
                tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                tk->xtime_sec++;
        }
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
        struct timespec64 ts;

        ts.tv_sec = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
        tk_normalize_xtime(tk);
}
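
/*
 * Illustrative sketch (not from the original source): xtime_nsec stores
 * nanoseconds left-shifted by the clocksource shift, which keeps the
 * fractional nanoseconds that plain integer math would drop. With an
 * assumed shift of 8, a value of 500 ns round-trips as:
 *
 *      u64 xtime_nsec = (u64)500 << 8;          // 128000 shifted ns
 *      long tv_nsec = (long)(xtime_nsec >> 8);  // back to 500 ns
 *
 * tk_normalize_xtime() above keeps this field below one shifted second,
 * i.e. below ((u64)NSEC_PER_SEC << shift).
 */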

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
        struct timespec64 tmp;

        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
                                        -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec64_to_ktime(tmp);
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
        tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
        u64 max_cycles = tk->tkr_mono.clock->max_cycles;
        const char *name = tk->tkr_mono.clock->name;

        if (offset > max_cycles) {
                printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
                                offset, name, max_cycles);
                printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
        } else if (offset > (max_cycles >> 1)) {
                printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
                                offset, name, max_cycles >> 1);
                printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
        }

        if (tk->underflow_seen) {
                if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
                        tk->last_warning = jiffies;
                }
                tk->underflow_seen = 0;
        }

        if (tk->overflow_seen) {
                if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
                        tk->last_warning = jiffies;
                }
                tk->overflow_seen = 0;
        }
}

static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 now, last, mask, max, delta;
        unsigned int seq;

        /*
         * Since we're called holding a seqlock, the data may shift
         * under us while we're doing the calculation. This can cause
         * false positives, since we'd note a problem but throw the
         * results away. So nest another seqlock here to atomically
         * grab the points we are checking with.
         */
        do {
                seq = read_seqcount_begin(&tk_core.seq);
                now = tkr->read(tkr->clock);
                last = tkr->cycle_last;
                mask = tkr->mask;
                max = tkr->clock->max_cycles;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        delta = clocksource_delta(now, last, mask);

        /*
         * Try to catch underflows by checking if we are seeing small
         * mask-relative negative values.
         */
        if (unlikely((~delta & mask) < (mask >> 3))) {
                tk->underflow_seen = 1;
                delta = 0;
        }

        /* Cap the delta value to max_cycles to avoid mult overflows */
        if (unlikely(delta > max)) {
                tk->overflow_seen = 1;
                delta = tkr->clock->max_cycles;
        }

        return delta;
}
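
/*
 * Worked example of the checks above (illustrative, assumed numbers):
 * with a 32-bit counter, mask = 0xffffffff. If a read race makes
 * now = 0x1000 while last = 0x1008, then:
 *
 *      delta = (0x1000 - 0x1008) & 0xffffffff; // 0xfffffff8
 *      // ~delta & mask == 0x7, well below mask >> 3 (0x1fffffff),
 *      // so delta is treated as a small negative value: underflow.
 *
 * A genuine delta larger than max_cycles would instead overflow the
 * later delta * mult multiplication, hence the capping above.
 */
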
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
        u64 cycle_now, delta;

        /* read clocksource */
        cycle_now = tkr->read(tkr->clock);

        /* calculate the delta since the last update_wall_time */
        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

        return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:         The target timekeeper to setup.
 * @clock:      Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        u64 interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        ++tk->cs_was_changed_seq;
        old_clock = tk->tkr_mono.clock;
        tk->tkr_mono.clock = clock;
        tk->tkr_mono.read = clock->read;
        tk->tkr_mono.mask = clock->mask;
        tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

        tk->tkr_raw.clock = clock;
        tk->tkr_raw.read = clock->read;
        tk->tkr_raw.mask = clock->mask;
        tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (u64) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval = (interval * clock->mult) >> clock->shift;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
                        tk->tkr_mono.xtime_nsec >>= -shift_change;
                else
                        tk->tkr_mono.xtime_nsec <<= shift_change;
        }
        tk->tkr_raw.xtime_nsec = 0;

        tk->tkr_mono.shift = clock->shift;
        tk->tkr_raw.shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
        tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->tkr_mono.mult = clock->mult;
        tk->tkr_raw.mult = clock->mult;
        tk->ntp_err_mult = 0;
}
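
/*
 * Worked example of the interval setup above (illustrative sketch with
 * assumed clocksource parameters mult = 4000000 and shift = 22, i.e.
 * roughly 1 ns per cycle):
 *
 *      tmp = NTP_INTERVAL_LENGTH << 22;        // tick length, shifted ns
 *      tmp = (tmp + 4000000/2) / 4000000;      // rounded shifted-ns -> cycles
 *      // cycle_interval: cycles per NTP tick
 *      // xtime_interval = cycle_interval * mult: shifted ns per tick
 *      // xtime_remainder absorbs the cycle rounding error, so that
 *      // accumulated NTP intervals stay exact over time.
 */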

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
        u64 nsec;

        nsec = delta * tkr->mult + tkr->xtime_nsec;
        nsec >>= tkr->shift;

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + arch_gettimeoffset();
}
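
/*
 * Illustrative sketch of the conversion above, with assumed values
 * mult = 2048 and shift = 10 (i.e. 2 ns per cycle) and 512 shifted ns
 * already pending in xtime_nsec:
 *
 *      u64 nsec = 100 * 2048 + 512;    // delta = 100 cycles
 *      nsec >>= 10;                    // (204800 + 512) >> 10 = 200 ns
 *
 * Readouts truncate to whole nanoseconds, but the shifted xtime_nsec
 * accumulator retains the sub-ns fraction across updates instead of
 * losing it at every step.
 */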

static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
        u64 delta;

        delta = timekeeping_get_delta(tkr);
        return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
        u64 delta;

        /* calculate the delta since the last update_wall_time */
        delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
        return timekeeping_delta_to_ns(tkr, delta);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
        struct tk_read_base *base = tkf->base;

        /* Force readers off to base[1] */
        raw_write_seqcount_latch(&tkf->seq);

        /* Update base[0] */
        memcpy(base, tkr, sizeof(*base));

        /* Force readers back to base[0] */
        raw_write_seqcount_latch(&tkf->seq);

        /* Update base[1] */
        memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *      now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
        struct tk_read_base *tkr;
        unsigned int seq;
        u64 now;

        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base);

                now += timekeeping_delta_to_ns(tkr,
                                clocksource_delta(
                                        tkr->read(tkr->clock),
                                        tkr->cycle_last,
                                        tkr->mask));
        } while (read_seqcount_retry(&tkf->seq, seq));

        return now;
}

u64 ktime_get_mono_fast_ns(void)
{
        return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
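
/*
 * Minimal usage sketch (illustrative; do_traced_work() is made up):
 * the fast accessor never spins on the seqcount writer, so it is safe
 * from NMI and tracing contexts where ktime_get() must not be used.
 * Callers must tolerate the small cross-update non-monotonicity
 * described above:
 *
 *      u64 t0 = ktime_get_mono_fast_ns();
 *      do_traced_work();
 *      u64 t1 = ktime_get_mono_fast_ns();
 *      // t1 - t0 may be slightly off, or even negative, if the
 *      // reads raced with a timekeeper update.
 */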

u64 ktime_get_raw_fast_ns(void)
{
        return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqlocks. This has the following minor side effects:
 *
 * (1) It's possible that a timestamp is taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping, making the clock appear to update slightly
 * earlier:
 *    CPU 0                                        CPU 1
 *    timekeeping_inject_sleeptime64()
 *    __timekeeping_inject_sleeptime(tk, delta);
 *                                                 timestamp();
 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated.  Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
        return cycles_at_suspend;
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
        static struct tk_read_base tkr_dummy;
        struct tk_read_base *tkr = &tk->tkr_mono;

        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        cycles_at_suspend = tkr->read(tkr->clock);
        tkr_dummy.read = dummy_clock_read;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

        tkr = &tk->tkr_raw;
        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        tkr_dummy.read = dummy_clock_read;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
        struct timespec xt, wm;

        xt = timespec64_to_timespec(tk_xtime(tk));
        wm = timespec64_to_timespec(tk->wall_to_monotonic);
        update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
                            tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
        s64 remainder;

        /*
         * Store only full nanoseconds into xtime_nsec after rounding
         * it up and add the remainder to the error difference.
         * XXX - This is necessary to avoid small 1ns inconsistencies caused
         * by truncating the remainder in vsyscalls. However, it causes
         * additional work to be done in timekeeping_adjust(). Once
         * the vsyscall implementations are converted to use xtime_nsec
         * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
         * users are removed, this can be killed.
         */
        remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
        if (remainder != 0) {
                tk->tkr_mono.xtime_nsec -= remainder;
                tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
                tk->ntp_error += remainder << tk->ntp_error_shift;
                tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
        }
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        update_pvclock_gtod(tk, true);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
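
/*
 * Usage sketch (illustrative; the my_* names are made up): a hypervisor
 * clock driver registers a notifier_block and is then called back,
 * under timekeeper_lock, with the timekeeper on every update:
 *
 *      static int my_gtod_update(struct notifier_block *nb,
 *                                unsigned long was_set, void *priv)
 *      {
 *              struct timekeeper *tk = priv;
 *              // push tk->tkr_mono mult/shift/cycle_last to the guest
 *              return NOTIFY_OK;
 *      }
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_gtod_update,
 *      };
 *      pvclock_gtod_register_notifier(&my_nb);
 */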

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
        tk->next_leap_ktime = ntp_get_next_leap();
        if (tk->next_leap_ktime != KTIME_MAX)
                /* Convert to monotonic time */
                tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
        u64 seconds;
        u32 nsec;

        /*
         * The xtime based monotonic readout is:
         *      nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
         * The ktime based monotonic readout is:
         *      nsec = base_mono + now();
         * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
         */
        seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
        nsec = (u32) tk->wall_to_monotonic.tv_nsec;
        tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

        /* Update the monotonic raw base */
        tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);

        /*
         * The sum of the nanoseconds portions of xtime and
         * wall_to_monotonic can be greater/equal one second. Take
         * this into account before updating tk->ktime_sec.
         */
        nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        if (nsec >= NSEC_PER_SEC)
                seconds++;
        tk->ktime_sec = seconds;
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
        if (action & TK_CLEAR_NTP) {
                tk->ntp_error = 0;
                ntp_clear();
        }

        tk_update_leap_state(tk);
        tk_update_ktime_data(tk);

        update_vsyscall(tk);
        update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

        update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
        update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

        if (action & TK_CLOCK_WAS_SET)
                tk->clock_was_set_seq++;
        /*
         * The mirroring of the data to the shadow-timekeeper needs
         * to happen last here to ensure we don't over-write the
         * timekeeper structure on the next update with stale data
         */
        if (action & TK_MIRROR)
                memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        struct clocksource *clock = tk->tkr_mono.clock;
        u64 cycle_now, delta;
        u64 nsec;

        cycle_now = tk->tkr_mono.read(clock);
        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
        tk->tkr_mono.cycle_last = cycle_now;
        tk->tkr_raw.cycle_last  = cycle_now;

        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

        /* If arch requires, add in get_arch_timeoffset() */
        tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

        tk_normalize_xtime(tk);

        nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
        timespec64_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:         pointer to the timespec64 to be set
 *
 * Updates the time of day in the timespec64.
 * Returns 0 on success, or a negative error code when suspended
 * (in which case the timespec64 is undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsecs);

        /*
         * Do not bail out early, in case there were callers still using
         * the value, even in the face of the WARN_ON.
         */
        if (unlikely(timekeeping_suspended))
                return -EAGAIN;
        return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:         pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
        WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_mono.base;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
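
/*
 * Usage sketch (illustrative; do_something() is made up): measuring an
 * interval on the monotonic clock with the ktime_t accessors:
 *
 *      ktime_t start = ktime_get();
 *      do_something();
 *      s64 delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
 *
 * Unlike the _fast variants above, this read is fully serialized
 * against concurrent timekeeper updates by the seqcount retry loop.
 */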

u32 ktime_get_resolution_ns(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u32 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
        [TK_OFFS_REAL]  = &tk_core.timekeeper.offs_real,
        [TK_OFFS_BOOT]  = &tk_core.timekeeper.offs_boot,
        [TK_OFFS_TAI]   = &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base, *offset = offsets[offs];
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = ktime_add(tk->tkr_mono.base, *offset);
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:      time to convert.
 * @offs:       which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
        ktime_t *offset = offsets[offs];
        unsigned long seq;
        ktime_t tconv;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                tconv = ktime_add(tmono, *offset);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_raw.base;
                nsecs = timekeeping_get_ns(&tk->tkr_raw);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:         pointer to timespec64 variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 tomono;
        unsigned int seq;
        u64 nsec;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(&tk->tkr_mono);
                tomono = tk->wall_to_monotonic;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime, which should be enough to prevent
 * premature wraparounds.
 */
time64_t ktime_get_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        WARN_ON(timekeeping_suspended);
        return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        time64_t seconds;
        unsigned int seq;

        if (IS_ENABLED(CONFIG_64BIT))
                return tk->xtime_sec;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                seconds = tk->xtime_sec;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called when the timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:   pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        ktime_t base_raw;
        ktime_t base_real;
        u64 nsec_raw;
        u64 nsec_real;
        u64 now;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                now = tk->tkr_mono.read(tk->tkr_mono.clock);
                systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
                systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
                base_real = ktime_add(tk->tkr_mono.base,
                                      tk_core.timekeeper.offs_real);
                base_raw = tk->tkr_raw.base;
                nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
                nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        systime_snapshot->cycles = now;
        systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
        systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
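
/*
 * Usage sketch (illustrative): capturing correlated clock values
 * derived from one clocksource read:
 *
 *      struct system_time_snapshot snap;
 *
 *      ktime_get_snapshot(&snap);
 *      // snap.real, snap.raw and snap.cycles all describe the same
 *      // instant; the *_seq fields let later code detect clocksource
 *      // changes and clock settings since the snapshot was taken.
 */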

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
        u64 tmp, rem;

        tmp = div64_u64_rem(*base, div, &rem);

        if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
            ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
                return -EOVERFLOW;
        tmp *= mult;
        rem *= mult;

        do_div(rem, div);
        *base = tmp + rem;
        return 0;
}
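
/*
 * Worked example (illustrative, assumed numbers): scaling
 * base = 1000000000 by mult/div = 3/7 multiplies the quotient and the
 * remainder separately to postpone overflow:
 *
 *      tmp = 1000000000 / 7 = 142857142, rem = 6
 *      *base = 142857142 * 3 + (6 * 3) / 7 = 428571426 + 2 = 428571428
 *
 * The fls64() tests above reject inputs for which either partial
 * product would no longer fit into 64 bits.
 */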

/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:                    Snapshot representing start of history
 * @partial_history_cycles:     Cycle offset into history (fractional part)
 * @total_history_cycles:       Total history length in cycles
 * @discontinuity:              True indicates clock was set on history period
 * @ts:                         Cross timestamp that should be adjusted using
 *      partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
                                         u64 partial_history_cycles,
                                         u64 total_history_cycles,
                                         bool discontinuity,
                                         struct system_device_crosststamp *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 corr_raw, corr_real;
        bool interp_forward;
        int ret;

        if (total_history_cycles == 0 || partial_history_cycles == 0)
                return 0;

        /* Interpolate shortest distance from beginning or end of history */
        interp_forward = partial_history_cycles > total_history_cycles / 2;
        partial_history_cycles = interp_forward ?
                total_history_cycles - partial_history_cycles :
                partial_history_cycles;

        /*
         * Scale the monotonic raw time delta by:
         *      partial_history_cycles / total_history_cycles
         */
        corr_raw = (u64)ktime_to_ns(
                ktime_sub(ts->sys_monoraw, history->raw));
        ret = scale64_check_overflow(partial_history_cycles,
                                     total_history_cycles, &corr_raw);
        if (ret)
                return ret;

        /*
         * If there is a discontinuity in the history, scale monotonic raw
         *      correction by:
         *      mult(real)/mult(raw) yielding the realtime correction
         * Otherwise, calculate the realtime correction similar to monotonic
         *      raw calculation
         */
        if (discontinuity) {
                corr_real = mul_u64_u32_div
                        (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
        } else {
                corr_real = (u64)ktime_to_ns(
                        ktime_sub(ts->sys_realtime, history->real));
                ret = scale64_check_overflow(partial_history_cycles,
                                             total_history_cycles, &corr_real);
                if (ret)
                        return ret;
        }

        /* Fixup monotonic raw and real time values */
        if (interp_forward) {
                ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
                ts->sys_realtime = ktime_add_ns(history->real, corr_real);
        } else {
                ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
                ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
        }

        return 0;
}

/*
 * cycle_between - true if test occurs chronologically between before and after
 */
static bool cycle_between(u64 before, u64 test, u64 after)
{
        if (test > before && test < after)
                return true;
        if (test < before && before > after)
                return true;
        return false;
}
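
/*
 * Illustrative examples (assumed values) for a counter that may wrap:
 *
 *      cycle_between(100, 150, 200)    // true:  strictly in between
 *      cycle_between(200,  50, 100)    // true:  wrapped past the max
 *      cycle_between(100, 250, 200)    // false: after the interval
 *
 * The second clause catches the wrap case, where "after" is
 * numerically smaller than "before".
 */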

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:        Callback to get simultaneous device time and
 *      system counter from the device driver
 * @ctx:                Context passed to get_time_fn()
 * @history_begin:      Historical reference point used to interpolate system
 *      time when counter provided by the driver is before the current interval
 * @xtstamp:            Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
                                  (ktime_t *device_time,
                                   struct system_counterval_t *sys_counterval,
                                   void *ctx),
                                  void *ctx,
                                  struct system_time_snapshot *history_begin,
                                  struct system_device_crosststamp *xtstamp)
{
        struct system_counterval_t system_counterval;
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 cycles, now, interval_start;
        unsigned int clock_was_set_seq = 0;
        ktime_t base_real, base_raw;
        u64 nsec_real, nsec_raw;
        u8 cs_was_changed_seq;
        unsigned long seq;
        bool do_interp;
        int ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                /*
                 * Try to synchronously capture device time and a system
                 * counter value calling back into the device driver
                 */
                ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
                if (ret)
                        return ret;

                /*
                 * Verify that the clocksource associated with the captured
                 * system counter value is the same as the currently installed
                 * timekeeper clocksource
                 */
                if (tk->tkr_mono.clock != system_counterval.cs)
                        return -ENODEV;
                cycles = system_counterval.cycles;

                /*
                 * Check whether the system counter value provided by the
                 * device driver is on the current timekeeping interval.
                 */
                now = tk->tkr_mono.read(tk->tkr_mono.clock);
                interval_start = tk->tkr_mono.cycle_last;
                if (!cycle_between(interval_start, cycles, now)) {
                        clock_was_set_seq = tk->clock_was_set_seq;
                        cs_was_changed_seq = tk->cs_was_changed_seq;
                        cycles = interval_start;
                        do_interp = true;
                } else {
                        do_interp = false;
                }

                base_real = ktime_add(tk->tkr_mono.base,
                                      tk_core.timekeeper.offs_real);
                base_raw = tk->tkr_raw.base;

                nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
                                                     system_counterval.cycles);
                nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
                                                    system_counterval.cycles);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
        xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

        /*
         * Interpolate if necessary, adjusting back from the start of the
         * current interval
         */
        if (do_interp) {
                u64 partial_history_cycles, total_history_cycles;
                bool discontinuity;

                /*
                 * Check that the counter value occurs after the provided
                 * history reference and that the history doesn't cross a
                 * clocksource change
                 */
                if (!history_begin ||
                    !cycle_between(history_begin->cycles,
                                   system_counterval.cycles, cycles) ||
                    history_begin->cs_was_changed_seq != cs_was_changed_seq)
                        return -EINVAL;
                partial_history_cycles = cycles - system_counterval.cycles;
                total_history_cycles = cycles - history_begin->cycles;
                discontinuity =
                        history_begin->clock_was_set_seq != clock_was_set_seq;

                ret = adjust_historical_crosststamp(history_begin,
                                                    partial_history_cycles,
                                                    total_history_cycles,
                                                    discontinuity, xtstamp);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
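
/*
 * Usage sketch (illustrative; the my_* helpers are made up): a PTP
 * driver supplies a callback that reads the device time and the system
 * counter as close together as the hardware allows:
 *
 *      static int my_get_time(ktime_t *dev,
 *                             struct system_counterval_t *sys, void *ctx)
 *      {
 *              *dev = my_read_device_clock(ctx);
 *              *sys = my_read_system_counter(ctx); // must match tk clocksource
 *              return 0;
 *      }
 *
 *      struct system_device_crosststamp xt;
 *      int err = get_device_system_crosststamp(my_get_time, ctx, NULL, &xt);
 *      // on success, xt.device, xt.sys_realtime and xt.sys_monoraw all
 *      // describe the same instant.
 */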

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:         pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec64 now;

        getnstimeofday64(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday64(const struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 ts_delta, xt;
        unsigned long flags;
        int ret = 0;

        if (!timespec64_valid_strict(ts))
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

        if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
                ret = -EINVAL;
                goto out;
        }

        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

        tk_set_xtime(tk, ts);
out:
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(do_settimeofday64);
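
/*
 * Usage sketch (illustrative; new_sec is an assumed value): setting the
 * wall clock from kernel code, e.g. from an RTC driver:
 *
 *      struct timespec64 ts = { .tv_sec = new_sec, .tv_nsec = 0 };
 *      int err = do_settimeofday64(&ts);
 *      // -EINVAL if ts is malformed or would place CLOCK_REALTIME
 *      // before the monotonic clock's origin (boot).
 */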

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:         pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        struct timespec64 ts64, tmp;
        int ret = 0;

        if (!timespec_inject_offset_valid(ts))
                return -EINVAL;

        ts64 = timespec_to_timespec64(*ts);

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec64_add(tk_xtime(tk), ts64);
        if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
            !timespec64_valid_strict(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, &ts64);
        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
        tk->tai_offset = tai_offset;
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);
        /*
         * If the clocksource is in a module, get a module reference. Succeeds
         * for built-in code (owner == NULL) as well.
         */
        if (try_module_get(new->owner)) {
                if (!new->enable || new->enable(new) == 0) {
                        old = tk->tkr_mono.clock;
                        tk_setup_internals(tk, new);
                        if (old->disable)
                                old->disable(old);
                        module_put(old->owner);
                } else {
                        module_put(new->owner);
                }
        }
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:              pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        if (tk->tkr_mono.clock == clock)
                return 0;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
        return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec64
 * @ts:         pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 ts64;
        unsigned long seq;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                nsecs = timekeeping_get_ns(&tk->tkr_raw);
                ts64 = tk->raw_time;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        timespec64_add_ns(&ts64, nsecs);
        *ts = ts64;
}
EXPORT_SYMBOL(getrawmonotonic64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        int ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ret = tk->tkr_mono.clock->max_idle_ns;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
        struct timespec ts;

        read_persistent_clock(&ts);
        *ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/* Flag for whether timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag for whether there is a persistent clock on this platform */
static bool persistent_clock_exists;
1455
1456 /*
1457  * timekeeping_init - Initializes the clocksource and common timekeeping values
1458  */
1459 void __init timekeeping_init(void)
1460 {
1461         struct timekeeper *tk = &tk_core.timekeeper;
1462         struct clocksource *clock;
1463         unsigned long flags;
1464         struct timespec64 now, boot, tmp;
1465
1466         read_persistent_clock64(&now);
1467         if (!timespec64_valid_strict(&now)) {
1468                 pr_warn("WARNING: Persistent clock returned invalid value!\n"
1469                         "         Check your CMOS/BIOS settings.\n");
1470                 now.tv_sec = 0;
1471                 now.tv_nsec = 0;
1472         } else if (now.tv_sec || now.tv_nsec)
1473                 persistent_clock_exists = true;
1474
1475         read_boot_clock64(&boot);
1476         if (!timespec64_valid_strict(&boot)) {
1477                 pr_warn("WARNING: Boot clock returned invalid value!\n"
1478                         "         Check your CMOS/BIOS settings.\n");
1479                 boot.tv_sec = 0;
1480                 boot.tv_nsec = 0;
1481         }
1482
1483         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1484         write_seqcount_begin(&tk_core.seq);
1485         ntp_init();
1486
1487         clock = clocksource_default_clock();
1488         if (clock->enable)
1489                 clock->enable(clock);
1490         tk_setup_internals(tk, clock);
1491
1492         tk_set_xtime(tk, &now);
1493         tk->raw_time.tv_sec = 0;
1494         tk->raw_time.tv_nsec = 0;
1495         if (boot.tv_sec == 0 && boot.tv_nsec == 0)
1496                 boot = tk_xtime(tk);
1497
1498         set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
1499         tk_set_wall_to_mono(tk, tmp);
1500
1501         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1502
1503         write_seqcount_end(&tk_core.seq);
1504         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1505 }
1506
1507 /* time in seconds when suspend began for persistent clock */
1508 static struct timespec64 timekeeping_suspend_time;
1509
1510 /**
1511  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1512  * @delta: pointer to a timespec delta value
1513  *
1514  * Takes a timespec offset measuring a suspend interval and properly
1515  * adds the sleep offset to the timekeeping variables.
1516  */
1517 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1518                                            struct timespec64 *delta)
1519 {
1520         if (!timespec64_valid_strict(delta)) {
1521                 printk_deferred(KERN_WARNING
1522                                 "__timekeeping_inject_sleeptime: Invalid "
1523                                 "sleep delta value!\n");
1524                 return;
1525         }
1526         tk_xtime_add(tk, delta);
1527         tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1528         tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1529         tk_debug_account_sleep_time(delta);
1530 }
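
/*
 * Worked example (editorial): injecting a 30s sleep interval advances
 * CLOCK_REALTIME by 30s (tk_xtime_add()), leaves CLOCK_MONOTONIC unchanged
 * (wall_to_monotonic shrinks by the same 30s), and advances CLOCK_BOOTTIME
 * by 30s via tk_update_sleep_time(), so boottime keeps counting across
 * suspend while monotonic does not.
 */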
1531
1532 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1533 /**
1534  * We have three kinds of time sources that can be used for sleep time
1535  * injection; the preference order is:
1536  * 1) non-stop clocksource
1537  * 2) persistent clock (i.e. an RTC accessible when irqs are off)
1538  * 3) RTC
1539  *
1540  * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
1541  * If the system has neither 1) nor 2), then 3) is used as the
1542  * fallback.
1543  *
1544  * If timekeeping has injected sleeptime via either 1) or 2),
1545  * 3) becomes redundant, so in that case we don't need to call
1546  * rtc_resume(); this condition is what timekeeping_rtc_skipresume()
1547  * reports.
1548  */
1549 bool timekeeping_rtc_skipresume(void)
1550 {
1551         return sleeptime_injected;
1552 }
1553
1554 /**
1555  * Whether 1) will be used can only be determined in
1556  * timekeeping_resume(), which is invoked after rtc_suspend(),
1557  * so we cannot reliably skip rtc_suspend() if the system has 1).
1558  *
1559  * But if the system has 2), 2) will definitely be used, so in
1560  * that case we don't need to call rtc_suspend(); this is what
1561  * timekeeping_rtc_skipsuspend() indicates.
1562  */
1563 bool timekeeping_rtc_skipsuspend(void)
1564 {
1565         return persistent_clock_exists;
1566 }
1567
1568 /**
1569  * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1570  * @delta: pointer to a timespec64 delta value
1571  *
1572  * This hook is for architectures that cannot support read_persistent_clock64
1573  * because their RTC/persistent clock is only accessible when irqs are enabled,
1574  * and that also lack an effective nonstop clocksource.
1575  *
1576  * This function should only be called by rtc_resume(), and allows
1577  * a suspend offset to be injected into the timekeeping values.
1578  */
1579 void timekeeping_inject_sleeptime64(struct timespec64 *delta)
1580 {
1581         struct timekeeper *tk = &tk_core.timekeeper;
1582         unsigned long flags;
1583
1584         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1585         write_seqcount_begin(&tk_core.seq);
1586
1587         timekeeping_forward_now(tk);
1588
1589         __timekeeping_inject_sleeptime(tk, delta);
1590
1591         timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1592
1593         write_seqcount_end(&tk_core.seq);
1594         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1595
1596         /* signal hrtimers about time change */
1597         clock_was_set();
1598 }
1599 #endif
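
/*
 * Editorial sketch of the caller side: on resume the RTC core computes the
 * slept interval from RTC readings taken around suspend and injects it via
 * the hook above. The names old_rtc/new_rtc are illustrative only, not the
 * actual variables used by the RTC core.
 */
#if 0
static void rtc_resume_sketch(struct timespec64 old_rtc,
                              struct timespec64 new_rtc)
{
        struct timespec64 slept = timespec64_sub(new_rtc, old_rtc);

        if (slept.tv_sec > 0 || slept.tv_nsec > 0)
                timekeeping_inject_sleeptime64(&slept);
}
#endif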
1600
1601 /**
1602  * timekeeping_resume - Resumes the generic timekeeping subsystem.
1603  */
1604 void timekeeping_resume(void)
1605 {
1606         struct timekeeper *tk = &tk_core.timekeeper;
1607         struct clocksource *clock = tk->tkr_mono.clock;
1608         unsigned long flags;
1609         struct timespec64 ts_new, ts_delta;
1610         u64 cycle_now;
1611
1612         sleeptime_injected = false;
1613         read_persistent_clock64(&ts_new);
1614
1615         clockevents_resume();
1616         clocksource_resume();
1617
1618         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1619         write_seqcount_begin(&tk_core.seq);
1620
1621         /*
1622          * After the system resumes, we need to calculate the suspended time
1623          * and compensate the OS time for it. There are 3 sources that could
1624          * be used: a nonstop clocksource during suspend, the persistent
1625          * clock and the rtc device.
1626          *
1627          * A given platform may have one, two, or all of them, and the
1628          * preference will be:
1629          *      suspend-nonstop clocksource -> persistent clock -> rtc
1630          * The less preferred source will only be tried if there is no better
1631          * usable source. The rtc part is handled separately in rtc core code.
1632          */
1633         cycle_now = tk->tkr_mono.read(clock);
1634         if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1635                 cycle_now > tk->tkr_mono.cycle_last) {
1636                 u64 nsec, cyc_delta;
1637
1638                 cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
1639                                               tk->tkr_mono.mask);
1640                 nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
1641                 ts_delta = ns_to_timespec64(nsec);
1642                 sleeptime_injected = true;
1643         } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1644                 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1645                 sleeptime_injected = true;
1646         }
1647
1648         if (sleeptime_injected)
1649                 __timekeeping_inject_sleeptime(tk, &ts_delta);
1650
1651         /* Re-base the last cycle value */
1652         tk->tkr_mono.cycle_last = cycle_now;
1653         tk->tkr_raw.cycle_last  = cycle_now;
1654
1655         tk->ntp_error = 0;
1656         timekeeping_suspended = 0;
1657         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1658         write_seqcount_end(&tk_core.seq);
1659         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1660
1661         touch_softlockup_watchdog();
1662
1663         tick_resume();
1664         hrtimers_resume();
1665 }
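
/*
 * Worked example (editorial) for the nonstop-clocksource path above: for a
 * hypothetical 800 MHz counter with mult = 20971520 and shift = 24 (i.e.
 * 1.25 ns per cycle), a suspend lasting 8,000,000,000 cycles yields
 * nsec = (8000000000 * 20971520) >> 24 = 10,000,000,000 ns = 10 s.
 */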
1666
1667 int timekeeping_suspend(void)
1668 {
1669         struct timekeeper *tk = &tk_core.timekeeper;
1670         unsigned long flags;
1671         struct timespec64               delta, delta_delta;
1672         static struct timespec64        old_delta;
1673
1674         read_persistent_clock64(&timekeeping_suspend_time);
1675
1676         /*
1677          * On some systems the persistent clock cannot be detected in
1678          * timekeeping_init() by its return value, so if we see a valid
1679          * value returned here, update the persistent_clock_exists flag.
1680          */
1681         if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1682                 persistent_clock_exists = true;
1683
1684         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1685         write_seqcount_begin(&tk_core.seq);
1686         timekeeping_forward_now(tk);
1687         timekeeping_suspended = 1;
1688
1689         if (persistent_clock_exists) {
1690                 /*
1691                  * To avoid drift caused by repeated suspend/resumes,
1692                  * which each can add ~1 second drift error,
1693                  * try to compensate so the difference in system time
1694                  * and persistent_clock time stays close to constant.
1695                  */
1696                 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1697                 delta_delta = timespec64_sub(delta, old_delta);
1698                 if (abs(delta_delta.tv_sec) >= 2) {
1699                         /*
1700                          * if delta_delta is too large, assume time correction
1701                          * has occurred and set old_delta to the current delta.
1702                          */
1703                         old_delta = delta;
1704                 } else {
1705                         /* Otherwise adjust timekeeping_suspend_time to compensate */
1706                         timekeeping_suspend_time =
1707                                 timespec64_add(timekeeping_suspend_time, delta_delta);
1708                 }
1709         }
1710
1711         timekeeping_update(tk, TK_MIRROR);
1712         halt_fast_timekeeper(tk);
1713         write_seqcount_end(&tk_core.seq);
1714         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1715
1716         tick_suspend();
1717         clocksource_suspend();
1718         clockevents_suspend();
1719
1720         return 0;
1721 }
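
/*
 * Worked example (editorial) of the drift compensation above: a persistent
 * clock with 1s resolution can make each suspend/resume cycle appear up to
 * ~1s longer or shorter than it really was. If delta (system time minus
 * persistent clock time) was 0.4s at the previous suspend and reads 0.9s
 * now, delta_delta is 0.5s; adding it to timekeeping_suspend_time cancels
 * the rounding error so repeated cycles do not accumulate drift. A
 * delta_delta of 2s or more is instead treated as a genuine clock step and
 * only recorded in old_delta.
 */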
1722
1723 /* sysfs resume/suspend bits for timekeeping */
1724 static struct syscore_ops timekeeping_syscore_ops = {
1725         .resume         = timekeeping_resume,
1726         .suspend        = timekeeping_suspend,
1727 };
1728
1729 static int __init timekeeping_init_ops(void)
1730 {
1731         register_syscore_ops(&timekeeping_syscore_ops);
1732         return 0;
1733 }
1734 device_initcall(timekeeping_init_ops);
1735
1736 /*
1737  * Apply a multiplier adjustment to the timekeeper
1738  */
1739 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1740                                                          s64 offset,
1741                                                          bool negative,
1742                                                          int adj_scale)
1743 {
1744         s64 interval = tk->cycle_interval;
1745         s32 mult_adj = 1;
1746
1747         if (negative) {
1748                 mult_adj = -mult_adj;
1749                 interval = -interval;
1750                 offset  = -offset;
1751         }
1752         mult_adj <<= adj_scale;
1753         interval <<= adj_scale;
1754         offset <<= adj_scale;
1755
1756         /*
1757          * So the following can be confusing.
1758          *
1759          * To keep things simple, let's assume mult_adj == 1 for now.
1760          *
1761          * When mult_adj != 1, remember that the interval and offset values
1762          * have been appropriately scaled so the math is the same.
1763          *
1764          * The basic idea here is that we're increasing the multiplier
1765          * by one, this causes the xtime_interval to be incremented by
1766          * one cycle_interval. This is because:
1767          *      xtime_interval = cycle_interval * mult
1768          * So if mult is being incremented by one:
1769          *      xtime_interval = cycle_interval * (mult + 1)
1770          * It's the same as:
1771          *      xtime_interval = (cycle_interval * mult) + cycle_interval
1772          * Which can be shortened to:
1773          *      xtime_interval += cycle_interval
1774          *
1775          * So offset stores the non-accumulated cycles. Thus the current
1776          * time (in shifted nanoseconds) is:
1777          *      now = (offset * adj) + xtime_nsec
1778          * Now, even though we're adjusting the clock frequency, we have
1779          * to keep time consistent. In other words, we can't jump back
1780          * in time, and we also want to avoid jumping forward in time.
1781          *
1782          * So given the same offset value, we need the time to be the same
1783          * both before and after the freq adjustment.
1784          *      now = (offset * adj_1) + xtime_nsec_1
1785          *      now = (offset * adj_2) + xtime_nsec_2
1786          * So:
1787          *      (offset * adj_1) + xtime_nsec_1 =
1788          *              (offset * adj_2) + xtime_nsec_2
1789          * And we know:
1790          *      adj_2 = adj_1 + 1
1791          * So:
1792          *      (offset * adj_1) + xtime_nsec_1 =
1793          *              (offset * (adj_1+1)) + xtime_nsec_2
1794          *      (offset * adj_1) + xtime_nsec_1 =
1795          *              (offset * adj_1) + offset + xtime_nsec_2
1796          * Canceling the sides:
1797          *      xtime_nsec_1 = offset + xtime_nsec_2
1798          * Which gives us:
1799          *      xtime_nsec_2 = xtime_nsec_1 - offset
1800          * Which simplifies to:
1801          *      xtime_nsec -= offset
1802          *
1803          * XXX - TODO: Doc ntp_error calculation.
1804          */
1805         if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1806                 /* NTP adjustment caused clocksource mult overflow */
1807                 WARN_ON_ONCE(1);
1808                 return;
1809         }
1810
1811         tk->tkr_mono.mult += mult_adj;
1812         tk->xtime_interval += interval;
1813         tk->tkr_mono.xtime_nsec -= offset;
1814         tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1815 }
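
/*
 * Editorial check of the "time stays the same" invariant derived in the
 * comment above, written as plain C with made-up numbers; it is not kernel
 * code and is not compiled.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t mult = 100, offset = 300, xtime_nsec = 5000;
        uint64_t now_before = offset * mult + xtime_nsec;

        mult += 1;              /* adj_2 = adj_1 + 1 */
        xtime_nsec -= offset;   /* xtime_nsec_2 = xtime_nsec_1 - offset */

        assert(now_before == offset * mult + xtime_nsec);
        return 0;
}
#endif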
1816
1817 /*
1818  * Calculate the multiplier adjustment needed to match the frequency
1819  * specified by NTP
1820  */
1821 static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1822                                                         s64 offset)
1823 {
1824         s64 interval = tk->cycle_interval;
1825         s64 xinterval = tk->xtime_interval;
1826         u32 base = tk->tkr_mono.clock->mult;
1827         u32 max = tk->tkr_mono.clock->maxadj;
1828         u32 cur_adj = tk->tkr_mono.mult;
1829         s64 tick_error;
1830         bool negative;
1831         u32 adj_scale;
1832
1833         /* Remove any current error adj from freq calculation */
1834         if (tk->ntp_err_mult)
1835                 xinterval -= tk->cycle_interval;
1836
1837         tk->ntp_tick = ntp_tick_length();
1838
1839         /* Calculate current error per tick */
1840         tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1841         tick_error -= (xinterval + tk->xtime_remainder);
1842
1843         /* Don't worry about correcting it if its small */
1844         if (likely((tick_error >= 0) && (tick_error <= interval)))
1845                 return;
1846
1847         /* preserve the direction of correction */
1848         negative = (tick_error < 0);
1849
1850         /* If any adjustment would pass the max, just return */
1851         if (negative && (cur_adj - 1) <= (base - max))
1852                 return;
1853         if (!negative && (cur_adj + 1) >= (base + max))
1854                 return;
1855         /*
1856          * Sort out the magnitude of the correction, but
1857          * avoid making so large a correction that we go
1858          * over the max adjustment.
1859          */
1860         adj_scale = 0;
1861         tick_error = abs(tick_error);
1862         while (tick_error > interval) {
1863                 u32 adj = 1 << (adj_scale + 1);
1864
1865                 /* Check if adjustment gets us within 1 unit from the max */
1866                 if (negative && (cur_adj - adj) <= (base - max))
1867                         break;
1868                 if (!negative && (cur_adj + adj) >= (base + max))
1869                         break;
1870
1871                 adj_scale++;
1872                 tick_error >>= 1;
1873         }
1874
1875         /* scale the corrections */
1876         timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
1877 }
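
/*
 * Worked example (editorial) for the magnitude loop above: with
 * tick_error = 5 * interval, the loop runs three times (5 -> 2.5 -> 1.25
 * -> 0.625 intervals), leaving adj_scale = 3, so a correction of
 * 1 << 3 = 8 multiplier units is applied in a single step instead of
 * eight single-unit passes, provided it stays below clock->maxadj.
 */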
1878
1879 /*
1880  * Adjust the timekeeper's multiplier to the correct frequency
1881  * and also to reduce the accumulated error value.
1882  */
1883 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1884 {
1885         /* Correct for the current frequency error */
1886         timekeeping_freqadjust(tk, offset);
1887
1888         /* Next make a small adjustment to fix any cumulative error */
1889         if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1890                 tk->ntp_err_mult = 1;
1891                 timekeeping_apply_adjustment(tk, offset, 0, 0);
1892         } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1893                 /* Undo any existing error adjustment */
1894                 timekeeping_apply_adjustment(tk, offset, 1, 0);
1895                 tk->ntp_err_mult = 0;
1896         }
1897
1898         if (unlikely(tk->tkr_mono.clock->maxadj &&
1899                 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
1900                         > tk->tkr_mono.clock->maxadj))) {
1901                 printk_once(KERN_WARNING
1902                         "Adjusting %s more than 11%% (%ld vs %ld)\n",
1903                         tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
1904                         (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
1905         }
1906
1907         /*
1908          * It may be possible that when we entered this function, xtime_nsec
1909          * was very small.  Further, if we're slightly speeding the clocksource
1910          * in the code above, it's possible the required corrective factor to
1911          * xtime_nsec could cause it to underflow.
1912          *
1913          * Now, since we already accumulated the second, we cannot simply roll
1914          * the accumulated second back, since the NTP subsystem has been
1915          * notified via second_overflow. So instead we push xtime_nsec forward
1916          * by the amount we underflowed, and add that amount into the error.
1917          *
1918          * We'll correct this error next time through this function, when
1919          * xtime_nsec is not as small.
1920          */
1921         if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
1922                 s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
1923                 tk->tkr_mono.xtime_nsec = 0;
1924                 tk->ntp_error += neg << tk->ntp_error_shift;
1925         }
1926 }
1927
1928 /**
1929  * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1930  *
1931  * Helper function that accumulates the nsecs greater than a second
1932  * from the xtime_nsec field to the xtime_sec field.
1933  * It also calls into the NTP code to handle leapsecond processing.
1934  *
1935  */
1936 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1937 {
1938         u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1939         unsigned int clock_set = 0;
1940
1941         while (tk->tkr_mono.xtime_nsec >= nsecps) {
1942                 int leap;
1943
1944                 tk->tkr_mono.xtime_nsec -= nsecps;
1945                 tk->xtime_sec++;
1946
1947                 /* Figure out if it's a leap second and apply it if needed */
1948                 leap = second_overflow(tk->xtime_sec);
1949                 if (unlikely(leap)) {
1950                         struct timespec64 ts;
1951
1952                         tk->xtime_sec += leap;
1953
1954                         ts.tv_sec = leap;
1955                         ts.tv_nsec = 0;
1956                         tk_set_wall_to_mono(tk,
1957                                 timespec64_sub(tk->wall_to_monotonic, ts));
1958
1959                         __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1960
1961                         clock_set = TK_CLOCK_WAS_SET;
1962                 }
1963         }
1964         return clock_set;
1965 }
1966
1967 /**
1968  * logarithmic_accumulation - shifted accumulation of cycles
1969  *
1970  * This function accumulates a shifted interval of cycles into
1971  * a shifted interval of nanoseconds, allowing for an O(log)
1972  * accumulation loop.
1973  *
1974  * Returns the unconsumed cycles.
1975  */
1976 static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
1977                                     u32 shift, unsigned int *clock_set)
1978 {
1979         u64 interval = tk->cycle_interval << shift;
1980         u64 raw_nsecs;
1981
1982         /* If the offset is smaller than a shifted interval, do nothing */
1983         if (offset < interval)
1984                 return offset;
1985
1986         /* Accumulate one shifted interval */
1987         offset -= interval;
1988         tk->tkr_mono.cycle_last += interval;
1989         tk->tkr_raw.cycle_last  += interval;
1990
1991         tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
1992         *clock_set |= accumulate_nsecs_to_secs(tk);
1993
1994         /* Accumulate raw time */
1995         raw_nsecs = (u64)tk->raw_interval << shift;
1996         raw_nsecs += tk->raw_time.tv_nsec;
1997         if (raw_nsecs >= NSEC_PER_SEC) {
1998                 u64 raw_secs = raw_nsecs;
1999                 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
2000                 tk->raw_time.tv_sec += raw_secs;
2001         }
2002         tk->raw_time.tv_nsec = raw_nsecs;
2003
2004         /* Accumulate error between NTP and clock interval */
2005         tk->ntp_error += tk->ntp_tick << shift;
2006         tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2007                                                 (tk->ntp_error_shift + shift);
2008
2009         return offset;
2010 }
2011
2012 /**
2013  * update_wall_time - Uses the current clocksource to increment the wall time
2014  *
2015  */
2016 void update_wall_time(void)
2017 {
2018         struct timekeeper *real_tk = &tk_core.timekeeper;
2019         struct timekeeper *tk = &shadow_timekeeper;
2020         u64 offset;
2021         int shift = 0, maxshift;
2022         unsigned int clock_set = 0;
2023         unsigned long flags;
2024
2025         raw_spin_lock_irqsave(&timekeeper_lock, flags);
2026
2027         /* Make sure we're fully resumed: */
2028         if (unlikely(timekeeping_suspended))
2029                 goto out;
2030
2031 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2032         offset = real_tk->cycle_interval;
2033 #else
2034         offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
2035                                    tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2036 #endif
2037
2038         /* Check if there's really nothing to do */
2039         if (offset < real_tk->cycle_interval)
2040                 goto out;
2041
2042         /* Do some additional sanity checking */
2043         timekeeping_check_update(real_tk, offset);
2044
2045         /*
2046          * With NO_HZ we may have to accumulate many cycle_intervals
2047          * (think "ticks") worth of time at once. To do this efficiently,
2048          * we calculate the largest doubling multiple of cycle_intervals
2049          * that is smaller than the offset.  We then accumulate that
2050          * chunk in one go, and then try to consume the next smaller
2051          * doubled multiple.
2052          */
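        /*
         * Worked example (editorial): with offset = 1000 intervals the
         * first pass uses shift = ilog2(1000) = 9 and consumes 512
         * intervals; the remainder is consumed as 256 + 128 + 64 + 32 + 8
         * with shift stepping down between passes, i.e. O(log N) passes
         * rather than 1000 single-interval ticks.
         */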
2053         shift = ilog2(offset) - ilog2(tk->cycle_interval);
2054         shift = max(0, shift);
2055         /* Bound shift to one less than what overflows tick_length */
2056         maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2057         shift = min(shift, maxshift);
2058         while (offset >= tk->cycle_interval) {
2059                 offset = logarithmic_accumulation(tk, offset, shift,
2060                                                         &clock_set);
2061                 if (offset < tk->cycle_interval<<shift)
2062                         shift--;
2063         }
2064
2065         /* correct the clock when NTP error is too big */
2066         timekeeping_adjust(tk, offset);
2067
2068         /*
2069          * XXX This can be killed once everyone converts
2070          * to the new update_vsyscall.
2071          */
2072         old_vsyscall_fixup(tk);
2073
2074         /*
2075          * Finally, make sure that after the rounding
2076          * xtime_nsec isn't larger than NSEC_PER_SEC
2077          */
2078         clock_set |= accumulate_nsecs_to_secs(tk);
2079
2080         write_seqcount_begin(&tk_core.seq);
2081         /*
2082          * Update the real timekeeper.
2083          *
2084          * We could avoid this memcpy by switching pointers, but that
2085          * requires changes to all other timekeeper usage sites as
2086          * well, i.e. move the timekeeper pointer getter into the
2087          * spinlocked/seqcount protected sections. And we trade this
2088          * memcpy under the tk_core.seq against one before we start
2089          * updating.
2090          */
2091         timekeeping_update(tk, clock_set);
2092         memcpy(real_tk, tk, sizeof(*tk));
2093         /* The memcpy must come last. Do not put anything here! */
2094         write_seqcount_end(&tk_core.seq);
2095 out:
2096         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2097         if (clock_set)
2098                 /* Have to call the _delayed version, since we are in irq context */
2099                 clock_was_set_delayed();
2100 }
2101
2102 /**
2103  * getboottime64 - Return the real time of system boot.
2104  * @ts:         pointer to the timespec64 to be set
2105  *
2106  * Returns the wall-time of boot in a timespec64.
2107  *
2108  * This is based on the wall_to_monotonic offset and the total suspend
2109  * time. Calls to settimeofday will affect the value returned (which
2110  * basically means that however wrong your real time clock is at boot time,
2111  * you get the right time here).
2112  */
2113 void getboottime64(struct timespec64 *ts)
2114 {
2115         struct timekeeper *tk = &tk_core.timekeeper;
2116         ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2117
2118         *ts = ktime_to_timespec64(t);
2119 }
2120 EXPORT_SYMBOL_GPL(getboottime64);
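
/*
 * Worked example (editorial): if CLOCK_REALTIME currently leads
 * CLOCK_MONOTONIC by offs_real = 1,600,000,100 s and the machine has spent
 * offs_boot = 100 s suspended, then boot happened at wall time
 * 1,600,000,100 - 100 = 1,600,000,000 s. settimeofday() moves offs_real
 * and therefore this result along with it, as noted above.
 */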
2121
2122 unsigned long get_seconds(void)
2123 {
2124         struct timekeeper *tk = &tk_core.timekeeper;
2125
2126         return tk->xtime_sec;
2127 }
2128 EXPORT_SYMBOL(get_seconds);
2129
2130 struct timespec __current_kernel_time(void)
2131 {
2132         struct timekeeper *tk = &tk_core.timekeeper;
2133
2134         return timespec64_to_timespec(tk_xtime(tk));
2135 }
2136
2137 struct timespec64 current_kernel_time64(void)
2138 {
2139         struct timekeeper *tk = &tk_core.timekeeper;
2140         struct timespec64 now;
2141         unsigned long seq;
2142
2143         do {
2144                 seq = read_seqcount_begin(&tk_core.seq);
2145
2146                 now = tk_xtime(tk);
2147         } while (read_seqcount_retry(&tk_core.seq, seq));
2148
2149         return now;
2150 }
2151 EXPORT_SYMBOL(current_kernel_time64);
2152
2153 struct timespec64 get_monotonic_coarse64(void)
2154 {
2155         struct timekeeper *tk = &tk_core.timekeeper;
2156         struct timespec64 now, mono;
2157         unsigned long seq;
2158
2159         do {
2160                 seq = read_seqcount_begin(&tk_core.seq);
2161
2162                 now = tk_xtime(tk);
2163                 mono = tk->wall_to_monotonic;
2164         } while (read_seqcount_retry(&tk_core.seq, seq));
2165
2166         set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
2167                                 now.tv_nsec + mono.tv_nsec);
2168
2169         return now;
2170 }
2171 EXPORT_SYMBOL(get_monotonic_coarse64);
2172
2173 /*
2174  * Must hold jiffies_lock
2175  */
2176 void do_timer(unsigned long ticks)
2177 {
2178         jiffies_64 += ticks;
2179         calc_global_load(ticks);
2180 }
2181
2182 /**
2183  * ktime_get_update_offsets_now - hrtimer helper
2184  * @cwsseq:     pointer to check and store the clock was set sequence number
2185  * @offs_real:  pointer to storage for monotonic -> realtime offset
2186  * @offs_boot:  pointer to storage for monotonic -> boottime offset
2187  * @offs_tai:   pointer to storage for monotonic -> clock tai offset
2188  *
2189  * Returns current monotonic time and updates the offsets if the
2190  * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2191  * different.
2192  *
2193  * Called from hrtimer_interrupt() or retrigger_next_event()
2194  */
2195 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2196                                      ktime_t *offs_boot, ktime_t *offs_tai)
2197 {
2198         struct timekeeper *tk = &tk_core.timekeeper;
2199         unsigned int seq;
2200         ktime_t base;
2201         u64 nsecs;
2202
2203         do {
2204                 seq = read_seqcount_begin(&tk_core.seq);
2205
2206                 base = tk->tkr_mono.base;
2207                 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2208                 base = ktime_add_ns(base, nsecs);
2209
2210                 if (*cwsseq != tk->clock_was_set_seq) {
2211                         *cwsseq = tk->clock_was_set_seq;
2212                         *offs_real = tk->offs_real;
2213                         *offs_boot = tk->offs_boot;
2214                         *offs_tai = tk->offs_tai;
2215                 }
2216
2217                 /* Handle leapsecond insertion adjustments */
2218                 if (unlikely(base >= tk->next_leap_ktime))
2219                         *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2220
2221         } while (read_seqcount_retry(&tk_core.seq, seq));
2222
2223         return base;
2224 }
2225
2226 /**
2227  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2228  */
2229 int do_adjtimex(struct timex *txc)
2230 {
2231         struct timekeeper *tk = &tk_core.timekeeper;
2232         unsigned long flags;
2233         struct timespec64 ts;
2234         s32 orig_tai, tai;
2235         int ret;
2236
2237         /* Validate the data before disabling interrupts */
2238         ret = ntp_validate_timex(txc);
2239         if (ret)
2240                 return ret;
2241
2242         if (txc->modes & ADJ_SETOFFSET) {
2243                 struct timespec delta;
2244                 delta.tv_sec  = txc->time.tv_sec;
2245                 delta.tv_nsec = txc->time.tv_usec;
2246                 if (!(txc->modes & ADJ_NANO))
2247                         delta.tv_nsec *= 1000;
2248                 ret = timekeeping_inject_offset(&delta);
2249                 if (ret)
2250                         return ret;
2251         }
2252
2253         getnstimeofday64(&ts);
2254
2255         raw_spin_lock_irqsave(&timekeeper_lock, flags);
2256         write_seqcount_begin(&tk_core.seq);
2257
2258         orig_tai = tai = tk->tai_offset;
2259         ret = __do_adjtimex(txc, &ts, &tai);
2260
2261         if (tai != orig_tai) {
2262                 __timekeeping_set_tai_offset(tk, tai);
2263                 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2264         }
2265         tk_update_leap_state(tk);
2266
2267         write_seqcount_end(&tk_core.seq);
2268         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2269
2270         if (tai != orig_tai)
2271                 clock_was_set();
2272
2273         ntp_notify_cmos_timer();
2274
2275         return ret;
2276 }
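
/*
 * Editorial sketch of a userspace caller reaching the function above via
 * the adjtimex(2) syscall, stepping the clock forward by 1.5 ms. With
 * ADJ_NANO set, time.tv_usec carries nanoseconds, matching the
 * ADJ_SETOFFSET branch above. Not kernel code.
 */
#if 0
#include <string.h>
#include <sys/timex.h>

int step_clock_forward(void)
{
        struct timex tx;

        memset(&tx, 0, sizeof(tx));
        tx.modes = ADJ_SETOFFSET | ADJ_NANO;
        tx.time.tv_sec = 0;
        tx.time.tv_usec = 1500000;      /* 1.5 ms, in nanoseconds */

        return adjtimex(&tx);
}
#endif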
2277
2278 #ifdef CONFIG_NTP_PPS
2279 /**
2280  * hardpps() - Accessor function to NTP __hardpps function
2281  */
2282 void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2283 {
2284         unsigned long flags;
2285
2286         raw_spin_lock_irqsave(&timekeeper_lock, flags);
2287         write_seqcount_begin(&tk_core.seq);
2288
2289         __hardpps(phase_ts, raw_ts);
2290
2291         write_seqcount_end(&tk_core.seq);
2292         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2293 }
2294 EXPORT_SYMBOL(hardpps);
2295 #endif
2296
2297 /**
2298  * xtime_update() - advances the timekeeping infrastructure
2299  * @ticks:      number of ticks that have elapsed since the last call.
2300  *
2301  * Must be called with interrupts disabled.
2302  */
2303 void xtime_update(unsigned long ticks)
2304 {
2305         write_seqlock(&jiffies_lock);
2306         do_timer(ticks);
2307         write_sequnlock(&jiffies_lock);
2308         update_wall_time();
2309 }