// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel timekeeping code and accessor functions. Based on code from
 *  timer.c, moved in commit 8524070b7982.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>
#include <linux/audit.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP            (1 << 0)
#define TK_MIRROR               (1 << 1)
#define TK_CLOCK_WAS_SET        (1 << 2)

enum timekeeping_adv_mode {
        /* Update timekeeper when a tick has passed */
        TK_ADV_TICK,

        /* Update timekeeper on a direct frequency change */
        TK_ADV_FREQ
};

static DEFINE_RAW_SPINLOCK(timekeeper_lock);

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
        seqcount_raw_spinlock_t seq;
        struct timekeeper       timekeeper;
} tk_core ____cacheline_aligned = {
        .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
};

static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:        Sequence counter for protecting updates. The lowest bit
 *              is the index for the tk_read_base array
 * @base:       tk_read_base array. Access is indexed by the lowest bit of
 *              @seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
        seqcount_raw_spinlock_t seq;
        struct tk_read_base     base[2];
};

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
        return cycles_at_suspend;
}

static struct clocksource dummy_clock = {
        .read = dummy_clock_read,
};

static struct tk_fast tk_fast_mono ____cacheline_aligned = {
        .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
        .base[0] = { .clock = &dummy_clock, },
        .base[1] = { .clock = &dummy_clock, },
};

static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
        .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
        .base[0] = { .clock = &dummy_clock, },
        .base[1] = { .clock = &dummy_clock, },
};

/* Flag for whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

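/*
 * Note: tkr_mono.xtime_nsec and tkr_raw.xtime_nsec hold nanoseconds
 * left-shifted by the clocksource shift, so one second corresponds to
 * ((u64)NSEC_PER_SEC << shift). The helper below carries whole seconds
 * out of that shifted remainder into xtime_sec/raw_sec. For example,
 * with shift = 8, half a second of remainder is stored as
 * (NSEC_PER_SEC / 2) << 8.
 */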
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
                tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                tk->xtime_sec++;
        }
        while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
                tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
                tk->raw_sec++;
        }
}

static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
{
        struct timespec64 ts;

        ts.tv_sec = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
        tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
        struct timespec64 tmp;

        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
                                        -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec64_to_ktime(tmp);
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
        tk->offs_boot = ktime_add(tk->offs_boot, delta);
        /*
         * Timespec representation for VDSO update to avoid 64bit division
         * on every update.
         */
        tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
}

/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * This helper is necessary in the read paths because, while the seqcount
 * ensures we don't return a bad value while structures are updated, it
 * doesn't protect against potential crashes. There is the possibility
 * that the tkr's clocksource may change between the read reference and
 * the clock reference passed to the read function. This can cause crashes
 * if the wrong clocksource is passed to the wrong read function.
 * This isn't necessary when holding the timekeeper_lock or doing a read
 * of the fast-timekeeper tkrs (which is protected by its own locking and
 * update logic).
 */
static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
        struct clocksource *clock = READ_ONCE(tkr->clock);

        return clock->read(clock);
}
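
/*
 * Illustrative sketch (hypothetical helper, not part of this file): an
 * open-coded reader that loads tkr->clock more than once can pair a stale
 * clocksource with a new read() (or vice versa) if an update lands between
 * the loads, which is exactly the crash scenario described above:
 */
#if 0
static u64 example_racy_clock_read(const struct tk_read_base *tkr)
{
        struct clocksource *clock = tkr->clock;         /* first load */

        /* BAD: tkr->clock may have changed since the first load */
        return tkr->clock->read(clock);                 /* second load */
}
#endif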

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
        u64 max_cycles = tk->tkr_mono.clock->max_cycles;
        const char *name = tk->tkr_mono.clock->name;

        if (offset > max_cycles) {
                printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
                                offset, name, max_cycles);
                printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
        } else {
                if (offset > (max_cycles >> 1)) {
                        printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
                                        offset, name, max_cycles >> 1);
                        printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
                }
        }

        if (tk->underflow_seen) {
                if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
                        tk->last_warning = jiffies;
                }
                tk->underflow_seen = 0;
        }

        if (tk->overflow_seen) {
                if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
                        tk->last_warning = jiffies;
                }
                tk->overflow_seen = 0;
        }
}

static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 now, last, mask, max, delta;
        unsigned int seq;

        /*
         * Since we're called holding a seqcount, the data may shift
         * under us while we're doing the calculation. This can cause
         * false positives, since we'd note a problem but throw the
         * results away. So nest another seqcount here to atomically
         * snapshot the values we are checking.
         */
        do {
                seq = read_seqcount_begin(&tk_core.seq);
                now = tk_clock_read(tkr);
                last = tkr->cycle_last;
                mask = tkr->mask;
                max = tkr->clock->max_cycles;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        delta = clocksource_delta(now, last, mask);

        /*
         * Try to catch underflows by checking if we are seeing small
         * mask-relative negative values.
         */
        if (unlikely((~delta & mask) < (mask >> 3))) {
                tk->underflow_seen = 1;
                delta = 0;
        }

        /* Cap delta to the max_cycles value to avoid mult overflows */
        if (unlikely(delta > max)) {
                tk->overflow_seen = 1;
                delta = tkr->clock->max_cycles;
        }

        return delta;
}
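
/*
 * Worked example of the underflow check above, assuming clocksource_delta()
 * is the plain masked subtraction (now - last) & mask: with
 * mask = 0xffffffff, now = 5 and last = 10, delta = 0xfffffffb. Then
 * ~delta & mask = 4, far below mask >> 3, so the small negative delta is
 * detected and clamped to 0.
 */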
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
        u64 cycle_now, delta;

        /* read clocksource */
        cycle_now = tk_clock_read(tkr);

        /* calculate the delta since the last update_wall_time */
        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

        return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:         The target timekeeper to setup.
 * @clock:      Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        u64 interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        ++tk->cs_was_changed_seq;
        old_clock = tk->tkr_mono.clock;
        tk->tkr_mono.clock = clock;
        tk->tkr_mono.mask = clock->mask;
        tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

        tk->tkr_raw.clock = clock;
        tk->tkr_raw.mask = clock->mask;
        tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
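        /*
         * Adding mult/2 before the division below rounds the ns -> cycle
         * conversion to the nearest cycle instead of truncating toward zero.
         */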
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (u64) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval = interval * clock->mult;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0) {
                        tk->tkr_mono.xtime_nsec >>= -shift_change;
                        tk->tkr_raw.xtime_nsec >>= -shift_change;
                } else {
                        tk->tkr_mono.xtime_nsec <<= shift_change;
                        tk->tkr_raw.xtime_nsec <<= shift_change;
                }
        }

        tk->tkr_mono.shift = clock->shift;
        tk->tkr_raw.shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
        tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->tkr_mono.mult = clock->mult;
        tk->tkr_raw.mult = clock->mult;
        tk->ntp_err_mult = 0;
        tk->skip_second_overflow = 0;
}

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
{
        u64 nsec;

        nsec = delta * tkr->mult + tkr->xtime_nsec;
        nsec >>= tkr->shift;

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + arch_gettimeoffset();
}
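
/*
 * Worked example of the mult/shift arithmetic above (illustrative numbers):
 * a 10 MHz clocksource with shift = 24 gets
 * mult = (NSEC_PER_SEC << 24) / 10000000 = 100 << 24, so a delta of
 * 5 cycles yields (5 * (100 << 24)) >> 24 = 500 ns, i.e. 100 ns per cycle.
 * Since xtime_nsec is stored left-shifted by shift, adding it before the
 * final shift preserves the sub-nanosecond remainder across updates.
 */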

static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
{
        u64 delta;

        delta = timekeeping_get_delta(tkr);
        return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
{
        u64 delta;

        /* calculate the delta since the last update_wall_time */
        delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
        return timekeeping_delta_to_ns(tkr, delta);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(const struct tk_read_base *tkr,
                                   struct tk_fast *tkf)
{
        struct tk_read_base *base = tkf->base;

        /* Force readers off to base[1] */
        raw_write_seqcount_latch(&tkf->seq);

        /* Update base[0] */
        memcpy(base, tkr, sizeof(*base));

        /* Force readers back to base[0] */
        raw_write_seqcount_latch(&tkf->seq);

        /* Update base[1] */
        memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *      now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
        struct tk_read_base *tkr;
        unsigned int seq;
        u64 now;

        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base);

                now += timekeeping_delta_to_ns(tkr,
                                clocksource_delta(
                                        tk_clock_read(tkr),
                                        tkr->cycle_last,
                                        tkr->mask));
        } while (read_seqcount_retry(&tkf->seq, seq));

        return now;
}

u64 ktime_get_mono_fast_ns(void)
{
        return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
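
/*
 * Usage sketch (hypothetical caller, not part of this file): the fast
 * accessors exist for contexts where taking tk_core.seq is not safe,
 * e.g. NMI handlers or tracing hooks that can interrupt the timekeeping
 * update itself.
 */
#if 0
static void example_nmi_timestamp(void)
{
        u64 now = ktime_get_mono_fast_ns();     /* safe even in NMI context */

        trace_printk("NMI observed at %llu ns\n", now);
}
#endif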

u64 ktime_get_raw_fast_ns(void)
{
        return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqcounts. This has the following minor side effects:
 *
 * (1) It's possible for a timestamp to be taken after the boot offset is
 * updated but before the timekeeper is updated. If this happens, the new
 * boot offset is added to the old timekeeping making the clock appear to
 * update slightly earlier:
 *    CPU 0                                        CPU 1
 *    timekeeping_inject_sleeptime64()
 *    __timekeeping_inject_sleeptime(tk, delta);
 *                                                 timestamp();
 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated.  Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/*
 * See comment for __ktime_get_fast_ns() vs. timestamp ordering
 */
static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
{
        struct tk_read_base *tkr;
        unsigned int seq;
        u64 now;

        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base_real);

                now += timekeeping_delta_to_ns(tkr,
                                clocksource_delta(
                                        tk_clock_read(tkr),
                                        tkr->cycle_last,
                                        tkr->mask));
        } while (read_seqcount_retry(&tkf->seq, seq));

        return now;
}

/**
 * ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
 */
u64 ktime_get_real_fast_ns(void)
{
        return __ktime_get_real_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(const struct timekeeper *tk)
{
        static struct tk_read_base tkr_dummy;
        const struct tk_read_base *tkr = &tk->tkr_mono;

        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        cycles_at_suspend = tk_clock_read(tkr);
        tkr_dummy.clock = &dummy_clock;
        tkr_dummy.base_real = tkr->base + tk->offs_real;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

        tkr = &tk->tkr_raw;
        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        tkr_dummy.clock = &dummy_clock;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        update_pvclock_gtod(tk, true);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
        tk->next_leap_ktime = ntp_get_next_leap();
        if (tk->next_leap_ktime != KTIME_MAX)
                /* Convert to monotonic time */
                tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
        u64 seconds;
        u32 nsec;

        /*
         * The xtime based monotonic readout is:
         *      nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
         * The ktime based monotonic readout is:
         *      nsec = base_mono + now();
         * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
         */
        seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
        nsec = (u32) tk->wall_to_monotonic.tv_nsec;
        tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

        /*
         * The sum of the nanoseconds portions of xtime and
         * wall_to_monotonic can be greater/equal one second. Take
         * this into account before updating tk->ktime_sec.
         */
        nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        if (nsec >= NSEC_PER_SEC)
                seconds++;
        tk->ktime_sec = seconds;

        /* Update the monotonic raw base */
        tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
        if (action & TK_CLEAR_NTP) {
                tk->ntp_error = 0;
                ntp_clear();
        }

        tk_update_leap_state(tk);
        tk_update_ktime_data(tk);

        update_vsyscall(tk);
        update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

        tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
        update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
        update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

        if (action & TK_CLOCK_WAS_SET)
                tk->clock_was_set_seq++;
        /*
         * The mirroring of the data to the shadow-timekeeper needs
         * to happen last here to ensure we don't overwrite the
         * timekeeper structure on the next update with stale data
         */
        if (action & TK_MIRROR)
                memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        u64 cycle_now, delta;

        cycle_now = tk_clock_read(&tk->tkr_mono);
        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
        tk->tkr_mono.cycle_last = cycle_now;
        tk->tkr_raw.cycle_last  = cycle_now;

        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

        /* If arch requires, add in get_arch_timeoffset() */
        tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

        tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

        /* If arch requires, add in get_arch_timeoffset() */
        tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

        tk_normalize_xtime(tk);
}

/**
 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 * @ts:         pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void ktime_get_real_ts64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_real_ts64);

ktime_t ktime_get(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_mono.base;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
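
/*
 * Usage sketch (hypothetical caller, not part of this file): measuring an
 * elapsed interval with the monotonic clock.
 */
#if 0
static u64 example_measure_ns(void (*work)(void))
{
        ktime_t start = ktime_get();

        work();
        return ktime_to_ns(ktime_sub(ktime_get(), start));
}
#endif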

u32 ktime_get_resolution_ns(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u32 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
        [TK_OFFS_REAL]  = &tk_core.timekeeper.offs_real,
        [TK_OFFS_BOOT]  = &tk_core.timekeeper.offs_boot,
        [TK_OFFS_TAI]   = &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base, *offset = offsets[offs];
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = ktime_add(tk->tkr_mono.base, *offset);
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base, *offset = offsets[offs];
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = ktime_add(tk->tkr_mono.base, *offset);
                nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:      time to convert.
 * @offs:       which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
        ktime_t *offset = offsets[offs];
        unsigned int seq;
        ktime_t tconv;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                tconv = ktime_add(tmono, *offset);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
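
/*
 * Usage sketch (hypothetical caller, not part of this file): mapping an
 * already captured CLOCK_MONOTONIC timestamp onto CLOCK_BOOTTIME.
 */
#if 0
static ktime_t example_mono_to_boot(ktime_t mono)
{
        return ktime_mono_to_any(mono, TK_OFFS_BOOT);
}
#endif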

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_raw.base;
                nsecs = timekeeping_get_ns(&tk->tkr_raw);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:         pointer to timespec64 variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 tomono;
        unsigned int seq;
        u64 nsec;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(&tk->tkr_mono);
                tomono = tk->wall_to_monotonic;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wraparounds.
 */
time64_t ktime_get_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        WARN_ON(timekeeping_suspended);
        return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        time64_t seconds;
        unsigned int seq;

        if (IS_ENABLED(CONFIG_64BIT))
                return tk->xtime_sec;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                seconds = tk->xtime_sec;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called when the timekeeping lock is already held.
 */
noinstr time64_t __ktime_get_real_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:   pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base_raw;
        ktime_t base_real;
        u64 nsec_raw;
        u64 nsec_real;
        u64 now;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                now = tk_clock_read(&tk->tkr_mono);
                systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
                systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
                base_real = ktime_add(tk->tkr_mono.base,
                                      tk_core.timekeeper.offs_real);
                base_raw = tk->tkr_raw.base;
                nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
                nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        systime_snapshot->cycles = now;
        systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
        systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
        u64 tmp, rem;

        tmp = div64_u64_rem(*base, div, &rem);

        if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
            ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
                return -EOVERFLOW;
        tmp *= mult;

        rem = div64_u64(rem * mult, div);
        *base = tmp + rem;
        return 0;
}
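
/*
 * Worked example (illustrative numbers): scaling *base = 10000000000 by
 * mult = 3, div = 4. div64_u64_rem() yields tmp = 2500000000 and rem = 0;
 * the fls64() checks confirm that neither tmp * mult nor rem * mult can
 * overflow 64 bits, so *base becomes 7500000000.
 */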

/**
 * adjust_historical_crosststamp - adjust crosstimestamp prior to current interval
 * @history:                    Snapshot representing start of history
 * @partial_history_cycles:     Cycle offset into history (fractional part)
 * @total_history_cycles:       Total history length in cycles
 * @discontinuity:              True indicates clock was set during the history period
 * @ts:                         Cross timestamp that should be adjusted using
 *      partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
                                         u64 partial_history_cycles,
                                         u64 total_history_cycles,
                                         bool discontinuity,
                                         struct system_device_crosststamp *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 corr_raw, corr_real;
        bool interp_forward;
        int ret;

        if (total_history_cycles == 0 || partial_history_cycles == 0)
                return 0;

        /* Interpolate shortest distance from beginning or end of history */
        interp_forward = partial_history_cycles > total_history_cycles / 2;
        partial_history_cycles = interp_forward ?
                total_history_cycles - partial_history_cycles :
                partial_history_cycles;

        /*
         * Scale the monotonic raw time delta by:
         *      partial_history_cycles / total_history_cycles
         */
        corr_raw = (u64)ktime_to_ns(
                ktime_sub(ts->sys_monoraw, history->raw));
        ret = scale64_check_overflow(partial_history_cycles,
                                     total_history_cycles, &corr_raw);
        if (ret)
                return ret;

        /*
         * If there is a discontinuity in the history, scale monotonic raw
         *      correction by:
         *      mult(real)/mult(raw) yielding the realtime correction
         * Otherwise, calculate the realtime correction similar to monotonic
         *      raw calculation
         */
        if (discontinuity) {
                corr_real = mul_u64_u32_div
                        (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
        } else {
                corr_real = (u64)ktime_to_ns(
                        ktime_sub(ts->sys_realtime, history->real));
                ret = scale64_check_overflow(partial_history_cycles,
                                             total_history_cycles, &corr_real);
                if (ret)
                        return ret;
        }

        /* Fixup monotonic raw and real time values */
        if (interp_forward) {
                ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
                ts->sys_realtime = ktime_add_ns(history->real, corr_real);
        } else {
                ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
                ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
        }

        return 0;
}

/*
 * cycle_between - true if test occurs chronologically between before and after
 */
static bool cycle_between(u64 before, u64 test, u64 after)
{
        if (test > before && test < after)
                return true;
        if (test < before && before > after)
                return true;
        return false;
}
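
/*
 * Worked example of the wrap case above (illustrative numbers): with
 * before = 0xfffffff0, test = 0x10 and after = 0x100, the counter wrapped
 * between before and after. The first condition fails (test < before),
 * but the second (test < before && before > after) still reports test as
 * lying inside the interval.
 */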

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:        Callback to get simultaneous device time and
 *      system counter from the device driver
 * @ctx:                Context passed to get_time_fn()
 * @history_begin:      Historical reference point used to interpolate system
 *      time when counter provided by the driver is before the current interval
 * @xtstamp:            Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
                                  (ktime_t *device_time,
                                   struct system_counterval_t *sys_counterval,
                                   void *ctx),
                                  void *ctx,
                                  struct system_time_snapshot *history_begin,
                                  struct system_device_crosststamp *xtstamp)
{
        struct system_counterval_t system_counterval;
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 cycles, now, interval_start;
        unsigned int clock_was_set_seq = 0;
        ktime_t base_real, base_raw;
        u64 nsec_real, nsec_raw;
        u8 cs_was_changed_seq;
        unsigned int seq;
        bool do_interp;
        int ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                /*
                 * Try to synchronously capture device time and a system
                 * counter value calling back into the device driver
                 */
                ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
                if (ret)
                        return ret;

                /*
                 * Verify that the clocksource associated with the captured
                 * system counter value is the same as the currently installed
                 * timekeeper clocksource
                 */
                if (tk->tkr_mono.clock != system_counterval.cs)
                        return -ENODEV;
                cycles = system_counterval.cycles;

                /*
                 * Check whether the system counter value provided by the
                 * device driver is on the current timekeeping interval.
                 */
                now = tk_clock_read(&tk->tkr_mono);
                interval_start = tk->tkr_mono.cycle_last;
                if (!cycle_between(interval_start, cycles, now)) {
                        clock_was_set_seq = tk->clock_was_set_seq;
                        cs_was_changed_seq = tk->cs_was_changed_seq;
                        cycles = interval_start;
                        do_interp = true;
                } else {
                        do_interp = false;
                }

                base_real = ktime_add(tk->tkr_mono.base,
                                      tk_core.timekeeper.offs_real);
                base_raw = tk->tkr_raw.base;

                nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
                                                     system_counterval.cycles);
                nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
                                                    system_counterval.cycles);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
        xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

        /*
         * Interpolate if necessary, adjusting back from the start of the
         * current interval
         */
        if (do_interp) {
                u64 partial_history_cycles, total_history_cycles;
                bool discontinuity;

                /*
                 * Check that the counter value occurs after the provided
                 * history reference and that the history doesn't cross a
                 * clocksource change
                 */
                if (!history_begin ||
                    !cycle_between(history_begin->cycles,
                                   system_counterval.cycles, cycles) ||
                    history_begin->cs_was_changed_seq != cs_was_changed_seq)
                        return -EINVAL;
                partial_history_cycles = cycles - system_counterval.cycles;
                total_history_cycles = cycles - history_begin->cycles;
                discontinuity =
                        history_begin->clock_was_set_seq != clock_was_set_seq;

                ret = adjust_historical_crosststamp(history_begin,
                                                    partial_history_cycles,
                                                    total_history_cycles,
                                                    discontinuity, xtstamp);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
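
/*
 * Usage sketch (all example_* names are hypothetical, not part of this
 * file): a driver supplies a callback that samples its device clock and
 * the system counter as close together as the hardware allows, then asks
 * for a correlated system timestamp.
 */
#if 0
static int example_get_time_fn(ktime_t *device_time,
                               struct system_counterval_t *system_counter,
                               void *ctx)
{
        struct example_dev *dev = ctx;          /* hypothetical device type */

        *device_time = ns_to_ktime(example_read_device_ns(dev));
        system_counter->cycles = example_read_system_counter(dev);
        system_counter->cs = example_system_clocksource;
        return 0;
}

static int example_crosststamp(struct example_dev *dev)
{
        struct system_device_crosststamp xtstamp;

        return get_device_system_crosststamp(example_get_time_fn, dev,
                                             NULL, &xtstamp);
}
#endif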

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 ts_delta, xt;
        unsigned long flags;
        int ret = 0;

        if (!timespec64_valid_settod(ts))
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

        if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
                ret = -EINVAL;
                goto out;
        }

        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

        tk_set_xtime(tk, ts);
out:
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        if (!ret)
                audit_tk_injoffset(ts_delta);

        return ret;
}
EXPORT_SYMBOL(do_settimeofday64);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:         pointer to the timespec64 variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
static int timekeeping_inject_offset(const struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        struct timespec64 tmp;
        int ret = 0;

        if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec64_add(tk_xtime(tk), *ts);
        if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
            !timespec64_valid_settod(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, ts);
        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}

/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives.  Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours)  or
 * compile in the timezone information into the kernel.  Bad, bad....
 *
 *                                              - TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
void timekeeping_warp_clock(void)
{
        if (sys_tz.tz_minuteswest != 0) {
                struct timespec64 adjust;

                persistent_clock_is_local = 1;
                adjust.tv_sec = sys_tz.tz_minuteswest * 60;
                adjust.tv_nsec = 0;
                timekeeping_inject_offset(&adjust);
        }
}

/**
 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
        tk->tai_offset = tai_offset;
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);
        /*
         * If the new clocksource is in a module, get a module reference.
         * Succeeds for built-in code (owner == NULL) as well.
         */
        if (try_module_get(new->owner)) {
                if (!new->enable || new->enable(new) == 0) {
                        old = tk->tkr_mono.clock;
                        tk_setup_internals(tk, new);
                        if (old->disable)
                                old->disable(old);
                        module_put(old->owner);
                } else {
                        module_put(new->owner);
                }
        }
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:              pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        if (tk->tkr_mono.clock == clock)
                return 0;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
        return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec64
 * @ts:         pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely unmodified by ntp)
 */
void ktime_get_raw_ts64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                ts->tv_sec = tk->raw_sec;
                nsecs = timekeeping_get_ns(&tk->tkr_raw);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_raw_ts64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        int ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u64 ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ret = tk->tkr_mono.clock->max_idle_ns;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ret;
}

1478 /**
1479  * read_persistent_clock64 -  Return time from the persistent clock.
1480  *
1481  * Weak dummy function for arches that do not yet support it.
1482  * Reads the time from the battery backed persistent clock.
1483  * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1484  *
1485  *  XXX - Do be sure to remove it once all arches implement it.
1486  */
1487 void __weak read_persistent_clock64(struct timespec64 *ts)
1488 {
1489         ts->tv_sec = 0;
1490         ts->tv_nsec = 0;
1491 }
1492
1493 /**
1494  * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1495  *                                        from the boot.
1496  *
1497  * Weak dummy function for arches that do not yet support it.
1498  * wall_time    - current time as returned by persistent clock
1499  * boot_offset  - offset that is defined as wall_time - boot_time
1500  * The default function calculates offset based on the current value of
1501  * local_clock(). This way architectures that support sched_clock() but don't
1502  * support dedicated boot time clock will provide the best estimate of the
1503  * boot time.
1504  */
1505 void __weak __init
1506 read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1507                                      struct timespec64 *boot_offset)
1508 {
1509         read_persistent_clock64(wall_time);
1510         *boot_offset = ns_to_timespec64(local_clock());
1511 }
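/*
 * Illustrative sketch of an architecture override (hypothetical helpers,
 * not from any real arch): a platform with a dedicated boot time counter
 * could provide both values directly instead of the local_clock() estimate:
 *
 *	void __init read_persistent_wall_and_boot_offset(
 *			struct timespec64 *wall_time,
 *			struct timespec64 *boot_offset)
 *	{
 *		*wall_time   = soc_rtc_read();			// hypothetical
 *		*boot_offset = soc_boot_offset_read();		// hypothetical
 *	}
 */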
1512
1513 /*
1514  * Flag reflecting whether timekeeping_resume() has injected sleeptime.
1515  *
1516  * The flag starts off false and is only set when a suspend reaches
1517  * timekeeping_suspend(). timekeeping_resume() sets it back to false when
1518  * the timekeeper clocksource did not stop across suspend and has been
1519  * used to update the sleep time. If the timekeeper clocksource stopped,
1520  * the flag stays true and is used by the RTC resume code to decide
1521  * whether sleeptime must be injected; if so, the flag is cleared there.
1522  *
1523  * If a suspend fails before reaching timekeeping_resume() then the flag
1524  * stays false and prevents erroneous sleeptime injection.
1525  */
1526 static bool suspend_timing_needed;
1527
1528 /* Flag for if there is a persistent clock on this platform */
1529 static bool persistent_clock_exists;
1530
1531 /*
1532  * timekeeping_init - Initializes the clocksource and common timekeeping values
1533  */
1534 void __init timekeeping_init(void)
1535 {
1536         struct timespec64 wall_time, boot_offset, wall_to_mono;
1537         struct timekeeper *tk = &tk_core.timekeeper;
1538         struct clocksource *clock;
1539         unsigned long flags;
1540
1541         read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1542         if (timespec64_valid_settod(&wall_time) &&
1543             timespec64_to_ns(&wall_time) > 0) {
1544                 persistent_clock_exists = true;
1545         } else if (timespec64_to_ns(&wall_time) != 0) {
1546                 pr_warn("Persistent clock returned invalid value");
1547                 wall_time = (struct timespec64){0};
1548         }
1549
1550         if (timespec64_compare(&wall_time, &boot_offset) < 0)
1551                 boot_offset = (struct timespec64){0};
1552
1553         /*
1554          * We want to set wall_to_mono, so that the following is true:
1555          * wall time + wall_to_mono = boot time
1556          */
1557         wall_to_mono = timespec64_sub(boot_offset, wall_time);
1558
1559         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1560         write_seqcount_begin(&tk_core.seq);
1561         ntp_init();
1562
1563         clock = clocksource_default_clock();
1564         if (clock->enable)
1565                 clock->enable(clock);
1566         tk_setup_internals(tk, clock);
1567
1568         tk_set_xtime(tk, &wall_time);
1569         tk->raw_sec = 0;
1570
1571         tk_set_wall_to_mono(tk, wall_to_mono);
1572
1573         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1574
1575         write_seqcount_end(&tk_core.seq);
1576         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1577 }
1578
1579 /* time in seconds when suspend began for persistent clock */
1580 static struct timespec64 timekeeping_suspend_time;
1581
1582 /**
1583  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1584  * @delta: pointer to a timespec delta value
1585  *
1586  * Takes a timespec offset measuring a suspend interval and properly
1587  * adds the sleep offset to the timekeeping variables.
1588  */
1589 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1590                                            const struct timespec64 *delta)
1591 {
1592         if (!timespec64_valid_strict(delta)) {
1593                 printk_deferred(KERN_WARNING
1594                                 "%s: Invalid sleep delta value!\n",
1595                                 __func__);
1596                 return;
1597         }
1598         tk_xtime_add(tk, delta);
1599         tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1600         tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1601         tk_debug_account_sleep_time(delta);
1602 }
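/*
 * Worked example (illustrative): suppose the system slept for 100s. Then
 * tk_xtime_add() moves CLOCK_REALTIME forward by 100s, wall_to_monotonic
 * shrinks by 100s so CLOCK_MONOTONIC is unchanged across the suspend, and
 * tk_update_sleep_time() grows offs_boot by 100s so CLOCK_BOOTTIME still
 * advances by the full sleep interval.
 */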
1603
1604 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1605 /**
1606  * We have three kinds of time sources to use for sleep time
1607  * injection; the preference order is:
1608  * 1) non-stop clocksource
1609  * 2) persistent clock (ie: RTC accessible when irqs are off)
1610  * 3) RTC
1611  *
1612  * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
1613  * If the system has neither 1) nor 2), 3) is used as the fallback.
1614  *
1615  * If timekeeping has injected sleeptime via either 1) or 2),
1616  * 3) becomes needless, so in this case we don't need to call
1617  * rtc_resume(), and this is exactly what the return value of
1618  * timekeeping_rtc_skipresume() indicates: a true return means
1619  * rtc_resume() can be skipped.
1620  */
1621 bool timekeeping_rtc_skipresume(void)
1622 {
1623         return !suspend_timing_needed;
1624 }
1625
1626 /**
1627  * Whether 1) can be used is only known in timekeeping_resume(),
1628  * which is invoked after rtc_suspend(), so we can't safely skip
1629  * rtc_suspend() merely because the system has 1).
1630  *
1631  * But if the system has 2), 2) will definitely be used, so in
1632  * this case we don't need to call rtc_suspend(), and this is what
1633  * timekeeping_rtc_skipsuspend() indicates.
1634  */
1635 bool timekeeping_rtc_skipsuspend(void)
1636 {
1637         return persistent_clock_exists;
1638 }
1639
1640 /**
1641  * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1642  * @delta: pointer to a timespec64 delta value
1643  *
1644  * This hook is for architectures that cannot support read_persistent_clock64
1645  * because their RTC/persistent clock is only accessible when irqs are enabled,
1646  * and also don't have an effective nonstop clocksource.
1647  *
1648  * This function should only be called by rtc_resume(), and allows
1649  * a suspend offset to be injected into the timekeeping values.
1650  */
1651 void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1652 {
1653         struct timekeeper *tk = &tk_core.timekeeper;
1654         unsigned long flags;
1655
1656         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1657         write_seqcount_begin(&tk_core.seq);
1658
1659         suspend_timing_needed = false;
1660
1661         timekeeping_forward_now(tk);
1662
1663         __timekeeping_inject_sleeptime(tk, delta);
1664
1665         timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1666
1667         write_seqcount_end(&tk_core.seq);
1668         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1669
1670         /* signal hrtimers about time change */
1671         clock_was_set();
1672 }
1673 #endif
1674
1675 /**
1676  * timekeeping_resume - Resumes the generic timekeeping subsystem.
1677  */
1678 void timekeeping_resume(void)
1679 {
1680         struct timekeeper *tk = &tk_core.timekeeper;
1681         struct clocksource *clock = tk->tkr_mono.clock;
1682         unsigned long flags;
1683         struct timespec64 ts_new, ts_delta;
1684         u64 cycle_now, nsec;
1685         bool inject_sleeptime = false;
1686
1687         read_persistent_clock64(&ts_new);
1688
1689         clockevents_resume();
1690         clocksource_resume();
1691
1692         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1693         write_seqcount_begin(&tk_core.seq);
1694
1695         /*
1696          * After the system resumes, we need to calculate the suspended time
1697          * and add it to the OS time. There are 3 sources that could be
1698          * used: a nonstop clocksource during suspend, the persistent clock
1699          * and the rtc device.
1700          *
1701          * A given platform may have one, two or all of them, and the
1702          * preference will be:
1703          *      suspend-nonstop clocksource -> persistent clock -> rtc
1704          * A less preferred source is only tried if there is no better
1705          * usable source. The rtc part is handled separately in rtc core code.
1706          */
1707         cycle_now = tk_clock_read(&tk->tkr_mono);
1708         nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1709         if (nsec > 0) {
1710                 ts_delta = ns_to_timespec64(nsec);
1711                 inject_sleeptime = true;
1712         } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1713                 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1714                 inject_sleeptime = true;
1715         }
1716
1717         if (inject_sleeptime) {
1718                 suspend_timing_needed = false;
1719                 __timekeeping_inject_sleeptime(tk, &ts_delta);
1720         }
1721
1722         /* Re-base the last cycle value */
1723         tk->tkr_mono.cycle_last = cycle_now;
1724         tk->tkr_raw.cycle_last  = cycle_now;
1725
1726         tk->ntp_error = 0;
1727         timekeeping_suspended = 0;
1728         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1729         write_seqcount_end(&tk_core.seq);
1730         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1731
1732         touch_softlockup_watchdog();
1733
1734         tick_resume();
1735         hrtimers_resume();
1736 }
1737
1738 int timekeeping_suspend(void)
1739 {
1740         struct timekeeper *tk = &tk_core.timekeeper;
1741         unsigned long flags;
1742         struct timespec64               delta, delta_delta;
1743         static struct timespec64        old_delta;
1744         struct clocksource *curr_clock;
1745         u64 cycle_now;
1746
1747         read_persistent_clock64(&timekeeping_suspend_time);
1748
1749         /*
1750          * On some systems the persistent clock cannot be detected at
1751          * timekeeping_init() time by its return value, so if we see a valid
1752          * value returned here, update the persistent_clock_exists flag.
1753          */
1754         if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1755                 persistent_clock_exists = true;
1756
1757         suspend_timing_needed = true;
1758
1759         raw_spin_lock_irqsave(&timekeeper_lock, flags);
1760         write_seqcount_begin(&tk_core.seq);
1761         timekeeping_forward_now(tk);
1762         timekeeping_suspended = 1;
1763
1764         /*
1765          * Since we've called forward_now, cycle_last stores the value
1766          * just read from the current clocksource. Save this to potentially
1767          * use in suspend timing.
1768          */
1769         curr_clock = tk->tkr_mono.clock;
1770         cycle_now = tk->tkr_mono.cycle_last;
1771         clocksource_start_suspend_timing(curr_clock, cycle_now);
1772
1773         if (persistent_clock_exists) {
1774                 /*
1775                  * To avoid drift caused by repeated suspend/resumes,
1776                  * each of which can add ~1 second of drift error,
1777                  * try to compensate so the difference in system time
1778                  * and persistent_clock time stays close to constant.
1779                  */
1780                 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1781                 delta_delta = timespec64_sub(delta, old_delta);
1782                 if (abs(delta_delta.tv_sec) >= 2) {
1783                         /*
1784                          * if delta_delta is too large, assume time correction
1785                          * has occurred and set old_delta to the current delta.
1786                          */
1787                         old_delta = delta;
1788                 } else {
1789                         /* Otherwise try to adjust timekeeping_suspend_time to compensate */
1790                         timekeeping_suspend_time =
1791                                 timespec64_add(timekeeping_suspend_time, delta_delta);
1792                 }
1793         }
1794
1795         timekeeping_update(tk, TK_MIRROR);
1796         halt_fast_timekeeper(tk);
1797         write_seqcount_end(&tk_core.seq);
1798         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1799
1800         tick_suspend();
1801         clocksource_suspend();
1802         clockevents_suspend();
1803
1804         return 0;
1805 }
1806
1807 /* sysfs resume/suspend bits for timekeeping */
1808 static struct syscore_ops timekeeping_syscore_ops = {
1809         .resume         = timekeeping_resume,
1810         .suspend        = timekeeping_suspend,
1811 };
1812
1813 static int __init timekeeping_init_ops(void)
1814 {
1815         register_syscore_ops(&timekeeping_syscore_ops);
1816         return 0;
1817 }
1818 device_initcall(timekeeping_init_ops);
1819
1820 /*
1821  * Apply a multiplier adjustment to the timekeeper
1822  */
1823 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1824                                                          s64 offset,
1825                                                          s32 mult_adj)
1826 {
1827         s64 interval = tk->cycle_interval;
1828
1829         if (mult_adj == 0) {
1830                 return;
1831         } else if (mult_adj == -1) {
1832                 interval = -interval;
1833                 offset = -offset;
1834         } else if (mult_adj != 1) {
1835                 interval *= mult_adj;
1836                 offset *= mult_adj;
1837         }
1838
1839         /*
1840          * So the following can be confusing.
1841          *
1842          * To keep things simple, let's assume mult_adj == 1 for now.
1843          *
1844          * When mult_adj != 1, remember that the interval and offset values
1845          * have been appropriately scaled so the math is the same.
1846          *
1847          * The basic idea here is that we're increasing the multiplier
1848          * by one, this causes the xtime_interval to be incremented by
1849          * one cycle_interval. This is because:
1850          *      xtime_interval = cycle_interval * mult
1851          * So if mult is being incremented by one:
1852          *      xtime_interval = cycle_interval * (mult + 1)
1853          * It's the same as:
1854          *      xtime_interval = (cycle_interval * mult) + cycle_interval
1855          * Which can be shortened to:
1856          *      xtime_interval += cycle_interval
1857          *
1858          * So offset stores the non-accumulated cycles. Thus the current
1859          * time (in shifted nanoseconds) is:
1860          *      now = (offset * adj) + xtime_nsec
1861          * Now, even though we're adjusting the clock frequency, we have
1862          * to keep time consistent. In other words, we can't jump back
1863          * in time, and we also want to avoid jumping forward in time.
1864          *
1865          * So given the same offset value, we need the time to be the same
1866          * both before and after the freq adjustment.
1867          *      now = (offset * adj_1) + xtime_nsec_1
1868          *      now = (offset * adj_2) + xtime_nsec_2
1869          * So:
1870          *      (offset * adj_1) + xtime_nsec_1 =
1871          *              (offset * adj_2) + xtime_nsec_2
1872          * And we know:
1873          *      adj_2 = adj_1 + 1
1874          * So:
1875          *      (offset * adj_1) + xtime_nsec_1 =
1876          *              (offset * (adj_1+1)) + xtime_nsec_2
1877          *      (offset * adj_1) + xtime_nsec_1 =
1878          *              (offset * adj_1) + offset + xtime_nsec_2
1879          * Canceling the sides:
1880          *      xtime_nsec_1 = offset + xtime_nsec_2
1881          * Which gives us:
1882          *      xtime_nsec_2 = xtime_nsec_1 - offset
1883          * Which simplifies to:
1884          *      xtime_nsec -= offset
1885          */
1886         if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1887                 /* NTP adjustment caused clocksource mult overflow */
1888                 WARN_ON_ONCE(1);
1889                 return;
1890         }
1891
1892         tk->tkr_mono.mult += mult_adj;
1893         tk->xtime_interval += interval;
1894         tk->tkr_mono.xtime_nsec -= offset;
1895 }
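/*
 * Worked example of the invariant above (illustrative numbers): with
 * offset = 1000 non-accumulated cycles and mult going from 5 to 6,
 *	before: now = 1000 * 5 + xtime_nsec
 *	after:  now = 1000 * 6 + (xtime_nsec - 1000)
 * Both sides evaluate to 5000 + xtime_nsec, so readers observe no jump
 * at the moment of the frequency change.
 */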
1896
1897 /*
1898  * Adjust the timekeeper's multiplier to the correct frequency
1899  * and also to reduce the accumulated error value.
1900  */
1901 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1902 {
1903         u32 mult;
1904
1905         /*
1906          * Determine the multiplier from the current NTP tick length.
1907          * Avoid expensive division when the tick length doesn't change.
1908          */
1909         if (likely(tk->ntp_tick == ntp_tick_length())) {
1910                 mult = tk->tkr_mono.mult - tk->ntp_err_mult;
1911         } else {
1912                 tk->ntp_tick = ntp_tick_length();
1913                 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
1914                                  tk->xtime_remainder, tk->cycle_interval);
1915         }
1916
1917         /*
1918          * If the clock is behind the NTP time, increase the multiplier by 1
1919          * to catch up with it. If it's ahead and there was a remainder in the
1920          * tick division, the clock will slow down. Otherwise it will stay
1921          * ahead until the tick length changes to a non-divisible value.
1922          */
1923         tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
1924         mult += tk->ntp_err_mult;
1925
1926         timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
1927
1928         if (unlikely(tk->tkr_mono.clock->maxadj &&
1929                 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
1930                         > tk->tkr_mono.clock->maxadj))) {
1931                 printk_once(KERN_WARNING
1932                         "Adjusting %s more than 11%% (%ld vs %ld)\n",
1933                         tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
1934                         (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
1935         }
1936
1937         /*
1938          * It may be possible that when we entered this function, xtime_nsec
1939          * was very small.  Further, if we're slightly speeding the clocksource
1940          * in the code above, it's possible the required corrective factor to
1941          * xtime_nsec could cause it to underflow.
1942          *
1943          * Now, since we have already accumulated the second and the NTP
1944          * subsystem has been notified via second_overflow(), we need to skip
1945          * the next update.
1946          */
1947         if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
1948                 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
1949                                                         tk->tkr_mono.shift;
1950                 tk->xtime_sec--;
1951                 tk->skip_second_overflow = 1;
1952         }
1953 }
1954
1955 /**
1956  * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1957  *
1958  * Helper function that accumulates the nsecs greater than a second
1959  * from the xtime_nsec field to the xtime_sec field.
1960  * It also calls into the NTP code to handle leapsecond processing.
1961  *
1962  */
1963 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1964 {
1965         u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1966         unsigned int clock_set = 0;
1967
1968         while (tk->tkr_mono.xtime_nsec >= nsecps) {
1969                 int leap;
1970
1971                 tk->tkr_mono.xtime_nsec -= nsecps;
1972                 tk->xtime_sec++;
1973
1974                 /*
1975                  * Skip NTP update if this second was accumulated before,
1976                  * i.e. xtime_nsec underflowed in timekeeping_adjust()
1977                  */
1978                 if (unlikely(tk->skip_second_overflow)) {
1979                         tk->skip_second_overflow = 0;
1980                         continue;
1981                 }
1982
1983                 /* Figure out if it's a leap second and apply if needed */
1984                 leap = second_overflow(tk->xtime_sec);
1985                 if (unlikely(leap)) {
1986                         struct timespec64 ts;
1987
1988                         tk->xtime_sec += leap;
1989
1990                         ts.tv_sec = leap;
1991                         ts.tv_nsec = 0;
1992                         tk_set_wall_to_mono(tk,
1993                                 timespec64_sub(tk->wall_to_monotonic, ts));
1994
1995                         __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1996
1997                         clock_set = TK_CLOCK_WAS_SET;
1998                 }
1999         }
2000         return clock_set;
2001 }
2002
2003 /**
2004  * logarithmic_accumulation - shifted accumulation of cycles
2005  *
2006  * This function accumulates a shifted interval of cycles into a
2007  * shifted interval of nanoseconds, allowing for an O(log) accumulation
2008  * loop.
2009  *
2010  * Returns the unconsumed cycles.
2011  */
2012 static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2013                                     u32 shift, unsigned int *clock_set)
2014 {
2015         u64 interval = tk->cycle_interval << shift;
2016         u64 snsec_per_sec;
2017
2018         /* If the offset is smaller than a shifted interval, do nothing */
2019         if (offset < interval)
2020                 return offset;
2021
2022         /* Accumulate one shifted interval */
2023         offset -= interval;
2024         tk->tkr_mono.cycle_last += interval;
2025         tk->tkr_raw.cycle_last  += interval;
2026
2027         tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2028         *clock_set |= accumulate_nsecs_to_secs(tk);
2029
2030         /* Accumulate raw time */
2031         tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2032         snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2033         while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2034                 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2035                 tk->raw_sec++;
2036         }
2037
2038         /* Accumulate error between NTP and clock interval */
2039         tk->ntp_error += tk->ntp_tick << shift;
2040         tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2041                                                 (tk->ntp_error_shift + shift);
2042
2043         return offset;
2044 }
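/*
 * Worked example (illustrative, assuming cycle_interval is a power of
 * two): with offset == 10 * cycle_interval, timekeeping_advance() below
 * starts at shift = 3 and the three calls at shift 3, 2 and 1 consume
 * 8, 0 and 2 intervals respectively, instead of ten single-interval
 * accumulations.
 */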
2045
2046 /*
2047  * timekeeping_advance - Updates the timekeeper to the current time and
2048  * current NTP tick length
2049  */
2050 static void timekeeping_advance(enum timekeeping_adv_mode mode)
2051 {
2052         struct timekeeper *real_tk = &tk_core.timekeeper;
2053         struct timekeeper *tk = &shadow_timekeeper;
2054         u64 offset;
2055         int shift = 0, maxshift;
2056         unsigned int clock_set = 0;
2057         unsigned long flags;
2058
2059         raw_spin_lock_irqsave(&timekeeper_lock, flags);
2060
2061         /* Make sure we're fully resumed: */
2062         if (unlikely(timekeeping_suspended))
2063                 goto out;
2064
2065 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2066         offset = real_tk->cycle_interval;
2067
2068         if (mode != TK_ADV_TICK)
2069                 goto out;
2070 #else
2071         offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2072                                    tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2073
2074         /* Check if there's really nothing to do */
2075         if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2076                 goto out;
2077 #endif
2078
2079         /* Do some additional sanity checking */
2080         timekeeping_check_update(tk, offset);
2081
2082         /*
2083          * With NO_HZ we may have to accumulate many cycle_intervals
2084          * (think "ticks") worth of time at once. To do this efficiently,
2085          * we calculate the largest doubling multiple of cycle_intervals
2086          * that is smaller than the offset.  We then accumulate that
2087          * chunk in one go, and then try to consume the next smaller
2088          * doubled multiple.
2089          */
2090         shift = ilog2(offset) - ilog2(tk->cycle_interval);
2091         shift = max(0, shift);
2092         /* Bound shift to one less than what overflows tick_length */
2093         maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2094         shift = min(shift, maxshift);
2095         while (offset >= tk->cycle_interval) {
2096                 offset = logarithmic_accumulation(tk, offset, shift,
2097                                                         &clock_set);
2098                 if (offset < tk->cycle_interval<<shift)
2099                         shift--;
2100         }
2101
2102         /* Adjust the multiplier to correct NTP error */
2103         timekeeping_adjust(tk, offset);
2104
2105         /*
2106          * Finally, make sure that after the rounding
2107          * xtime_nsec isn't larger than NSEC_PER_SEC
2108          */
2109         clock_set |= accumulate_nsecs_to_secs(tk);
2110
2111         write_seqcount_begin(&tk_core.seq);
2112         /*
2113          * Update the real timekeeper.
2114          *
2115          * We could avoid this memcpy by switching pointers, but that
2116          * requires changes to all other timekeeper usage sites as
2117          * well, i.e. move the timekeeper pointer getter into the
2118          * spinlocked/seqcount protected sections. And we trade this
2119          * memcpy under the tk_core.seq against one before we start
2120          * updating.
2121          */
2122         timekeeping_update(tk, clock_set);
2123         memcpy(real_tk, tk, sizeof(*tk));
2124         /* The memcpy must come last. Do not put anything here! */
2125         write_seqcount_end(&tk_core.seq);
2126 out:
2127         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2128         if (clock_set)
2129                 /* Have to call the _delayed version, since we're in irq context */
2130                 clock_was_set_delayed();
2131 }
2132
2133 /**
2134  * update_wall_time - Uses the current clocksource to increment the wall time
2135  *
2136  */
2137 void update_wall_time(void)
2138 {
2139         timekeeping_advance(TK_ADV_TICK);
2140 }
2141
2142 /**
2143  * getboottime64 - Return the real time of system boot.
2144  * @ts:         pointer to the timespec64 to be set
2145  *
2146  * Returns the wall-time of boot in a timespec64.
2147  *
2148  * This is based on the wall_to_monotonic offset and the total suspend
2149  * time. Calls to settimeofday will affect the value returned (which
2150  * basically means that however wrong your real time clock is at boot time,
2151  * you get the right time here).
2152  */
2153 void getboottime64(struct timespec64 *ts)
2154 {
2155         struct timekeeper *tk = &tk_core.timekeeper;
2156         ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2157
2158         *ts = ktime_to_timespec64(t);
2159 }
2160 EXPORT_SYMBOL_GPL(getboottime64);
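/*
 * Illustrative derivation of the ktime_sub() above: with mono denoting
 * CLOCK_MONOTONIC,
 *	real = mono + offs_real
 *	boot = mono + offs_boot
 * The wall time of boot is the real time at the instant boot == 0, i.e.
 * at mono == -offs_boot, which gives real = offs_real - offs_boot.
 */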
2161
2162 void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2163 {
2164         struct timekeeper *tk = &tk_core.timekeeper;
2165         unsigned int seq;
2166
2167         do {
2168                 seq = read_seqcount_begin(&tk_core.seq);
2169
2170                 *ts = tk_xtime(tk);
2171         } while (read_seqcount_retry(&tk_core.seq, seq));
2172 }
2173 EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2174
2175 void ktime_get_coarse_ts64(struct timespec64 *ts)
2176 {
2177         struct timekeeper *tk = &tk_core.timekeeper;
2178         struct timespec64 now, mono;
2179         unsigned int seq;
2180
2181         do {
2182                 seq = read_seqcount_begin(&tk_core.seq);
2183
2184                 now = tk_xtime(tk);
2185                 mono = tk->wall_to_monotonic;
2186         } while (read_seqcount_retry(&tk_core.seq, seq));
2187
2188         set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2189                                 now.tv_nsec + mono.tv_nsec);
2190 }
2191 EXPORT_SYMBOL(ktime_get_coarse_ts64);
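/*
 * Illustrative usage sketch: the coarse accessors return the time of the
 * last timekeeping update without reading the clocksource, so they are a
 * cheap choice where tick granularity is sufficient:
 *
 *	struct timespec64 ts;
 *
 *	ktime_get_coarse_ts64(&ts);	// CLOCK_MONOTONIC_COARSE view
 */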
2192
2193 /*
2194  * Must hold jiffies_lock
2195  */
2196 void do_timer(unsigned long ticks)
2197 {
2198         jiffies_64 += ticks;
2199         calc_global_load();
2200 }
2201
2202 /**
2203  * ktime_get_update_offsets_now - hrtimer helper
2204  * @cwsseq:     pointer to check and store the clock was set sequence number
2205  * @offs_real:  pointer to storage for monotonic -> realtime offset
2206  * @offs_boot:  pointer to storage for monotonic -> boottime offset
2207  * @offs_tai:   pointer to storage for monotonic -> clock tai offset
2208  *
2209  * Returns current monotonic time and updates the offsets if the
2210  * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2211  * different.
2212  *
2213  * Called from hrtimer_interrupt() or retrigger_next_event()
2214  */
2215 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2216                                      ktime_t *offs_boot, ktime_t *offs_tai)
2217 {
2218         struct timekeeper *tk = &tk_core.timekeeper;
2219         unsigned int seq;
2220         ktime_t base;
2221         u64 nsecs;
2222
2223         do {
2224                 seq = read_seqcount_begin(&tk_core.seq);
2225
2226                 base = tk->tkr_mono.base;
2227                 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2228                 base = ktime_add_ns(base, nsecs);
2229
2230                 if (*cwsseq != tk->clock_was_set_seq) {
2231                         *cwsseq = tk->clock_was_set_seq;
2232                         *offs_real = tk->offs_real;
2233                         *offs_boot = tk->offs_boot;
2234                         *offs_tai = tk->offs_tai;
2235                 }
2236
2237                 /* Handle leapsecond insertion adjustments */
2238                 if (unlikely(base >= tk->next_leap_ktime))
2239                         *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2240
2241         } while (read_seqcount_retry(&tk_core.seq, seq));
2242
2243         return base;
2244 }
2245
2246 /**
2247  * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2248  */
2249 static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2250 {
2251         if (txc->modes & ADJ_ADJTIME) {
2252                 /* singleshot must not be used with any other mode bits */
2253                 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2254                         return -EINVAL;
2255                 if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2256                     !capable(CAP_SYS_TIME))
2257                         return -EPERM;
2258         } else {
2259                 /* In order to modify anything, you gotta be super-user! */
2260                 if (txc->modes && !capable(CAP_SYS_TIME))
2261                         return -EPERM;
2262                 /*
2263                  * if the quartz is off by more than 10% then
2264                  * something is VERY wrong!
2265                  */
2266                 if (txc->modes & ADJ_TICK &&
2267                     (txc->tick <  900000/USER_HZ ||
2268                      txc->tick > 1100000/USER_HZ))
2269                         return -EINVAL;
2270         }
2271
2272         if (txc->modes & ADJ_SETOFFSET) {
2273                 /* In order to inject time, you gotta be super-user! */
2274                 if (!capable(CAP_SYS_TIME))
2275                         return -EPERM;
2276
2277                 /*
2278                  * Validate if a timespec/timeval used to inject a time
2279                  * offset is valid.  Offsets can be positive or negative, so
2280                  * we don't check tv_sec. The value of the timeval/timespec
2281                  * is the sum of its fields, but *NOTE*:
2282                  * The field tv_usec/tv_nsec must always be non-negative and
2283                  * we can't have more nanoseconds/microseconds than a second.
2284                  */
2285                 if (txc->time.tv_usec < 0)
2286                         return -EINVAL;
2287
2288                 if (txc->modes & ADJ_NANO) {
2289                         if (txc->time.tv_usec >= NSEC_PER_SEC)
2290                                 return -EINVAL;
2291                 } else {
2292                         if (txc->time.tv_usec >= USEC_PER_SEC)
2293                                 return -EINVAL;
2294                 }
2295         }
2296
2297         /*
2298          * Check for potential multiplication overflows that can
2299          * only happen on 64-bit systems:
2300          */
2301         if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2302                 if (LLONG_MIN / PPM_SCALE > txc->freq)
2303                         return -EINVAL;
2304                 if (LLONG_MAX / PPM_SCALE < txc->freq)
2305                         return -EINVAL;
2306         }
2307
2308         return 0;
2309 }
2310
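/*
 * Userspace-facing example (illustrative): injecting a +0.5s step via the
 * adjtimex() syscall, which reaches do_adjtimex() below after passing the
 * validation above. Requires CAP_SYS_TIME; with ADJ_NANO the tv_usec
 * field carries nanoseconds, as checked by timekeeping_validate_timex().
 *
 *	#include <sys/timex.h>
 *
 *	struct timex txc = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *		.time  = { .tv_sec = 0, .tv_usec = 500000000 },
 *	};
 *	adjtimex(&txc);
 */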
2311
2312 /**
2313  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2314  */
2315 int do_adjtimex(struct __kernel_timex *txc)
2316 {
2317         struct timekeeper *tk = &tk_core.timekeeper;
2318         struct audit_ntp_data ad;
2319         unsigned long flags;
2320         struct timespec64 ts;
2321         s32 orig_tai, tai;
2322         int ret;
2323
2324         /* Validate the data before disabling interrupts */
2325         ret = timekeeping_validate_timex(txc);
2326         if (ret)
2327                 return ret;
2328
2329         if (txc->modes & ADJ_SETOFFSET) {
2330                 struct timespec64 delta;
2331                 delta.tv_sec  = txc->time.tv_sec;
2332                 delta.tv_nsec = txc->time.tv_usec;
2333                 if (!(txc->modes & ADJ_NANO))
2334                         delta.tv_nsec *= 1000;
2335                 ret = timekeeping_inject_offset(&delta);
2336                 if (ret)
2337                         return ret;
2338
2339                 audit_tk_injoffset(delta);
2340         }
2341
2342         audit_ntp_init(&ad);
2343
2344         ktime_get_real_ts64(&ts);
2345
2346         raw_spin_lock_irqsave(&timekeeper_lock, flags);
2347         write_seqcount_begin(&tk_core.seq);
2348
2349         orig_tai = tai = tk->tai_offset;
2350         ret = __do_adjtimex(txc, &ts, &tai, &ad);
2351
2352         if (tai != orig_tai) {
2353                 __timekeeping_set_tai_offset(tk, tai);
2354                 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2355         }
2356         tk_update_leap_state(tk);
2357
2358         write_seqcount_end(&tk_core.seq);
2359         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2360
2361         audit_ntp_log(&ad);
2362
2363         /* Update the multiplier immediately if frequency was set directly */
2364         if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2365                 timekeeping_advance(TK_ADV_FREQ);
2366
2367         if (tai != orig_tai)
2368                 clock_was_set();
2369
2370         ntp_notify_cmos_timer();
2371
2372         return ret;
2373 }
2374
2375 #ifdef CONFIG_NTP_PPS
2376 /**
2377  * hardpps() - Accessor function to NTP __hardpps function
2378  */
2379 void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2380 {
2381         unsigned long flags;
2382
2383         raw_spin_lock_irqsave(&timekeeper_lock, flags);
2384         write_seqcount_begin(&tk_core.seq);
2385
2386         __hardpps(phase_ts, raw_ts);
2387
2388         write_seqcount_end(&tk_core.seq);
2389         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2390 }
2391 EXPORT_SYMBOL(hardpps);
2392 #endif /* CONFIG_NTP_PPS */
2393
2394 /**
2395  * xtime_update() - advances the timekeeping infrastructure
2396  * @ticks:      number of ticks that have elapsed since the last call.
2397  *
2398  * Must be called with interrupts disabled.
2399  */
2400 void xtime_update(unsigned long ticks)
2401 {
2402         raw_spin_lock(&jiffies_lock);
2403         write_seqcount_begin(&jiffies_seq);
2404         do_timer(ticks);
2405         write_seqcount_end(&jiffies_seq);
2406         raw_spin_unlock(&jiffies_lock);
2407         update_wall_time();
2408 }