// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/interrupt.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>
/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>
static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
        .name         = "timebase",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .read         = timebase_read,
        .vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
};
#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
EXPORT_SYMBOL_GPL(decrementer_max);	/* for KVM HDEC */
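/*
 * For example, at a 512 MHz timebase (an illustrative, common POWER
 * value) the 31-bit default of 0x7FFFFFFF corresponds to roughly
 * 0x7FFFFFFF / 512000000 ~= 4.2 seconds between decrementer interrupts
 * when the decrementer is programmed to its maximum.
 */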
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);
struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.set_next_event		= decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);
DEFINE_PER_CPU(u64, decrementers_next_tb);
EXPORT_SYMBOL_GPL(decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
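/*
 * Worked example: both forms compute xsec * max / 2^20. With
 * xsec = XSEC_PER_SEC/2 = 2^19 and max = 1000, the 64-bit form is
 * (2^19 * 1000) / 2^20 = 500, and the 32-bit form is
 * ((2^19 << 12) * 1000) >> 32 = (2^31 * 1000) >> 32 = 500.
 */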
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;
extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

bool tb_invalid;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
}
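#if 0
/*
 * Minimal sketch (not built) of how the 0.64 fixed-point factor is
 * consumed: a multiply-high by __cputime_usec_factor divides by 2^64,
 * so usecs = ticks * 1e6 / tb_ticks_per_sec. Assuming a 512 MHz
 * timebase, the factor is 2^64 / 512 = 2^55, and 512 ticks yield
 * mulhdu(512, 2^55) = 1 microsecond. The real conversion is
 * cputime_to_usecs() in asm/cputime.h.
 */
static u64 example_ticks_to_usecs(u64 ticks)
{
	return mulhdu(ticks, __cputime_usec_factor);
}
#endif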
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}
#ifdef CONFIG_PPC_SPLPAR
#include <asm/dtl.h>

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace accumulate_stolen_time(void)
{
	u64 sst, ust;
	struct cpu_accounting_data *acct = &local_paca->accounting;

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;
}
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
					unsigned long now, unsigned long stime)
{
	unsigned long stime_scaled = 0;

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	unsigned long nowscaled, deltascaled;
	unsigned long utime, utime_scaled;

	nowscaled = read_spurr(now);
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;
	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}

	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}
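/*
 * Numeric example of the apportioning above: if the timebase shows
 * stime = 30 and utime = 70 ticks but deltascaled = 80 SPURR ticks,
 * then stime_scaled = 80 * 30 / 100 = 24 and utime_scaled = 56, i.e.
 * the SPURR ticks are split in the same 30:70 ratio as the timebase.
 */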
static unsigned long vtime_delta(struct cpu_accounting_data *acct,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	*steal_time = calculate_stolen_time(now);

	return stime;
}
static void vtime_delta_kernel(struct cpu_accounting_data *acct,
			       unsigned long *stime, unsigned long *stime_scaled)
{
	unsigned long steal_time;

	*stime = vtime_delta(acct, stime_scaled, &steal_time);
	*stime -= min(*stime, steal_time);
	acct->steal_time += steal_time;
}
void vtime_account_kernel(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);

	if (tsk->flags & PF_VCPU) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(acct, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}
static void vtime_account_irq_field(struct cpu_accounting_data *acct,
				    unsigned long *field)
{
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);
	*field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	acct->stime_scaled += stime_scaled;
#endif
}
void vtime_account_softirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	vtime_account_irq_field(acct, &acct->softirq_time);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	vtime_account_irq_field(acct, &acct->hardirq_time);
}
static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}
/*
 * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));
	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}
	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));
	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);
	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif
void __delay(unsigned long loops)
{
	unsigned long start;

	spin_begin();
	if (tb_invalid) {
		/*
		 * TB is in error state and isn't ticking anymore.
		 * HMI handler was unable to recover from TB error.
		 * Return immediately, so that kernel won't get stuck here.
		 */
		spin_cpu_relax();
	} else {
		start = mftb();
		while (mftb() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
	: "=r" (x)
	: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */
void arch_irq_work_raise(void)
{
	/*
	 * 64-bit code that uses irq soft-mask can just cause an immediate
	 * interrupt here that gets soft masked, if this is called under
	 * local_irq_disable(). It might be possible to prevent that happening
	 * by noticing interrupts are disabled and setting decrementer pending
	 * to be replayed when irqs are enabled. The problem there is that
	 * tracing can call irq_work_raise, including in code that does low
	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
	 * which could get tangled up if we're messing with the same state
	 * here.
	 */
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}
static void set_dec_or_work(u64 val)
{
	set_dec(val);
	/* We may have raced with new irq work */
	if (unlikely(test_irq_work_pending()))
		set_dec(1);
}
#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

static void set_dec_or_work(u64 val)
{
	set_dec(val);
}
#endif /* CONFIG_IRQ_WORK */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void timer_rearm_host_dec(u64 now)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	WARN_ON_ONCE(!arch_irqs_disabled());
	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (now >= *next_tb) {
		local_paca->irq_happened |= PACA_IRQ_DEC;
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec_or_work(now);
	}
}
EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
#endif
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/*
	 * Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these.
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		set_dec(decrementer_max);
		return;
	}

	/* Conditionally hard-enable interrupts. */
	if (should_hard_irq_enable()) {
		/*
		 * Ensure a positive value is written to the decrementer, or
		 * else some CPUs will continue to take decrementer exceptions.
		 * When the PPC_WATCHDOG (decrementer based) is configured,
		 * keep this at most 31 bits, which is about 4 seconds on most
		 * systems, which gives the watchdog a chance of catching timer
		 * interrupt hard lockups.
		 */
		if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
			set_dec(0x7fffffff);
		else
			set_dec(decrementer_max);

		do_hard_irq_enable();
	}

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now > decrementer_max)
			now = decrementer_max;
		set_dec_or_work(now);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);

	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = ~(u64)0;
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif
#ifdef CONFIG_SUSPEND
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();

	/* Disable the decrementer, so that it doesn't interfere
	 * with the generic suspend mechanism.
	 */
	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	local_irq_enable();

	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif
unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
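/*
 * Example, assuming a 512 MHz timebase (illustrative values only):
 * time_init() below computes tb_to_ns_scale = 0.9765625 * 2^64 and
 * tb_to_ns_shift = 1, so 512000000 ticks give
 * (512000000 * 0.9765625) << 1 = 1000000000 ns, i.e. exactly one second.
 */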
#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernel.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch in host
	 * timebase into the VTB when it takes a guest off the CPU, reading the
	 * VTB would result in reading 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, it
	 * would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is a next best approximation without a VTB.
	 * On a host which is running bare metal there should never be any stolen
	 * time and on a host which doesn't do any virtualisation TB *should* equal
	 * VTB so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");
	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}
		of_node_put(cpu);
	}

	return found;
}
static void start_cpu_decrementer(void)
{
#ifdef CONFIG_BOOKE_OR_40x
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}
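/*
 * The device-tree properties consumed above typically look like this in
 * a cpu node (values are illustrative only):
 *
 *	timebase-frequency = <512000000>;
 *	clock-frequency = <2000000000>;
 *
 * The "ibm,extended-*" variants carry 2-cell (64-bit) values for
 * frequencies that don't fit in 32 bits.
 */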
int update_persistent_clock64(struct timespec64 now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

	return ppc_md.set_rtc_time(&tm);
}
static void __read_persistent_clock(struct timespec64 *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = rtc_tm_to_time64(&tm);
}
void read_persistent_clock64(struct timespec64 *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
/* clocksource code */
static notrace u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}
static void __init clocksource_init(void)
{
	struct clocksource *clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
	set_dec_or_work(evt);

	return 0;
}
static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}
static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	/* Set values for KVM, see kvm_emulate_dec() */
	decrementer_clockevent.mult = dec->mult;
	decrementer_clockevent.shift = dec->shift;
}
static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD manually
	 * otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}
static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}
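/*
 * For example, ibm,dec-bits = <56> yields
 * decrementer_max = (1ul << 55) - 1 = 0x7fffffffffffff.
 */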
static void __init init_decrementer_clockevent(void)
{
	register_decrementer_clockevent(smp_processor_id());
}
void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here ! */
	register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	/* Normal PowerPC with timebase register */
	ppc_md.calibrate_decr();
	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
	printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

	of_clk_init(NULL);
	enable_sched_clock_irqtime();
}
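/*
 * Worked example of the scale computation above, assuming a 512 MHz
 * timebase: 1e9 * 2^64 / 512e6 = 1.953125 * 2^64, so res.result_high
 * starts out as 1. One iteration of the loop halves that into
 * scale = 0.9765625 * 2^64 with shift = 1, and sched_clock() then
 * returns ticks * 1.953125 ns.
 */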
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
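/*
 * Worked example: div128_by_32(10, 0, 4, &dr) divides 10 * 2^64 by 4.
 * The 32-bit chunks are a = 0, b = 10, c = 0, d = 0; the partial
 * remainders propagate as 10 / 4 = 2 remainder 2, giving
 * dr->result_high = 2 and dr->result_low = 0x8000000000000000,
 * i.e. 2.5 * 2^64 read as a 64.64 fixed-point value.
 */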
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);

	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif