[linux-2.6-microblaze.git] arch/mips/kernel/time.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004  Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines.
 */
#include <linux/bug.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>

#ifdef CONFIG_CPU_FREQ

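/*
 * Reference loops_per_jiffy values and the CPU frequencies they were
 * measured at, captured on the first cpufreq transition so later
 * transitions can rescale the delay loops from a stable baseline.
 */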
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
static unsigned long glb_lpj_ref;
static unsigned long glb_lpj_ref_freq;

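/*
 * cpufreq transition notifier: rescale loops_per_jiffy and each CPU's
 * udelay_val so that the delay loops stay calibrated across frequency
 * changes.
 */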
static int cpufreq_callback(struct notifier_block *nb,
                            unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpumask *cpus = freq->policy->cpus;
        unsigned long lpj;
        int cpu;

        /*
         * Skip the lpj adjustment if the CPU frequency transition does not
         * affect the delay loop calibration (CPUFREQ_CONST_LOOPS).
         */
        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        /* Save the initial lpj values for future scaling. */
        if (!glb_lpj_ref) {
                glb_lpj_ref = boot_cpu_data.udelay_val;
                glb_lpj_ref_freq = freq->old;

                for_each_online_cpu(cpu) {
                        per_cpu(pcp_lpj_ref, cpu) =
                                cpu_data[cpu].udelay_val;
                        per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
                }
        }

        /*
         * Adjust global lpj variable and per-CPU udelay_val number in
         * accordance with the new CPU frequency.
         */
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
                                                glb_lpj_ref_freq,
                                                freq->new);

                for_each_cpu(cpu, cpus) {
                        lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
                                            per_cpu(pcp_lpj_ref_freq, cpu),
                                            freq->new);
                        cpu_data[cpu].udelay_val = (unsigned int)lpj;
                }
        }

        return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

/*
 * rtc_lock serializes access to the board's real-time clock chip.
 */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

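/*
 * Default handler for the performance counter interrupt: report the
 * interrupt as not handled.  Profiling code may install its own handler
 * via the perf_irq hook.
 */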
static int null_perf_irq(void)
{
        return 0;
}

int (*perf_irq)(void) = null_perf_irq;

EXPORT_SYMBOL(perf_irq);

/*
 * time_init() does the following things:
 *
 * 1) plat_time_init() -
 *      a) (optional) set up RTC routines,
 *      b) (optional) calibrate and set the mips_hpt_frequency
 *          (only needed if you intend to use the CPU counter as the timer
 *           interrupt source)
 * 2) calculate a couple of cached variables for later use
 */

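/*
 * Frequency of the high-precision timer (the CP0 Count register), in Hz.
 * Platforms that use the CPU counter set this from plat_time_init().
 */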
unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);

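/*
 * Check whether reading the CP0 Count register on this CPU is affected by
 * the R4000/R4400 mfc0-from-Count erratum.
 */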
static __init int cpu_has_mfc0_count_bug(void)
{
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
                /*
                 * V3.0 is documented as suffering from the mfc0 from Count
                 * bug.  As far as we know this is the last version of the
                 * R4000; later versions were marketed as R4400.
                 */
                return 1;

        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                /*
                 * The published errata for the R4400 up to 3.0 say the CPU
                 * has the mfc0 from Count bug.
                 */
                if ((current_cpu_data.processor_id & 0xff) <= 0x30)
                        return 1;

                /*
                 * We assume newer revisions are OK.
                 */
                return 0;
        }

        return 0;
}

void __init time_init(void)
{
        plat_time_init();

        /*
         * The use of the R4k timer as a clock event takes precedence;
         * if reading the Count register might interfere with the timer
         * interrupt, then we don't use the timer as a clock source.
         * We may still use the timer as a clock source though if the
         * timer interrupt isn't reliable; the interference doesn't
         * matter then, because we don't use the interrupt.
         */
        if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
                init_mips_clocksource();
}