/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as base and the unstable clock deltas. The deltas are filtered,
 * making it monotonic and keeping it within an expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
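/*
 * sched_clock_running gates the code below: until sched_clock_init() has
 * run, __update_sched_clock() and sched_clock_tick() bail out early.
 */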
static __read_mostly int sched_clock_running;
/* Set when the raw sched_clock() is stable and needs no filtering: */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;
#else
static const int sched_clock_stable = 1;
#endif
struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t	lock;

	u64		tick_raw;	/* sched_clock() sampled at the last tick */
	u64		tick_gtod;	/* ktime_get() sampled at the last tick */
	u64		clock;		/* filtered, monotonic per-cpu clock */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
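/*
 * Small helpers to fetch the local cpu's instance or a remote cpu's
 * instance of the per-cpu clock state above.
 */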
static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}
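/*
 * Initialize every cpu's sched_clock_data to the current ktime so all
 * cpus start from a common GTOD base, then mark the clock as running.
 */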
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}
/*
 * min, max except they take wrapping into account
 */
static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	s64 delta = now - scd->tick_raw;
	u64 clock, min_clock, max_clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (unlikely(delta < 0))
		delta = 0;

	if (unlikely(!sched_clock_running))
		return 0ull;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      max(scd->clock, scd->tick_gtod + TICK_NSEC));
	 */
	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, scd->clock);
	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	scd->clock = clock;

	return scd->clock;
}
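/*
 * Worked example with illustrative numbers (assuming HZ=250, so TICK_NSEC
 * is 4ms): with scd->tick_gtod = 10ms, scd->clock = 11ms and a raw delta
 * of 9ms, the candidate is 10 + 9 = 19ms; min_clock = max(10, 11) = 11ms,
 * max_clock = max(11, 10 + 4) = 14ms, so the result is clamped to 14ms.
 * A candidate below 11ms would be pulled up to 11ms, which is what keeps
 * the clock from moving backwards.
 */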
/* Take both locks in address order to avoid ABBA deadlock: */
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}
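/*
 * sched_clock_cpu() - return the filtered clock for @cpu.
 *
 * For a remote cpu both the local and the remote sched_clock_data are
 * locked and the two clocks are coupled to the larger of the two values,
 * which keeps the result monotonic across the pair.
 */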
u64 sched_clock_cpu(int cpu)
{
	u64 now, clock, this_clock, remote_clock;
	struct sched_clock_data *scd;

	if (sched_clock_stable)
		return sched_clock();

	scd = cpu_sdc(cpu);

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		this_clock = __update_sched_clock(my_scd, now);
		remote_clock = scd->clock;

		/*
		 * Use the opportunity that we have both locks
		 * taken to couple the two clocks: we take the
		 * larger time as the latest time for both
		 * runqueues. (this creates monotonic movement)
		 */
		if (likely((s64)(remote_clock - this_clock) < 0)) {
			clock = this_clock;
			scd->clock = clock;
		} else {
			/*
			 * Should be rare, but possible:
			 */
			clock = remote_clock;
			my_scd->clock = remote_clock;
		}

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		clock = __update_sched_clock(scd, now);
	}

	__raw_spin_unlock(&scd->lock);

	return clock;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
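/*
 * Called from the periodic timer tick with irqs disabled: resample both
 * the raw sched_clock() and the GTOD base, re-anchoring the window used
 * by __update_sched_clock().
 */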
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__update_sched_clock(scd, now);
	__raw_spin_unlock(&scd->lock);
}
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
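/*
 * Note: reading the clock just before entering deep idle records a fresh
 * value while the TSC is still running; the wakeup event below then
 * resynchronizes against GTOD via sched_clock_tick().
 */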
/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
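/*
 * cpu_clock() - irq-safe wrapper around sched_clock_cpu() for callers
 * that may not already have interrupts disabled.
 */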
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);
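/*
 * Example use (hypothetical caller, not part of this file): timestamping
 * an event from arbitrary context with the per-cpu clock:
 *
 *	u64 t = cpu_clock(raw_smp_processor_id());
 */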