/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

/**
 * struct clock_read_data - data required to read from sched_clock
 *
 * @epoch_ns:		sched_clock value at last update
 * @epoch_cyc:		Clock cycle value at last update
 * @sched_clock_mask:	Bitmask for two's complement subtraction of non 64bit
 *			clocks
 * @read_sched_clock:	Current clock source
 * @mult:		Multiplier for scaled math conversion
 * @shift:		Shift value for scaled math conversion
 * @suspended:		Flag to indicate if the clock is suspended (stopped)
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=48 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 sched_clock_mask;
	u64 (*read_sched_clock)(void);
	u32 mult;
	u32 shift;
	bool suspended;
};
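
/*
 * Illustrative example of the masked epoch arithmetic: with a 32-bit
 * counter, sched_clock_mask is 0xffffffff. If epoch_cyc was 0xfffffff0
 * and the counter has since wrapped round to 0x00000010, then
 * (cyc - epoch_cyc) & sched_clock_mask == 0x20, i.e. the 32 cycles that
 * really elapsed, so a single wrap between updates converts correctly.
 */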

/**
 * struct clock_data - all data needed for sched_clock (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular, seq and read_data (combined) should fit
 * into a single 64 byte cache line.
 */
struct clock_data {
	seqcount_t seq;
	struct clock_read_data read_data;
	ktime_t wrap_kt;
	unsigned long rate;
	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data = { .mult = NSEC_PER_SEC / HZ,
		       .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
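
/*
 * Illustrative example (real values are chosen by clocks_calc_mult_shift()
 * at registration time): for a 1 MHz counter, mult = 512000 and shift = 9
 * would give cyc_to_ns(cyc, 512000, 9) == (cyc * 512000) >> 9
 * == cyc * 1000, i.e. exactly 1000 ns per counter tick.
 */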

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned long seq;
	struct clock_read_data *rd = &cd.read_data;

	do {
		seq = raw_read_seqcount_begin(&cd.seq);

		res = rd->epoch_ns;
		if (!rd->suspended) {
			cyc = rd->read_sched_clock();
			cyc = (cyc - rd->epoch_cyc) & rd->sched_clock_mask;
			res += cyc_to_ns(cyc, rd->mult, rd->shift);
		}
	} while (read_seqcount_retry(&cd.seq, seq));

	return res;
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc, ns;
	struct clock_read_data *rd = &cd.read_data;

	cyc = rd->read_sched_clock();
	ns = rd->epoch_ns +
	     cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
		       rd->mult, rd->shift);

	raw_local_irq_save(flags);
	raw_write_seqcount_begin(&cd.seq);
	rd->epoch_ns = ns;
	rd->epoch_cyc = cyc;
	raw_write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}
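
/*
 * The poll callback below re-arms itself every cd.wrap_kt, so the epoch
 * is refreshed at least once per wrap period and the masked subtraction
 * in sched_clock() never has to span more than one wrap of the counter.
 */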
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r;
	char r_unit;
	struct clock_read_data *rd = &cd.read_data;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	/* update epoch for new counter and update epoch_ns from old counter */
	new_epoch = read();
	cyc = rd->read_sched_clock();
	ns = rd->epoch_ns +
	     cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
		       rd->mult, rd->shift);
	cd.actual_read_sched_clock = read;

	raw_write_seqcount_begin(&cd.seq);
	rd->read_sched_clock = read;
	rd->sched_clock_mask = new_mask;
	rd->mult = new_mult;
	rd->shift = new_shift;
	rd->epoch_cyc = new_epoch;
	rd->epoch_ns = ns;
	raw_write_seqcount_end(&cd.seq);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);
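
	/*
	 * Illustrative boot message (exact values depend on the platform's
	 * counter; here a hypothetical 56-bit timer at 24 MHz):
	 *
	 *   sched_clock: 56 bits at 24MHz, resolution 41ns, wraps every 4398046511078ns
	 */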

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided by this point,
	 * make it the final one.
	 */
	if (cd.read_data.read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}
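
/*
 * Across suspend the underlying counter may stop or reset, so
 * sched_clock_suspend() captures a final epoch and freezes sched_clock()
 * at epoch_ns via the suspended flag; sched_clock_resume() re-reads the
 * counter to establish a fresh epoch before unfreezing.
 */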
static int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data;

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data;

	rd->epoch_cyc = rd->read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	rd->suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);
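
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * platform with a 56-bit counter running at 24 MHz would register its
 * read callback early in boot, while IRQs are still disabled.
 * my_counter_read() and my_counter_base are illustrative names only:
 *
 *	static u64 notrace my_counter_read(void)
 *	{
 *		return readq_relaxed(my_counter_base);
 *	}
 *
 *	sched_clock_register(my_counter_read, 56, 24000000);
 */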